anova.modelFrame <- structure(function #Compare modelFrame objects ###Models in \code{\link{modelFrame}} lists are compared with ###\code{\link{anova.lme}} method. ##references<< Lara W., F. Bravo, ##D. Maguire. 2013. Modeling patterns between ##drought and tree biomass growth from ##dendrochronological data: A multilevel ##approach. Agric. For. Meteorol., ##178-179:140-151. ( object, ##<< an object inheriting from class "modelFrame". ..., ##<< other optional fitted model objects inheriting from ##classes "modelFrame", "lme", "lm", among other (see ##\code{\link{anova.lme}}). test, ##<< optional character string specifying the type of sum of ##squares to be used in F-tests for the terms in the model ##(see \code{\link{anova.lme}}). type, ##<<optional character string specifying the type of sum ##of squares to be used in F-tests for the terms in the ##model (see \code{\link{anova.lme}}). adjustSigma, ##<< If TRUE and the estimation method used to obtain ##object was maximum likelihood, the residual ##standard error is multiplied by sqrt(nobs/(nobs - ##npar)), converting it to a REML-like estimate (see ##\code{\link{anova.lme}}). Terms, ##<< optional integer or character vector specifying which ##terms in the model should be jointly tested to be zero ##using a Wald F-test (see \code{\link{anova.lme}}). L, ##<< optional numeric vector or array specifying linear ##combinations of the coefficients in the model that should be ##tested to be zero (see \code{\link{anova.lme}}). verbose ##<< optional logical value. If TRUE, the calling ##sequences for each fitted model object are printed with ##the rest of the output, being omitted if verbose = ##FALSE (see \code{\link{anova.lme}}). ) { sc <- as.list(sys.call())[-1L] sch <- sc. <- sapply(sc,as.character) scn. <- sapply(names(sc), function(x)x%in%"") if(length(scn.) != 0){ schn <- names(sch) sc. <- sch[scn.] sch <- c(sc., schn[!scn.]) } names(sc) <- sch sc[sc.] <- Map(as.character, sc[sc.] ) sc[sc.] <- lapply(sc[sc.], get) names(sc) <- c('object', sch[2:length(sch)]) for(i in 1:length(sc)){ if(inherits(sc[[i]], 'modelFrame')){ sc[[i]] <- sc[[i]]$'model' } } aov <- do.call(anova, sc) rownames(aov) <- sc. return(aov) ### data frame inheriting from class "anova.lme". } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function) ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## } ## Fitting a single linear regression of the "tdForm" formula ## without random effects to the tree-biomass data: ## \donttest{ ## trwfl <- lm(log(x) ~ log(csx) + year, ## data = trwf$'model'$'data') ## } ## Comparing model likelihoods with anova method: ## \donttest{ ## anova(trwf, trwfl) ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/anova.modelFrame.R
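A minimal illustration (not BIOdry code) of the comparison that anova.modelFrame delegates to nlme::anova.lme, using the Orthodont data shipped with nlme; both fits use maximum likelihood so their likelihoods are comparable:

library(nlme)
## two nested mixed-effects models fitted by ML
m1 <- lme(distance ~ age, random = ~ 1 | Subject, data = Orthodont, method = "ML")
m2 <- lme(distance ~ age + Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML")
## likelihood-based comparison (AIC, BIC, likelihood-ratio test)
anova(m1, m2)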
arguSelect <- structure(function#Argument selection ###Arguments of specific functions are selected from arbitrary numbers ###and varieties of arguments. ##details<<Closures with ellipsis terms use this ## function to extract and pass arguments to ## other functions. Arguments in \code{MoreArgs} ## lists are also extracted and stored again as ## \code{MoreArgs} lists. ( rd = NULL, ##<<\code{NULL} or \code{data.frame}. Multilevel ##ecological data series. If \code{NULL} then this ##argument is ignored. fun = c('mapply','ringApply'), ##<< \code{character} or ##\code{NULL}. Vector of function ##names. ... ##<< Further arguments not necessarily contained in the ##processed function(s). ) { mx <- list(...) if('ref'%in%names(mx)){ nl <- names(slitFrame(rd)) refs <- levexp(mx[['ref']],nl) mx[['ref']] <- refs[nl]} mar <- 'MoreArgs' fn <- mx[['fn']] fun <- c(fun,fn) fca <- lapply(fun, function(x)names(formals(x))) nfr <- unlist(fca) mx. <- mx[!names(mx)%in%mar] sel <- mx.[names(mx.)%in%nfr] s <- names(mx[[mar]])%in%nfr if(any(s)) sel[[mar]] <- mx[[mar]][s] if(is.data.frame(rd)) sel[['rd']] <- rd return(sel) ### \code{list} of selected arguments. } , ex=function() { ##Multilevel ecological data series of tree-ring widths: data(Prings05,envir = environment()) ## Radial increments measured on 2003: data(Pradii03,envir = environment()) ## Selection of arguments in some functions: ar1 <- arguSelect(fun = c('amod'), only.dup = TRUE, mp = c(0.5,1), rf.t = 2003) str(ar1) ar2 <- arguSelect(fn = 'amod', only.dup = TRUE, mp = c(0.5,1), rf.t = 2003) str(ar2) ar3 <- arguSelect(rd = Prings05, fn = 'amod', only.dup = TRUE, mp = c(0.5,1), rf.t = 2003) str(ar3) ar4 <- arguSelect(rd = Prings05, fun = 'scacum', sc.c = Pradii03, MoreArgs = list(only.dup = TRUE, mp = c(0.5,1), rf.t = 2003)) str(ar4) ar5 <- arguSelect(rd = Prings05, fun = 'scacum', ref = Pradii03, rf.t = rep(2003:2011), MoreArgs = list(only.dup = TRUE, mp = c(0.5,1))) str(ar5) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/arguSelect.R
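A stripped-down sketch of the matching idea in arguSelect: keep only the dot-arguments whose names appear among the formals of the target function (pick_args is a hypothetical helper, not the package function):

pick_args <- function(fun, ...) {
    dots <- list(...)
    ## retain only arguments that the target function actually declares
    dots[names(dots) %in% names(formals(fun))]
}
## 'na.rm' is a formal argument of sd(), 'foo' is not, so only 'na.rm' is kept
str(pick_args(sd, na.rm = TRUE, foo = 1))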
cClass <- structure(function# Column-class extraction. ### Column names of multilevel data sets are extracted according to ### three classes: \code{numeric} values, \code{integer} sequences, ### and \code{factor} levels. ( rd, ##<<\code{data.frame}. Multilevel data series. cl = 'all' ##<<\code{character} or \code{NULL}. Character vector ##of classes to be considered. These can be ##'numeric', 'integer', or 'factor'. If \code{'all'} ##then all column names of \code{rd} are extracted. ) { fnm <- function(nml){names(nml)[nml]} nml <- sapply(rd, is.numeric) nm <- fnm(nml) inl <- sapply(rd[,nm],function(x) all(floor(x) == x, na.rm = TRUE)) in. <- fnm(inl) nu <- nm[!nm%in%in.] fct <- sapply(rd, is.factor) fc <- fnm(fct) cls <- list(numeric = nu, integer = in., factor = fc) if(cl[1L]%in%'all') cl <- names(cls) cls <- unlist(cls[cl]) return(cls) ### \code{character} names. } , ex=function() { ##Multilevel data frame of tree-ring widths: data(Prings05,envir = environment()) ## Names of variables in Prings05 data containing numeric classes: cClass(Prings05, 'numeric') # 'x' ## Names of variables containing time units: cClass(Prings05, 'integer') # 'year' ## Names of variables containing factors: cClass(Prings05, 'factor') # 'sample', 'tree', 'plot' })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/cClass.R
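A standalone sketch of the three-way column classification performed by cClass, on a hypothetical data frame (note that whole-numbered numeric columns count as 'integer', as in the code above):

toy <- data.frame(x = c(1.2, 3.4),
                  year = c(2004L, 2005L),
                  tree = factor(c("t1", "t2")))
sapply(toy, function(col) {
    if (is.factor(col)) "factor"
    else if (is.numeric(col) && all(floor(col) == col, na.rm = TRUE)) "integer"
    else "numeric"
})
## x: "numeric", year: "integer", tree: "factor"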
frametoLme <- structure(function# LME modeling ### LME models are fitted to detrend multilevel ecological data series. ##details<<This function implements ##\code{\link{lme}} function to fit linear ##mixed-effects models on multilevel ##ecological data series processed by the ##\code{\link{modelFrame}} function. Two kind ##of model formulas can be fitted: 'lmeForm' ##and 'tdForm'; these characters implement ##functions with same names ##(\code{\link{tdForm}} and ##\code{\link{lmeForm}}). Other lme formulas ##can be specified by modifying arguments in ##any of these two functions. After the lme ##models are fitted, they can be extended by ##implementing methods in \code{\link{nlme}} ##package. ##references<< Pinheiro J. C., ##D. M. Bates. 2000. Mixed-effects models in S ##and S-PLUS. Springer, New York. ( rd, ##<<\code{data.frame}. Multilevel ecological data series. form = 'lmeForm', ##<<\code{character}. Any of two lme formulas: ##'lmeForm' and 'tdForm' (see details). res.data =TRUE, ##<< \code{logical}. Save residuals as a ##multilevel ecological data series. If TRUE then ##a data frame of name 'fluc' is added to output ##list. ... ##<< Further arguments to be passed to \code{\link{lme}} ##function or to the lme formula in \code{form}. ) { pr.cov <- function(form){ chf <- Reduce(paste,deparse(form)) fnc <- gsub('.*~|\\|.*','',chf) fnc <- paste('~',fnc,sep = '') return(formula(fnc))} ## Implementation of lme form: if(grepl('~',form)) formu <- formula(form) if(!grepl('~',form)){ arf <- arguSelect(rd,fun = form,...) formu <- do.call(form,arf)} prc <- pr.cov(formu) environment(prc) <- .GlobalEnv gd <- groupedData( formula = formu,data = na.omit(rd)) #<< arl <- arguSelect(NULL,fun = 'lme',...) arl[['fixed']] <- gd if(!'random'%in%names(arl)) arl[['random']] <- pdDiag(prc) if(!'control'%in%names(arl)) arl[['control']] <- list(msMaxIter = 200) argn <- lapply(names(arl), as.name) names(argn) <- names(arl) call <- as.call(c(list(as.name("lme")), argn)) mem. <- eval(call, arl) mem <- list(model = mem.,call = sys.call()) rset <- function(r.model){ md <- r.model[['data']] tim <- cClass(md, 'integer') lev <- cClass(md, 'factor') lg <- ncol(data.frame(getGroups(md))) dcum <- residuals(r.model,level = lg:1,type = 'p') dcum <- as.data.frame(dcum) nam. <- names(dcum) names(dcum) <- paste(nam.,'.res',sep = '') ## dres <- cbind(dcum,md[,c(tim,lev)]) dres <- merge(dcum,md[,c(tim,lev)], by = 'row.names', all.x = TRUE,sort = FALSE) dres <- dres[,c(names(dcum),tim,lev)] return(dres)} if(res.data){ residu <- rset(mem.) mem[['fluc']] <- groupedData(lmeForm(residu),data=residu) } ## mem[['resid']] <- rset(mem.) return(mem) ### \code{\link{groupedData}} object. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 
2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function) ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## } ## Detrending the fluctuations by fitting a (l)td-form model ## with Maximum-likelihood method (ML): ## \donttest{ ## pdata <- trwf$'model'$'data' ## rlme <- frametoLme(pdata, ## form = 'tdForm', ## method = 'ML', ## log.t = TRUE) ## summary(rlme$model) ## } ##a plot of the modeled fluctuations: ## \donttest{ ## d <- groupedData(lmeForm(rlme$fluc,lev.rm = 1),data = rlme$fluc) ## plot(d,groups = ~ sample,auto.key = TRUE) ## } ## A model of aridity: ## \donttest{ ## cf <- modelFrame(PTclim05, ## lv = list('year','year'), ## fn = list('moveYr','wlai'), ## form = NULL) ## summary(cf) ## } ## An lme model of aridity at 'plot' level: ## \donttest{ ## cdata <- cf$'model'$'data' ## rmod <- frametoLme(cdata,form = 'lmeForm') ## summary(rmod$model) ## rk <- groupedData(lmeForm(rmod$fluc),data=rmod$fluc) ## plot(rk,ylab = 'detrended AI') ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/frametoLme.R
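Outside the BIOdry data, the kind of fit that frametoLme assembles can be sketched directly with nlme: a groupedData set, a diagonal random-effects structure (pdDiag) and a raised iteration limit, here on nlme's Orthodont data:

library(nlme)
## Orthodont is a groupedData object (distance ~ age | Subject),
## so the grouping factor is taken from the data
fit <- lme(distance ~ age, data = Orthodont,
           random = pdDiag(~ age),
           control = list(msMaxIter = 200))
summary(fit)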
levexp <- structure(function# Vector releveling ### Expansion or reduction of a numeric vector by matching its level names ### with the ecological factors of a multilevel ecological data ### series. ( x, ##<<\code{numeric} vector with names of the vector representing ##the levels to be matched. levels ##<<\code{data.frame}. Multilevel ecological data series, ##or \code{character} vector of levels. ) { tx <- split(x,names(x)) if(is.character(levels)) dsp <- split(levels,levels) if(is.data.frame(levels)) dsp <- slitFrame(levels) nam <- lapply(seq_len(length(tx)), function(i)paste("\\b", names(tx[i]),"\\b",sep = "")) nms <- lapply(seq_len(length(tx)), function(i)grep(nam[[i]], names(dsp),value = TRUE)) names(nms) <- names(tx) nnms <- lapply(nms,length) nms1 <- lapply(seq_len(length(tx)), function(i)rep(tx[[i]],nnms[[i]])) nm <- lapply(seq_len(length(tx)), function(i)data.frame(nms[[i]],nms1[[i]])) nmd <- do.call(rbind,nm) nmd1 <- nmd[,2] names(nmd1) <- nmd[,1] nmd1 <- nmd1[names(dsp)] nmd1 <- nmd1[!is.na(nmd1)] return(nmd1) ### numeric vector with expanded/reduced levels. } , ex=function(){ ##Multilevel ecological data series of tree-ring widths: data(Prings05,envir = environment()) ## tree radii measured at 2003: data(Pradii03,envir = environment()) ## Releveling the tree radii refs <- levexp(Pradii03,Prings05) refs })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/levexp.R
lmeForm <- structure(function#LME formula ### This function computes LME formulas from multilevel ecological ### data series (MEDS). ##details<< Formulas of the form \code{resp ~ cov ##| group} (see \code{\link{groupedData}} ##function) are computed from MEDS. The formulas ##can be implemented by ##\code{\link{modelFrame}} function to detrend MEDS ##references<< Pinheiro J. C., ##D. M. Bates. 2000. Mixed-effects models in S ##and S-PLUS. Springer, New York. ( rd, ##<< \code{data.frame}. Multilevel ecological data series prim.cov = FALSE, ##<<\code{Logical}: should the LME formula only ##be printed in primary covariate form: '~ cov'? ##If FALSE then a complete form: 'resp ~ covar | ##group' is formulated. resp = NULL, ##<<\code{NULL} or \code{character}. Column name of ##the response. If NULL then the name of the first ##numeric column of the MEDS is used. covar = NULL, ##<<\code{NULL} or \code{character}. Column name(s) ##of the covariate(s). If \code{NULL} then the name ##of the first time-unit column in the MEDS is used. lev.rm = NULL ##<< \code{NULL}, \code{character} or \code{numeric} ##vector of levels in the MEDS to be removed from ##the groups. ) { if(is.null(resp)) resp <- cClass(rd, 'numeric')[1L] if(is.null(covar)) covar <- cClass(rd, 'integer')[1L] covar. <- paste('~',covar,sep = ' ') covar <- paste(resp,'~',covar,sep = ' ') f <- cClass(rd, 'factor') if(is.numeric(lev.rm)) lev.rm <- f[lev.rm] nf <- rev(f[!f%in%lev.rm]) sep. <- ' | ' if(length(nf) == 0) sep. <- '' fc <- paste(nf,collapse = '/') fr <- paste(covar,fc,sep = sep.) fr <- formula(fr,showEnv = FALSE) if(prim.cov)fr <- covar. return(fr) ### \code{formula} with any of the forms: \code{resp ~ cov | group} or ### \code{~ cov}. } , ex=function(){ ##Multilevel ecological data series of tree-ring widths: data(Prings05,envir = environment()) ## LME formula: form1 <- lmeForm(Prings05,prim.cov = FALSE) print(form1) ## removing the sample level from the formula form2 <- lmeForm(Prings05,lev.rm = 'sample') form2 <- lmeForm(Prings05,lev.rm = 1) ## groupedData object with the LME formula gdata <- groupedData(lmeForm(Prings05,lev.rm = 1), data = Prings05) plot(gdata,groups = ~ sample) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/lmeForm.R
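A minimal sketch of the string assembly behind lmeForm, with hypothetical column names 'x' (response), 'year' (time unit) and grouping factors 'plot' and 'sample':

resp  <- "x"
covar <- "year"
grp   <- paste(rev(c("sample", "plot")), collapse = "/")   # coarser group first
formula(paste(resp, "~", covar, "|", grp))
## x ~ year | plot/sample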
mUnits <- structure(function#Metric system ### This function controls metric units. ##details<<Characters in \code{from} and \code{to} ##arguments have the form 'p_', where 'p' is the ##metric prefix and '_' is a base unit. Sixteen ##metric prefixes are supported: atto 'a', femto ##'f', pico 'p', nano 'n', micro 'mm', milli 'm', ##centi 'c', deci 'd', deca 'da', hecto 'h', kilo ##'k', mega 'M', giga 'G', tera 'T', peta 'P', and ##exa 'E'. ( x, ##<<\code{numeric} vector. from = 'mm', ##<<\code{character}. Initial metric unit. to = 'mm' ##<<\code{character}. Final metric unit. ) { fmu <- function(x){ substr(x, nchar(x), nchar(x))} mu <- fmu(from);mu. <- fmu(to) err. <- paste('different metric units: ', mu,' vs. ',mu.,sep = '') if(!mu%in%fmu(to)) stop(err.) xp <- c(0:2,seq(3,18,3)) ex <- c(-xp,xp) sm <- 10^(unique(ex[order(ex)])) us <- c('a','f','p','n','mm','m', 'c','d','', 'da','h','k', 'M','G','T','P','E') un <- paste(us,mu,sep ='') names(sm) <- un eq <- sm[from]/sm[to] names(eq) <- to x <- x * eq return(x) ### \code{numeric} vector. } , ex=function() { ## Simulation of TRW data set.seed(1) w <- abs(rnorm(12,1,1)) trw <- ts(w,start = 1970) ## transforming metric units of trw vector from millimeters to meters sr <- mUnits(trw, from = 'mm', to = 'm') attributes(sr) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/mUnits.R
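The conversion itself is a ratio of powers of ten; a standalone sketch with a few length units (the exponent table below is assumed for illustration, not taken from the package):

exp10 <- c(mm = -3, cm = -2, dm = -1, m = 0, km = 3)   # exponent of each unit
convert <- function(x, from, to) x * 10^(exp10[[from]] - exp10[[to]])
convert(1500, "mm", "m")   # 1.5
convert(2.5, "km", "m")    # 2500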
modelFrame <- structure(function #Dendroclimatic-fluctuations modeling ### This function develops recursive evaluation of functions for ### one-level modeling (FOLM) and LME detrending of dendroclimatic ### chronologies. ##details<< Defaults model fluctuations in ##tree-ring width chronologies via recursive ##implementation of four FOLM: ##\code{\link{rtimes}}, \code{\link{scacum}}, ##\code{\link{amod}}, and ##\code{\link{frametoLme}}. Nevertheless, ##other FOLM can be implemented to model ##aridity-index fluctuations(see example with ##climatic data). Processed chronologies are ##detrended with \code{\link{lme}} function ##and other \code{\link{nlme}} methods ##. Internal algorithm uses ##\code{\link{shiftFrame}} ##\code{\link{arguSelect}} and ##\code{\link{ringApply}} ##functions. Consequently, arguments that are ##not iterated over factor-level labels in the ##processed data are specified in 'MoreArgs' ##lists (see examples). Arguments in ##\code{modelFrame} objects can be updated ##with \code{\link{update}} function. ##references<< Lara W., F. Bravo, ##D. Maguire. 2013. Modeling patterns between ##drought and tree biomass growth from ##dendrochronological data: A multilevel ##approach. Agric. For. Meteorol., ##178-179:140-151. ( rd, ##<<\code{data.frame} or \code{list}. Dendroclimatic ##chronology or Multilevel ecological data series. fn = list('rtimes','scacum','amod'), ##<< \code{list}. Names of ##the functions for one-level ##modeling to be recursively ##implemented. lv = list(2,1,1), ##<< \code{list}. \code{numeric} positions in ##the factor-level labels of \code{rd} to ##implement the one-level functions. If ##\code{rd} is a MEDS, then \code{character} ##names of the factor-level columns. form = 'tdForm', ##<<\code{character} or \code{NULL}. Name of a ##detrending formula. Two in-package ##methods are available: the default ##\code{\link{tdForm}} or ##\code{\link{lmeForm}}. ... ##<< Further arguments in \code{\link{mUnits}}, or in the ##functions for one-level modeling, or in the ##\code{\link{lme}} function/methods, or in the detrending ##formula. ) { lse <- list(...) mln <- length(lv) iswide <- all(sapply(rd, is.numeric)) islist <- class(rd)%in%'list' if(any(iswide, islist)){ rd <- shiftFrame(rd) } fns <- 'mUnits' if(any(names(lse)%in%names(formals(fns)[-1L]))){ nmu <- cClass(rd, 'numeric') rdu <- arguSelect(x = rd[,nmu], fun = fns, ...) rd[,nmu] <- do.call(fns, rdu) if('sc.c'%in%names(lse)){ sca <- arguSelect(x = lse$'sc.c', fun = fns, ...) lse[['sc.c']] <- do.call(fns, sca) } } mar <- 'MoreArgs' ls. <- lapply(lse,class)%in%'list' yls <- Map(unlist,lse[ls.]) yls[c('fn','lv')] <- list(fn,lv) nma <- yls[!names(yls)%in%mar] lsp <- lse[!names(lse)%in%names(yls)] s <- names(lse)%in%mar if(any(s)) lsp[[mar]] <- lse[[mar]] ar <- list() mln <- length(nma[[1L]]) for(i in 1:mln){ lsl <- lapply(nma, '[[', i) lt <- list(rd, fun = 'ringApply') nl <- unlist(Map(levels, rd[cClass(rd,'factor')])) spd <- function(x){ unlist(strsplit(x, '\\.'))} my <- unlist(Map(function(x) !is.null(names(x)) && spd(names(x))%in% nl, lsp)) if(any(my)) { lsp[names(lsp)[my]] <- Map(function(x) levexp(x, rd),lsp[names(lsp)[my]])} lst <- c(lsl, lsp, lt) ar[[i]] <- do.call('arguSelect', lst) rd <- do.call('ringApply', ar[[i]]) } arl <- arguSelect(rd, fun = c('frametoLme','lme', form),...) 
arl[['form']] <- form rd <- do.call('frametoLme',arl) rd[['call']] <- sys.call() class(rd) <- c('modelFrame', class(rd)) return(rd) ### Threefold list with fluctuations in \code{fluc}, ### {\link{groupedData}} object in \code{model}, and model call in ### \code{call}. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function) ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## } ## Climatic records: data(Temp,envir = environment()) data(Prec,envir = environment()) ## Aridity-index fluctuations: ## \donttest{ ## aif <- modelFrame(rd = list(Prec, Temp), ## fn = list('moveYr','wlai'), ## lv = list('year','year'), ## form = 'lmeForm') ## summary(aif$'model') ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/modelFrame.R
moveYr <- structure(function#Seasonal years ### Monthly records in time-series replicates (usually of climate) are ### labeled for the years can begin in a month other than January. ##details<<\code{character} months as defined in ##\code{\link{month.abb}} or \code{\link{month.name}}. ( cd, ##<<\code{data.frame}. Multilevel ecological data series or ##\code{numeric} vector of repeated years with vector names ##belonging to \code{month.abb}. ini.mnt = 'Oct' ##<<\code{character}, or \code{numeric} from 1 to ##12. Initial month of the seasonal year. If ##\code{character} then the months are built-in ##constants in R-package \code{base}. Default ##\code{'Oct'} makes the years begin in October, ##for example. ) { chn <- function(tmp){ tmp. <- as.character(tmp) nm <- 1:12 fm <- 'month.abb' mna <- tmp%in%month.abb[nm] if(!all(mna)) fm <- 'month.name' names(nm) <- get(fm)[nm] if(!is.numeric(tmp)){ tmp <- nm[tmp][tmp.]} names(tmp) <- get(fm)[tmp] return(tmp)} isdf <- is.data.frame(cd) if(isdf){ ny <- cd cd <- cd[,'year'] dt <- ny dtf <- dt[,cClass(dt,'factor')] emnt. <- unlist(Map(function(x) all(levels(x)%in%month.abb), dtf)) names(emnt.)[emnt.] names(cd) <- chn(ny[,names(emnt.)[emnt.]])} if(!isdf) names(cd) <- chn(names(cd)) ini.mnt <- chn(ini.mnt) mn <- 1:12 ncd <- ifelse( mn[as.numeric(names(cd))] >= mn[ini.mnt], ifelse( mn[ini.mnt] > mn[5], cd + 1, cd), ifelse(mn[ini.mnt] <= mn[5], cd - 1, cd)) if(isdf){ ny[,'year'] <- ncd ny[,'month'] <- factor(month.abb[ as.numeric(names(cd))], levels = month.abb[as.numeric(names(cd))]) ny <- ny[cClass(ny)] } if(!isdf){ ny <- ncd names(ny) <- month.abb[as.numeric(names(cd))] } return(ny) ### \code{data.frame} object with the months being \code{numeric} ### values and the years beginning at \code{ini.mnt} argument. } , ex=function() { ## Climatic records of monthly precipitation sums and monthly ## average temperatures data(PTclim05,envir = environment()) ## Making the year 1955 in plot 'P16106' to begin on 'April' cl1 <- slitFrame(PTclim05,c('year','plot'))[[1]] cl2 <- moveYr(cl1,ini.mnt = 'Mar') head(cl2) ## a simple vector of years yr <- rep(2005,12) names(yr) <- month.abb[1:12] moveYr(yr) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/moveYr.R
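A standalone sketch of the relabeling rule for a seasonal year starting in October (the simplification below assumes the start month falls in the second half of the calendar year, as in the default):

mn  <- 1:12
yr  <- rep(2005, 12)
ini <- 10                                   # season starts in October
seasonal <- ifelse(mn >= ini, yr + 1, yr)   # Oct-Dec are moved to the next year
data.frame(month = month.abb, calendar = yr, seasonal = seasonal)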
muleMan <- structure(function#Multilevel dendroclimatic correlograms ### Multivariate correlograms between TRW fluctuations and climatic ### fluctuations. ##details<<Function \code{\link{mgram}} in ##package \code{\link{ecodist}} is implemented to ##compare the dendroclimatic fluctuations. Models ##being compared should have common higher-level ##factors (see example). ##references<< Lara W., F. Bravo, ##D. Maguire. 2013. Modeling patterns between ##drought and tree biomass growth from ##dendrochronological data: A multilevel ##approach. Agric. For. Meteorol., ##178-179:140-151. ( rd, ##<<\code{dataframe} or \code{\link{groupedData}}. TRW ##fluctuations such as that produced by ##\code{\link{modelFrame}}. cd, ##<<\code{dataframe} or ##\code{\link{groupedData}}. Aridity-index fluctuations such ##as that produced by \code{\link{modelFrame}}. rd.var = NULL, ##<<\code{character} or \code{NULL}. Column name of ##the TRW fluctuations to be compared. If ##\code{NULL} then the first column is processed. cd.var = NULL, ##<<\code{character} or \code{NULL}. Column name of ##the aridity-index fluctuations to be compared. If ##\code{NULL} then the first column is used. ... ##<<Further arguments in \code{\link{mgram}} ) { if(!is.data.frame(rd)){ rd <- rd[['fluc']] } if(!is.data.frame(cd)){ cd <- cd[['fluc']] } if(is.null(rd.var)){ rd.var <- names(rd)[1L] } if(is.null(cd.var)){ cd.var <- names(cd)[1L] } ford <- function(cd,nm = 'year'){ cd[do.call(order, as.list(cd[, rev(c(nm,cClass(cd,'factor')))])),]} tmp0 <- slitFrame(rd, cClass(rd, 'factor')) cf <- function(x, cl = 'F'){ if(cl%in%'I') cl <- 'integer' if(cl%in%'F') cl <- 'factor' cClass(x, cl) } flf. <- unlist(Map(function(x) any(x%in%cd[,cf(cd)]), rd[,cf(rd)])) fli. <- unlist(Map(function(x) any(x%in%cd[,cf(cd, 'I')]), rd[,cf(rd, 'I')])) fsl <- c(fli.,flf.) nrd <- names(fsl)[fsl] ncd <- cClass(cd, c('integer', 'factor')) fm <- function(x,...){ tme <- merge(x,cd,by.x = nrd, by.y = ncd) tme <- na.omit(ford(tme)) fny <- function(x,nm){ data.frame(x[,nm])} tmw <- fny(tme,cd.var) tmt <- fny(tme,rd.var) spd <- dist(tmw) spp <- dist(tmt) man <- mgram(spp,spd,...) dman <- data.frame(man$'mgram') pnm <- c('mantelr','lag','pval') npnm <- names(dman)[!names(dman)%in%pnm] pn. <- c(pnm,npnm) dman <- dman[,pn.] return(dman)} tmp <- Map(function(x,...)fm(x,...),tmp0,...) ## lsdfn add levels in rd to mancor lsdfn <- function(mancor,rd){ class(rd) rn <- do.call(rbind,mancor) code. <- rownames(rn) revn <- cClass(rd, 'factor') codes. <- do.call(rbind, strsplit(code.,'\\.'))[ ,1:length(revn)] codes. <- data.frame(codes.) codes. <- codes.[,rev(names(codes.))] codes. <- lapply(codes.,as.factor) names(codes.) <- revn rn <- cbind(rn,codes.) rownames(rn) <- NULL return(rn)} ## adding levels in rd to tmp tmp <- lsdfn(tmp,rd) ## order data in tmp tmp <- ford(tmp,nm = 'lag') tmp <- groupedData( lmeForm(tmp,covar = 'lag'),data = tmp) md <- list(mmgram = tmp, call = sys.call()) class(md) <- c('muleMan', class(md)) return(md) ### \code{data.frame} object of multivariate correlations. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 
2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function) ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## } ## Climatic Records: data(Temp,envir = environment()) data(Prec,envir = environment()) ## Aridity-index fluctuations: ## \donttest{ ## aif <- modelFrame(rd = list(Prec, Temp), ## fn = list('moveYr','wlai'), ## lv = list('year','year'), ## form = 'lmeForm') ## } ##Multivariate comparison: ## \donttest{ ## mcomp <- muleMan(trwf, ## aif, ## nperm = 10^3) ## str(mcomp) ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/muleMan.R
plot.modelFrame <- structure(function #Plot modelFrame objects ### Diagnostic Trellis plot for fluctuations in ### \code{\link{modelFrame}} objects is obtained. ( x, ##<< An object inheriting from class \code{\link{modelFrame}}. ... ##<< further arguments passed to the Trellis plot function. ) { plot(x$'fluc', type = 'l', ## grid = FALSE, abline = list(h = 0, lty = 2, lwd = 0.5, col = 'gray30'), ...) ## A diagnostic Trellis plot. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function) ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## plot(trwf, grid = FALSE) ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/plot.modelFrame.R
plot.muleMan <- structure(function #Plot muleMan objects ### Diagnostic Trellis plot for fluctuations in ### \code{\link{muleMan}} objects are obtained. ( x, ##<< An object inheriting from class \code{\link{muleMan}}. ... ##<< further arguments passed to the Trellis plot function. ) { plot(x$'mmgram', groups = ifelse(x$'mmgram'$'pval' < 0.05, TRUE, FALSE), pch = c(21,19), abline = list(h = 0, lty = 2, lwd = 0.5, col = 'black'), ...) ## A diagnostic Trellis plot. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function): ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = FALSE, ## on.time = FALSE) ## } ## Climatic Records: data(Temp,envir = environment()) data(Prec,envir = environment()) ## Aridity-index fluctuations: ## \donttest{ ## aif <- modelFrame(rd = list(Prec, Temp), ## fn = list('moveYr','wlai'), ## lv = list('year','year'), ## form = 'lmeForm') ## } ##Multivariate comparison: ## \donttest{ ## mcomp <- muleMan(trwf, ## aif, ## nperm = 10^3) ## plot(mcomp, grid = FALSE) ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/plot.muleMan.R
plot.wlai <- structure(function#Plot an wlai object ### A Walter-Lieth climate diagram is produced. ##details<< Areas between temperature and ##precipitation lines when precipitation ##exceeds temperature (moist seasons) are ##plotted in gray color, and areas where ##temperature exceeds precipitation (dry ##seasons) are plotted in black color. Monthly ##cumulative precipitations over 100 mm are ##scaled such that 1 degree C of average ##temperature is equal to 5 mm of ##precipitation. ##references<< Manrique E., ##A. Fernandez-Cancio. 2000. Extreme climatic events ##in dendroclimatic reconstructions from ##Spain. Clim. Chang., 44: 123-138. ( x,##<< \code{vector} or \code{data.frame}. An object inheriting ##from class \code{\link{wlai}}, representing the Aridity Index. ... ##<<\code{logical}. Further arguments passed to ##\code{\link{plot}} function. ) { if(!inherits(x, 'wlai')) stop("'x' does not belong to class 'wlai'") pl <- attributes(x) fb <- function(x){ nx <- c(pl$'ptm',x,pl$'ptm') y <- as.numeric(names(x)) names(nx) <- c(min(y),names(x),max(y)) nxn <- as.numeric(names(nx)) - 0.5 dt <- data.frame(x = nxn, y = nx) return(dt)} ## return(fb(pl$'xn')) ##color palette col. <- paste('gray',c(70,60,30), sep = '') par(oma = c(0,0,0,2)) plot(fb(pl$'xn'), ylim = c(pl$'ptm',max(c(pl$'pr.',pl$'tm'))), col = col.[1],type='l', xaxt = 'n',yaxt = 'n', xlab = 'Month',ylab = NA, ...) mns <- rownames(pl$'cd') xap <- unique(fb(pl$'pr')[,1]) yap <- pretty(c(pl$'ptm',max(c(pl$'pr.',pl$'tm')))) axis(4,at = yap, labels = yap,las = 1) axis(2,at = yap, labels = yap/2,las = 1) axis(1,at = xap, tick = FALSE,labels = mns) axis(1,at = xap - 0.5, tick = TRUE,labels = FALSE) mtext(text = expression(~degree~C), las = 1,at = min(xap) - 1.2 * par('cex')) mtext(text = 'mm',las = 1, at = max(xap) + 1.2 *par('cex')) polygon(fb(pl$'xn'), border = NA, col = col.[2]) polygon(fb(pl$'xn..'), border = NA, col = col.[1]) polygon(fb(pl$'tm'), border = NA, col = col.[3]) polygon(fb(pl$'xn1'), border = col.[1], col = 'white') lines(c(min(xap),max(xap)),c(100,100),col =col.[1],lty = 1) lp <- 'top' legend(lp,legend = c('Dry season','Moist season'), fill = c(col.[3],col.[1]),cex = 0.8, horiz = TRUE, border = NA,bty = 'n') rai <- round(pl$'ai', 3) text(1.5, max(c(pl$'pr.',pl$'tm')), cex = 0.8, col = 'gray30', paste("AI = ", rai, sep = '')) ### A \code{\link{plot}} of the Walter-Lieth diagram. } , ex=function() { ##random simulation of climatic records set.seed(1) pr <- rnorm(12,1,1) tm <- rnorm(12,0,1) cld <- data.frame(pr,tm) ##labels of months from october to september rownames(cld) <- month.abb[c(10:12,1:9)] rownames(cld) <- c(10:12,1:9) ##computation of the aridity index and climate diagram AI <- wlai(cld) plot.wlai(AI) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/plot.wlai.R
ringApply <- structure(function#Multilevel apply ### Wrapper of \code{\link{Map}} to apply functions on multilevel data ### series and preserve factor-level structure in the outputs. ##details<< Other functions such as ##\code{\link{rtimes}}, \code{\link{scacum}}, ##\code{\link{amod}}, or \code{\link{wlai}} can ##be implemented. Function arguments should be ##formulated as suggested in ##\code{\link{mapply}}, with constant ##arguments being stored in a \code{MoreArgs} ##list. This function is implemented by ##\code{\link{modelFrame}} for recursive ##modeling of MEDS. ##references<< Lara, W., F. Bravo, ##D. Maguire. 2013. Modeling patterns between ##drought and tree biomass growth from ##dendrochronological data: A multilevel ##approach. Agric. For. Meteorol., ##178-179:140-151. ( rd, ##<<\code{data.frame}. Multileve ecological data series. lv = 1, ##<< {numeric} position, or {character} name, of an ##ecological factor in the processed MEDS. fn = 'scacum', ##<< \code{character} name of the function to be ##evaluated (see details). Default 'scacum' ##computes scaled-cumulative radii. ... ##<< Further arguments in the function being specified ##\code{fn} argument (see details) ) { levs <- cClass(rd,'factor') emnt. <- unlist(Map(function(x) all(x%in%1:12) | all(x%in%month.abb), rd)) if(any(emnt.)) levs <- cClass(rd[ names(emnt.)[!emnt.]], 'factor') if(is.character(lv)){ if(!lv%in%levs){ levs <- c(lv, levs) } if(lv%in%levs) lv <- match(lv, levs) } if(is.numeric(lv)){ levs <- levs[lv:length(levs)] } cl1 <- slitFrame(rd, levs) fam <- function(x,...){ do.call(fn,list(x,...))} cl2 <- Map(function(x,...) fam(x,...), cl1,...) nord <- names(cl2)[order(names(cl2))] cl3 <- cl2[nord] cl4 <- do.call(rbind,cl3) rownames(cl4) <- NULL return(cl4) ### \code{data.frame} object preserving initial factor-level columns. } , ex=function() { ##Multilevel ecological data series (MEDS) of tree-ring widths: data(Prings05,envir = environment()) ## Radial increments measured on 2003: data(Pradii03,envir = environment()) ## MEDS of monthly precipitation sums and average temperatures: data(PTclim05,envir = environment()) ##Tree-level scaling of years of formation ##with 'rtimes' function: dfm1 <- ringApply(Prings05, lv = 2, fn = 'rtimes') str(dfm1) ##Relative time-units from year 1 to year 9: subset(dfm1,time%in%c(1:9,NA)) ## Sample-level scaling of TRW chronologies around reference radii ## which were measured at 2003: dfm2 <- ringApply(dfm1, lv = 'sample', sc.c = Pradii03, rf.t = 2003, fn = 'scacum') str(dfm2) ##Sample-level modeling of basal areas (mm2) via allometric ##scaling: dfm3 <- ringApply(dfm2, lv = 'sample', fn = 'amod', MoreArgs = list(mp = c(2,1,0.25 * pi,2))) str(dfm3) ## Seasonal years from 'October' to 'September': cl1 <- ringApply(PTclim05, lv = 'year', fn = 'moveYr') tail(cl1,15) ##Year-level aridity indexes: wl <- ringApply(cl1, lv = 'year', fn = 'wlai') str(wl) ## Plot of aridity-index fluctuations: d <- groupedData(lmeForm(wl),wl) plot(d) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/ringApply.R
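The core pattern is split-apply-combine while preserving the factor columns; a minimal standalone sketch on a hypothetical two-tree data frame:

toy <- data.frame(x = c(0.5, 0.7, 0.4, 0.6),
                  year = c(2004, 2005, 2004, 2005),
                  tree = factor(c("t1", "t1", "t2", "t2")))
parts <- split(toy, toy$tree, drop = TRUE)                  # one piece per level
out <- Map(function(d) { d$csx <- cumsum(d$x); d }, parts)  # apply per level
do.call(rbind, out)                                         # recombine, factors kept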
rtimes <- structure(function#Time-units synchronization ### Unique observations in time-series replicates are excluded ## details<<This function is used to enhance ## convergence of mixed-effects parameters during ## detrending processes of multilevel ecological ## data series (see \code{\link{modelFrame}} ## function). ( x, ##<< multilevel ecological data series containing a column of ##time units, or \code{numeric} vector with names representing ##the time units. only.dup = TRUE ##<< \code{logical}. Extract only duplicated ##times. If TRUE then unique times are replaced ##with NA. If all computed times are unique then ##this argument is ignored. ) { csn. <- FALSE if(is.data.frame(x)){ csnu <- cClass(x, 'numeric') csn <- c(cClass(x, 'integer'), cClass(x, 'factor')) csn. <- length(csn)!=0 csn.. <- csn[!csn%in%'time'] cd <- x x <- x[,'x'] names(x) <- cd[,'year']} n <- as.numeric(names(x)) time <- abs(min(n) - n - 1) da <- data.frame(x,time) ## return(csn.&& length(csnu) > 1) if(csn.&& length(csnu) > 1){ ## csn.. <- csn[!csn%in%'time'] # da <- cd[,csnu] da[,'time'] <- time } rownames(da) <- 1:nrow(da) dp <- duplicated(da[,'time']) uni <- with(da,!time%in%da[,'time'][dp]) if(only.dup&any(dp)) da[uni,'time'] <- NA if(csn.) da <- cbind(da,cd[,csn..]) return(da) ### \code{data.frame} object with the initial vector and its time ### units. } , ex=function(){ ## row names of a vector fy <- function(y,span){(y - span):y} x <- c(fy(2005,5),fy(2007,10)) ## (not run) Simulating the vector r <- abs(rnorm(length(x))) names(r) <- x ## (not run) computing the synchronized times: rtimes(r,only.dup = TRUE) ## (not run) Extracting only duplicated times: na.omit(rtimes(r,only.dup = TRUE)) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/rtimes.R
scacum <- structure(function#Cumulative-scaled sums ### This function computes cumulative and scaled sums of time-series ### replicates. ##details<< Cumulative sums of time-series ## replicates (e.g. tree-ring widths) are scaled ## around reference values (e.g. tree radii). ( x, ##<<\code{numeric} vector of time-series replicates with names ##of the vector being time units. sc.c = NA, ##<<\code{numeric} constant. Scaling constant. If ##\code{NA} then the computed cumulative sums are not ##scaled. rf.t = NA ##<<\code{NA}, or \code{numeric} constant. Reference ##time of the scaling constant. If \code{NA} then ##maximum time in vector-name range is used. ) { csn. <- FALSE if(is.data.frame(x)){ csnu <- cClass(x, 'numeric') csn <- c(cClass(x, 'integer'), cClass(x, 'factor')) csn. <- length(csn)!=0 csn.. <- csn[!csn%in%'csx'] cd <- x x <- x[,'x'] names(x) <- cd[,'year']} if(is.null(names(x))) stop('NULL labels in x', call. = FALSE) xcum <- cumsum(x) if(is.na(rf.t)) rf.t <- max(as.numeric(names(x))) inc <- 0 if(!is.na(sc.c)) inc <- sc.c - xcum[as.character(rf.t)] csx <- xcum + inc if(any(csx < 0,na.rm = TRUE)) csx <- xcum xd <- data.frame(x,csx) if(csn.&& length(csnu) > 1){ xd <- cd[,csnu] xd[,'csx'] <- csx } if(csn.) xd <- cbind(xd,cd[,csn..]) return(xd) ### data frame with the original vector, and its scaled-cumulative sums. } , ex=function() { x <- c(0.79,0.32,0.53,0.43,0.18) names(x) <- 1948:1952 scacum(x,sc.c = 4,rf.t = 1951) ##If sc.c = NA then cumulative values are scaled around ##max(cumsum(x)): max(cumsum(x)) scacum(x,NA,1951) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/scacum.R
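The scaling step amounts to shifting the cumulative sums so they pass through the reference value at the reference year; a sketch reusing the numbers from the example above:

x   <- c(`1948` = 0.79, `1949` = 0.32, `1950` = 0.53, `1951` = 0.43, `1952` = 0.18)
csx <- cumsum(x) + (4 - cumsum(x)[["1951"]])   # force the 1951 value to equal 4
csx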
shiftFrame <- structure(function#MEDS formatting ### dendroclimatic chronologies (trw, and climatic data) are formatted ### into multilevel ecological data series. SI units of continuous ### variables in the data can be transformed. ##details<< Row names of dendroclimatic data ##frames are time units (e.g. years). Column ##names are dot-separated labels representing ##the hierarchy of ecological or time-units ##factors, where the higher levels are defined ##first and the lower levels after. For ##example, code 'P16106.17' is the column name ##of core 'a' in tree '17' in plot ##'P16106'. Labels containing monthly ##abbreviations are also formatted. ( rd, ##<<\code{data.frame} or \code{list}. Dendroclimatic ##chronology DC (see details) or list of two DCs ##(e.g. precipitation and temperature records), or multilevel ##ecological data series (MEDS). f.nm = NULL, ##<<\code{character} vector. In the case of formatting ##ring-data frames, column names of the factors in ##the new MEDS. If \code{NULL} then this argument is ##recycled from attributes in \code{rd}. If such an ##attribute is also \code{NULL} then a sequence of ##codes (F1, F2, ..., Fn) is used. x.nm = names(rd)[1L], ##<<\code{character}. In the case of ##formatting MEDS, name of the variable to be ##reshaped. Default uses name of first ##variable of \code{rd}. t.nm = 'year', ##<<\code{character}.In the case of formatting MEDS, ##name of the time-units variable . ... ##<< Further arguments in \code{\link{mUnits}}. ) { fmnt <- function(dt){ dtf <- dt[,cClass(dt,'factor')] emnt. <- unlist(Map(function(x) all(levels(x)%in%month.abb), dtf)) if(any(emnt.)){ dt[, names(emnt.)[emnt.]] <- factor(dt[, names(emnt.)[emnt.]], levels = month.abb) nmd <- c(cClass(dt, 'integer'), names(emnt.)[!emnt.]) slld <- slitFrame(dt, nmd) rsr <- Map(function(x) x[do.call(order, as.list(x[,cClass(x,'factor')])),], slld) dt <- do.call(rbind,rsr) rownames(dt) <- NULL} return(dt)} wide2long <- function(rd, f.nm, ...){ sepl <- '\\.' hasdots <- length(grep(sepl, names(rd),value = TRUE)) <= 1 if(hasdots) { stop('Column names must be dot-separated labels', call. = FALSE) } nm <- rep(colnames(rd),each=nrow(rd)) lev.1 <- as.data.frame( do.call(rbind, strsplit(nm,split = sepl)), stringsAsFactors = TRUE) ## lev.1 <- as.data.frame( ## do.call(rbind, strsplit(nm,split = sepl))) if(is.null(f.nm)){ f.nm <- attributes(rd)$'f.nm' } if(is.null(f.nm)){ f.nm <- paste('F',1:ncol(lev.1), sep = '') } names(lev.1) <- f.nm lev.1 <- lev.1[,rev(names(lev.1))] yr <- as.numeric(rep(rownames(rd),ncol(rd))) x <- unlist(c(rd),use.names = FALSE) yr.nm <- attributes(rd)$'t.nm' if(is.null(yr.nm)){ yr.nm <- 'year' } xvr <- attributes(rd)$'x.nm' if(is.null(xvr)){ xvr <- 'x' } dt <- na.omit(data.frame(x, yr, lev.1, stringsAsFactors = TRUE)) names(dt) <- c(xvr,yr.nm, f.nm) dt <- dt[,cClass(dt, 'all')] rownames(dt) <- NULL fns <- 'mUnits' dtu <- arguSelect(x = dt[,xvr], fun = fns, ...) dt[,xvr] <- do.call(fns, dtu) dt <- fmnt(dt) dt <- groupedData(formula = lmeForm(dt), data = dt) return(dt)} if(is.data.frame(rd)){ iswide <- all(sapply(rd, is.numeric)) if(iswide){ dt <- wide2long(rd, f.nm, ...) } else{ nfx <- cClass(rd, cl = 'factor') dtu <- arguSelect(x = rd[,x.nm], fun = 'mUnits', ...) 
rd[,x.nm] <- do.call('mUnits', dtu) ds <- split(rd, rd[,rev(nfx)], drop = TRUE) vec <- c(t.nm, x.nm) dsp <- lapply(ds,function(x)x[,vec]) for(i in 1:length(dsp)) names(dsp[[i]]) <- c(t.nm, names(dsp[i])) fmatch <- function(tomatch.){ Reduce(function(x,y){ merge(x,y, by = t.nm, all = TRUE)},tomatch.)} rP <- fmatch(dsp) rownames(rP) <- rP[, t.nm] dt <- rP[,names(rP)[-1L]] dt <- dt[,order(names(dt))] attributes(dt)[c('f.nm','x.nm','t.nm')] <- list(nfx,x.nm,t.nm) } } else{ rdl <- Map(function(x) wide2long(x, f.nm, ...), rd, ...) merdt <- Reduce(function(...) merge(..., by = cClass(rdl[[1L]], c('integer','factor')), all=T), rdl) dt <- merdt[,cClass(merdt, 'all')] dt <- fmnt(merdt) dt <- dt[,cClass(dt, 'all')] ## dt <- groupedData(formula = lmeForm(dt), data = dt) } return(dt) ### When \code{rd} argument is a dendroclimatic chronology (see ### details) then the output is a \code{\link{groupedData}} object, ### and viceversa. } , ex=function(){ ##tree-ring widths formated as a groupedData object: data(Prings05,envir = environment()) ## Formatting the groupedData object into a ring-data frame: pwide <- shiftFrame(Prings05, from = 'mm', to = 'mmm') str(pwide) ## Formatting the ring-data frame into a groupedData object, and ## changing SI units from micrometers to milimeters: plong <- shiftFrame(pwide,from = 'mmm', to = 'mm') plot(plong) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/shiftFrame.R
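A standalone sketch of the wide-to-long reshaping idea: dot-separated column names are split into factor columns (the plot and core codes below are hypothetical):

wide <- data.frame("P1.01" = c(1.2, 1.4), "P1.02" = c(0.9, 1.1),
                   row.names = 2004:2005, check.names = FALSE)
nm   <- rep(colnames(wide), each = nrow(wide))
lev  <- do.call(rbind, strsplit(nm, ".", fixed = TRUE))     # split the hierarchy labels
long <- data.frame(x    = unlist(wide, use.names = FALSE),
                   year = as.numeric(rep(rownames(wide), ncol(wide))),
                   plot = factor(lev[, 1]), core = factor(lev[, 2]))
long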
slitFrame <- structure(function #Multilevel splitting ### This function splits a Multilevel data frame into factor levels. ( rd, ##<<\code{data.frame} object with factor-level columns. lv = cClass(rd,'factor') ##<< \code{Numeric} or ##\code{character}. Position number in ##the factor-level columns of \code{rd}, ##or correspondant column name to split ##the data. If the spliting column is not ##a factor, the character name of the ##column should be used. ) { rd11 <- rd[lv] if(is.null(lv)){ rd1 <- Filter(is.factor, rd) rd11 <- rd1[1:length(rd1)] } lrd <- split(rd, rd11, drop = TRUE) spn <- data.frame( do.call(rbind, strsplit(names(lrd), '\\.'))) nmr <- apply(spn[rev(names(spn))], 1, paste, collapse = '.') options(warn=-1) anm <- all(is.na(as.numeric(as.character(nmr)))) options(warn=0) if(anm) names(lrd) <- nmr lrd <- lrd[order(names(lrd))] return(lrd) ### \code{list} of \code{data.frame} objects. }, ex=function() { ##Ring data frame: ##Multilevel data frame of tree-ring widths: data(Prings05, envir = environment()) data(PTclim05, envir = environment()) ## split multilevel data into its second factor-level column: spl <- slitFrame(Prings05) str(spl) ## split the data into the factor-level: 'year': spl <- slitFrame(Prings05,'year') str(spl) spl <- slitFrame(PTclim05,'year') str(spl) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/slitFrame.R
summary.modelFrame <- structure(function #summarize a modelFrame object ### A summary of a \code{\link{modelFrame}} object is obtained. ( object, ##<< an object inheriting from class ##\code{\link{modelFrame}}. ... ##<< additional optional arguments passed to ##\code{\link{summary.lme}} method. ) { summary(object$'model', ...) ## A summary model. } , ex=function() { ##TRW chronology (mm) and inside-bark radii data(Pchron,envir = environment()) ## Parameters of allometric model to compute Diameter at Breast ## Height over bark (DBH, cm) from diameter inside bark (dib, cm) ## and Total Tree Biomass (TTB, kg tree -1 ) from DBH (Lara ## et. al. 2013): biom_param <- c(2.87, 0.85, 0.05, 2.5) ## Modeling tree-biomass fluctuations while accounting for ## within-plot source variability (see defaults in "modelFrame" ## function): ## \donttest{ ## trwf <- modelFrame(Pchron, ## to = 'cm', ## MoreArgs = list(mp = c(2,1, biom_param)), ## log.t = TRUE, ## on.time = TRUE) ## summary(trwf) ## } })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/summary.modelFrame.R
tdForm <- structure(function#ltd formulas ### This function formulates linear time-decline formulas (ltd) from ### categorical variables in multilevel ecological data series. ##details<< the ltd formulas belong to following ##general equation: log (x) = log (csx) + f(time); ##where the relative organic growth (x) is ##explained by the cumulative organic growth (csx) ##plus a function of time f(time); with f(time) ##being either the time or a logarithmic ##transformation the time. The ltd can be ##implemented by \code{\link{modelFrame}} function ##to subtract trends in organic MEDS ##references<< Zeide B. 1993. Analysis of Growth ##Equations. For. Sci., 39: 594-616. ( rd, ##<<\code{data.frame} or \code{character} vector. Multilevel ##ecological data series or vector of ecological factors. prim.cov = FALSE, ##<<\code{logical}. Print a primary covariate ##form: \code{'~ cov'}. If FALSE then a complete ##formula: \code{'resp ~ cov | group'} is printed. on.time = TRUE, ##<< \code{logical}. If TRUE then \code{t = ##'time'} (see \code{\link{rtimes}}). If FALSE ##then \code{t = 'year'}. log.t = FALSE, ##<< \code{logical}. If TRUE then \code{f(time) = ##ln(time)}. Default FALSE produces a log-linear ##time-decline formula. lev.rm = NULL ##<< NULL or \code{character} name of the ecological ##factor(s) in the MEDS to be removed from the ##formula. ) { rs <- 'log(x)'; lx <- '~ log(csx) +'; t <- 'time' if(!on.time)t <- 'year' if(log.t)t <- paste('log(',t,')',sep = '') ftt <- paste(rs,lx,t,sep = ' ') if(!is.character(rd)){ f <- cClass(rd, 'factor') if(is.numeric(lev.rm)) lev.rm <- f[lev.rm] nf <- rev(f[!f%in%lev.rm])} if(is.character(rd)) nf <- rd[!rd%in%lev.rm] sep. <- ' | ' if(length(nf) == 0) sep. <- '' fc <- paste(nf,collapse = '/') fr <- paste(ftt,fc,sep = sep.) if(prim.cov) fr <- paste(lx,t,sep = '') return(formula (fr)) ### \code{formula} with the forms: 'resp ~ cov | group' or '~ cov'. } , ex=function(){ ## an ltd formula: lev <- c('plot','tree') tdeq <- tdForm(lev,log.t = TRUE) tdeq ## (not run) only primary covariate: tdeq1 <- tdForm(lev,prim.cov = TRUE) tdeq1 ##Multilevel data frame of tree-ring widths: data(Prings05,envir = environment()) ## removing two levels: 'plot' and 'tree' from the formula tdea2 <- tdForm(Prings05, lev.rm = c('plot','tree')) tdea2 <- tdForm(Prings05, lev.rm = 2:3) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/tdForm.R
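The ltd formula is a pasted string converted with formula(); a minimal sketch using the grouping levels from the example above:

grp <- paste(c("plot", "tree"), collapse = "/")
formula(paste("log(x) ~ log(csx) + time |", grp))
## log(x) ~ log(csx) + time | plot/tree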
wlai <- structure(function#Walter-Lieth aridity index ### Computing the annual aridity index from Walter-Lieth climate diagrams ##details<<Areas between temperature and ##precipitation lines when precipitation exceeds ##temperature are calculated as indicators of moist ##seasons, and areas where temperature exceeds ##precipitation are calculated as indicator of dry ##season. The aridity index is defined as the ##quotient between the areas of dry and wet ##seasons. Precipitations over 100 mm are ##scaled such that 1 degree C is equal to 5 mm. ##references<< Manrique E., ##A. Fernandez-Cancio. 2000. Extreme climatic events ##in dendroclimatic reconstructions from ##Spain. Clim. Chang., 44: 123-138. ( cd,##<< \code{data.frame}. Multilevel climatic data series of ##monthly precipitation sums (mm), and monthly average ##temperatures (degree C), with row names being monthly ##characters in \code{\link{month.abb}} or ##\code{\link{month.name}}. sqt = TRUE ##<<\code{logical}. Print the square root of the ##aridity index. If TRUE then computed aridity index ##is normalized with a square root transformation. ) { AI <- NA csn <- c(cClass(cd, 'integer'), cClass(cd, 'factor')) csn. <- length(csn)!=0 pr <- cd[,1];tm <- cd[,2] min.length <- length(pr) >= 12 if(min.length){ tm <- 2 * tm names(pr) <- 1:length(pr) names(tm) <- names(pr) fint <- function(pr,tm, twice = FALSE){ if(twice) tm <- 2 * tm #twice temp ix<-which(diff(pr>tm)!=0) pr.m<-pr[ix+1]-pr[ix] tm.m<-tm[ix+1]-tm[ix] fx <- (tm[ix] - pr[ix]) / (pr.m-tm.m) ixc <- ix + fx prc <- pr[ix] + (pr.m*(ixc-ix)) names(pr) <- names(tm) <- 1:length(pr) names(prc) <- ixc fx <- (tm[ix] - pr[ix]) / (pr.m-tm.m) ixc <- ix + fx prc <- pr[ix] + (pr.m*(ixc-ix)) names(prc) <- ixc return(prc)} ford <- function(x){ order(as.numeric(names(x)))} pr. <- ifelse(pr > 100, 80 + 0.2 * pr,pr) pr.. <- ifelse(pr > 100,100,pr) pics <- fint(rep(100,length(pr)),pr)#} prc <- fint(pr,tm) fxn <- function(x3,x4){ xn <- c(ifelse(pr > tm,x3,x4),prc) ix <- order(as.numeric(names(xn))) xn <- xn[ix] return(xn)} xn <- fxn(pr.,tm) xn <- c(xn,pics)[ford(c(xn,pics))] xn1 <- fxn(tm,pr.) xn.. <- fxn(pr..,tm) xn.. <- c(xn..,pics)[ford(c(xn..,pics))] pr. <- c(pr.,pics)[ford(c(pr.,pics))] fb <- function(x){ nx <- c(ptm,x,ptm) y <- as.numeric(names(x)) names(nx) <- c(min(y),names(x),max(y)) nxn <- as.numeric(names(nx)) - 0.5 dt <- data.frame(x = nxn, y = nx) return(dt)} surf<-function(y){ x <- y[,1]; y <- y[,2] dosarea<-sapply(2:(length(y)-1), function(i)y[i]*(x[i+1]-x[i-1])) a <- 0.5 * sum(dosarea) return(a)} ptm <- min(pr.,tm) wet <- surf(fb(xn)) - surf(fb(tm)) dry <- surf(fb(xn)) - surf(fb(pr.)) AI <- dry/wet if(sqt)AI <- sqrt(AI) AI. <- AI } if(csn.){ emnt. <- unlist(Map(function(x) all(x%in%1:12) | all(x%in%month.abb), cd)) fc <- cClass(cd[ names(emnt.)[!emnt.]], 'factor') ni <- cClass(cd, 'integer') fni <- c(ni, fc) AI <- unique(cbind(AI, cd[, fni])) } if(min.length){ attributes(AI) <- list(xn = xn, xn.. = xn.., xn1 = xn1, ptm = ptm, pr = pr, pr. = pr., r.n = rownames(cd), tm = tm, ai = AI.)} class(AI) <- c('wlai', class(AI)) return(AI) ### \code{numeric} aridity index and plot of the Walter-Lieth diagram. } , ex=function() { ##random simulation of climatic records set.seed(1) pr <- rnorm(12,1,1) tm <- rnorm(12,0,1) cld <- data.frame(pr,tm) ##labels of months from october to september rownames(cld) <- month.abb[c(10:12,1:9)] rownames(cld) <- c(10:12,1:9) ##computation of the aridity index and climate diagram AI <- wlai(cld) AI })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/wlai.R
datasets <- function(url = "https://www.bis.org/statistics/full_data_sets.htm", ...) { u <- url(url) txt <- try(readLines(u), silent = TRUE) close(u) if (inherits(txt, "try-error")) { warning("download failed with message ", sQuote(txt, FALSE)) return(invisible(NULL)) } txt <- txt[grep('href=./statistics.*full.*zip', txt)] fn <- gsub(".*(full[^/]+?zip).*", "\\1", txt) descr <- gsub("<[^>]*?>", "", txt) descr <- gsub("&nbsp;", " ", descr) descr <- gsub("(.*[)]).*", "\\1", descr) upd <- gsub("&nbsp;", " ", txt) upd <- gsub("<[^>]*?>", "", upd) upd <- gsub(paste0(".*?([0-9]+ (", paste(month.name, collapse = "|"), ") [0-9]+).*"), "\\1", upd) for (i in 1:12) upd <- sub(month.name[i], i, upd) upd <- as.Date(upd, "%d %m %Y") data.frame(filename = trimws(fn), description = trimws(descr), updated = upd) } fetch_dataset <- function(dest.dir, dataset, bis.url = "https://www.bis.org/statistics/", exdir = tempdir(), return.class = NULL, frequency = NULL, ..., header = TRUE, sep = ",", stringsAsFactors = FALSE, check.names = FALSE, na.strings = "", quote = "\"", fill = TRUE) { if (!dir.exists(dest.dir)) { create.dir <- askYesNo( paste(sQuote("dest.dir"), "does not exist. Create it?"), default = FALSE) if (!isTRUE(create.dir)) return(invisible(NULL)) dir.create(dest.dir, recursive = TRUE) } dataset <- basename(dataset) f.name <- paste0(format(Sys.Date(), "%Y%m%d_"), dataset) dataset <- paste0(bis.url, dataset) f.path <- file.path(normalizePath(dest.dir), f.name) if (!file.exists(f.path)) { dl.result <- try(download.file(dataset, f.path), silent = TRUE) if (inherits(dl.result, "try-error")) { warning("download failed with message ", sQuote(dl.result, FALSE)) return(invisible(NULL)) } } else dl.result <- 0 if (dl.result != 0L) { warning("download failed with code ", dl.result, "; see ?download.file") return(invisible(NULL)) } txt <- process_dataset(f.path, exdir = exdir, return.class = return.class, frequency = frequency, ..., header = header, sep = sep, stringsAsFactors = stringsAsFactors, check.names = check.names, na.strings = na.strings, quote = quote, fill = fill) txt } process_dataset <- function(f.path, exdir, return.class, frequency, ..., header, sep, stringsAsFactors, check.names, na.strings, quote, fill) { tmp <- unzip(f.path, exdir = exdir) on.exit(file.remove(tmp)) txt <- read.table(tmp, header = header, sep = sep, stringsAsFactors = stringsAsFactors, check.names = check.names, na.strings = na.strings, quote = quote, fill = fill, ...) if (is.null(return.class)) return(txt) if (return.class == "zoo") { if (!requireNamespace("zoo")) { warning("package ", sQuote("zoo"), " not available") return(txt) } if (grepl("full_webstats_credit_gap_dataflow_csv.zip", basename(f.path), fixed = TRUE) || grepl("full_bis_total_credit_csv.zip", basename(f.path), fixed = TRUE)) { ## FIXME grep for first date? 
j <- which(colnames(txt) == "Time Period") ans <- t(txt[, -seq_len(j)]) ans <- zoo::zoo(ans, zoo::as.yearqtr(rownames(ans), format = "%Y-Q%q")) attr(ans, "headers") <- t(txt[, seq_len(j)]) colnames(ans) <- colnames(attr(ans, "headers")) <- txt[["Time Period"]] } else if (grepl("full_xru_d_csv_row.zip", basename(f.path), fixed = TRUE)) { i <- which(txt[[1]] == "Time Period") ans <- txt[-seq_len(i), -1] ans <- apply(ans, 2, as.numeric) t <- as.Date(txt[-seq_len(i), 1]) ans <- zoo::zoo(ans, t) attr(ans, "headers") <- txt[seq_len(i), -1] colnames(ans) <- colnames(attr(ans, "headers")) <- txt[i, -1] } else if (grepl("full_xru_csv.zip", f.path, fixed = TRUE)) { if (is.null(frequency)) { message(sQuote("frequency"), " set to ", sQuote("annual")) frequency <- "annual" } else if (!frequency %in% c("annual", "quarterly", "monthly")) { stop(sQuote("frequency"), " must be ", sQuote("annual"), ", ", sQuote("quarterly"), " or ", sQuote("monthly")) } if (frequency == "annual") { t.re <- "^[0-9]{4}$" txt <- txt[txt$FREQ == "A", ] t <- colnames(txt)[grepl(t.re, colnames(txt))] ans <- t(txt[, grepl(t.re, colnames(txt))]) ans <- zoo::zoo(ans, t) j <- which(colnames(txt) == "Series") attr(ans, "headers") <- t(txt[, seq_len(j)]) colnames(ans) <- colnames(attr(ans, "headers")) <- txt[, j] } else if (frequency == "quarterly") { t.re <- "^[0-9]{4}-Q[0-9]$" txt <- txt[txt$FREQ == "Q", ] t <- colnames(txt)[grepl(t.re, colnames(txt))] t <- zoo::as.yearqtr(t, format = "%Y-Q%q") ans <- t(txt[, grepl(t.re, colnames(txt))]) ans <- zoo::zoo(ans, t) j <- which(colnames(txt) == "Series") attr(ans, "headers") <- t(txt[, seq_len(j)]) colnames(ans) <- colnames(attr(ans, "headers")) <- txt[, j] } else if (frequency == "monthly") { t.re <- "^[0-9]{4}-[^Q][0-9]$" txt <- txt[txt$FREQ == "M", ] t <- colnames(txt)[grepl(t.re, colnames(txt))] t <- zoo::as.yearmon(t) ans <- t(txt[, grepl(t.re, colnames(txt))]) ans <- zoo::zoo(ans, t) j <- which(colnames(txt) == "Series") attr(ans, "headers") <- t(txt[, seq_len(j)]) colnames(ans) <- colnames(attr(ans, "headers")) <- txt[, j] } } else if (grepl("full_cbpol_d_csv_row.zip", f.path, fixed = TRUE)) { i <- which(txt[, 1L] == "Time Period") ans <- txt[-seq_len(i), -1] ans <- apply(ans, 2, as.numeric)/100 ans <- zoo::zoo(ans, as.Date(txt[-seq_len(i), 1])) attr(ans, "headers") <- txt[seq_len(i), -1] colnames(ans) <- txt[i, -1] rownames(attr(ans, "headers")) <- txt[seq_len(i), 1] colnames(attr(ans, "headers")) <- txt[i, -1] } } else ans <- txt ans }
/scratch/gouwar.j/cran-all/cranData/BISdata/R/functions.R
#' @import torch #' @include tensor_ops.R #' @include kernels.R #' @include likelihood_evaluator.R #' @include result_logger.R #' @include samplers.R #' @importFrom R6 R6Class #' @title R6 class encapsulating the BKTR regression elements #' #' @description A BKTRRegressor holds all the key elements to accomplish the MCMC sampling #' algorithm (\strong{Algorithm 1} of the paper). #' #' @examplesIf torch::torch_is_installed() #' # Create a BIXI data collection instance containing multiple dataframes #' bixi_data <- BixiData$new(is_light = TRUE) # Use light version for example #' #' # Create a BKTRRegressor instance #' bktr_regressor <- BKTRRegressor$new( #' formula = nb_departure ~ 1 + mean_temp_c + area_park, #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' #' # Launch the MCMC sampling #' bktr_regressor$mcmc_sampling() #' #' # Get the summary of the bktr regressor #' summary(bktr_regressor) #' #' # Get estimated response variables for missing values #' bktr_regressor$imputed_y_estimates #' #' # Get the list of sampled betas for given spatial, temporal and feature labels #' bktr_regressor$get_iterations_betas( #' spatial_label = bixi_data$spatial_positions_df$location[1], #' temporal_label = bixi_data$temporal_positions_df$time[1], #' feature_label = 'mean_temp_c') #' #' # Get the summary of all betas for the 'mean_temp_c' feature #' bktr_regressor$get_beta_summary_df(feature_labels = 'mean_temp_c') #' #' @export BKTRRegressor <- R6::R6Class( classname = 'BKTRRegressor', public = list( #' @field data_df The dataframe containing all the covariates through time and space (including #' the response variable) data_df = NULL, #' @field y The response variable tensor y = NULL, #' @field omega The tensor indicating which response values are not missing omega = NULL, #' @field covariates The tensor containing all the covariates covariates = NULL, #' @field covariates_dim The dimensions of the covariates tensor covariates_dim = NULL, #' @field logged_params_tensor The tensor containing all the sampled hyperparameters logged_params_tensor = NULL, #' @field tau The precision hyperparameter tau = NULL, #' @field spatial_decomp The spatial covariate decomposition spatial_decomp = NULL, # U #' @field temporal_decomp The temporal covariate decomposition temporal_decomp = NULL, # V #' @field covs_decomp The feature covariate decomposition covs_decomp = NULL, # C or W # Result Logger #' @field result_logger The result logger instance used to store the results of the MCMC sampling result_logger = NULL, #' @field has_completed_sampling Boolean showing wheter the MCMC sampling has been completed has_completed_sampling = FALSE, # Kernels #' @field spatial_kernel The spatial kernel used spatial_kernel = NULL, #' @field temporal_kernel The temporal kernel used temporal_kernel = NULL, #' @field spatial_positions_df The dataframe containing the spatial positions spatial_positions_df = NULL, #' @field temporal_positions_df The dataframe containing the temporal positions temporal_positions_df = NULL, # Samplers #' @field spatial_params_sampler The spatial kernel hyperparameter sampler spatial_params_sampler = NULL, #' @field temporal_params_sampler The temporal kernel hyperparameter sampler temporal_params_sampler = NULL, #' @field tau_sampler The tau hyperparameter sampler tau_sampler = NULL, #' @field precision_matrix_sampler The 
precision matrix sampler
        precision_matrix_sampler = NULL,

        # Likelihood evaluators
        #' @field spatial_ll_evaluator The spatial likelihood evaluator
        spatial_ll_evaluator = NULL,
        #' @field temporal_ll_evaluator The temporal likelihood evaluator
        temporal_ll_evaluator = NULL,

        # Params
        #' @field rank_decomp The rank of the CP decomposition
        rank_decomp = NULL,
        #' @field burn_in_iter The number of burn in iterations
        burn_in_iter = NULL,
        #' @field sampling_iter The number of sampling iterations
        sampling_iter = NULL,
        #' @field max_iter The total number of iterations
        max_iter = NULL,
        #' @field a_0 The initial value for the shape in the gamma function generating tau
        a_0 = NULL,
        #' @field b_0 The initial value for the rate in the gamma function generating tau
        b_0 = NULL,
        #' @field formula The formula used to specify the relation between the response variable and the covariates
        formula = NULL,
        #' @field spatial_labels The spatial labels
        spatial_labels = NULL,
        #' @field temporal_labels The temporal labels
        temporal_labels = NULL,
        #' @field feature_labels The feature labels
        feature_labels = NULL,
        #' @field geo_coords_projector The geographic coordinates projector
        geo_coords_projector = NULL,

        #' @description Create a new \code{BKTRRegressor} object.
        #' @param data_df data.table: A dataframe containing all the covariates
        #' through time and space. It is important that the dataframe has two
        #' indexes named `location` and `time` respectively. The dataframe should
        #' also contain every possible combination of `location` and `time`
        #' (i.e. even rows with a missing response should be present, filled with NaN).
        #' So if the dataframe has 10 locations and 5 time points, it should have
        #' 50 rows (10 x 5). If formula is NULL, the dataframe should contain
        #' the response variable `Y` as the first column. Note that the covariate
        #' columns cannot contain NaN values, but the response variable can.
        #' @param formula A Wilkinson R formula to specify the relation
        #' between the response variable `Y` and the covariates. If NULL, the first
        #' column of the data frame will be used as the response variable and all the
        #' other columns will be used as the covariates. Defaults to NULL.
        #' @param rank_decomp Integer: Rank of the CP decomposition (Paper -- \eqn{R}). Defaults to 10.
        #' @param burn_in_iter Integer: Number of iterations before sampling (Paper -- \eqn{K_1}). Defaults to 500.
        #' @param sampling_iter Integer: Number of sampling iterations (Paper -- \eqn{K_2}). Defaults to 500.
        #' @param spatial_positions_df data.table: Spatial kernel input tensor used
        #' to calculate covariates' distance. Vector of length equal to the number of location points.
        #' @param temporal_positions_df data.table: Temporal kernel input tensor used to
        #' calculate covariates' distance. Vector of length equal to the number of time points.
        #' @param spatial_kernel Kernel: Spatial kernel used. Defaults to
        #' a KernelMatern(smoothness_factor=3).
        #' @param temporal_kernel Kernel: Temporal kernel used. Defaults to KernelSE().
        #' @param sigma_r Numeric: Variance of the white noise process (\eqn{\tau^{-1}}),
        #' defaults to 1E-2.
        #' @param a_0 Numeric: Initial value for the shape (\eqn{\alpha}) in the gamma function
        #' generating tau, defaults to 1E-6.
        #' @param b_0 Numeric: Initial value for the rate (\eqn{\beta}) in the gamma function
        #' generating tau, defaults to 1E-6.
        #' @param has_geo_coords Boolean: Whether the spatial positions df uses geographic coordinates
        #' (latitude, longitude). Defaults to TRUE.
#' @param geo_coords_scale Numeric: Scale factor to convert geographic coordinates to euclidean #' 2D space via Mercator projection using x & y domains of [-scale/2, +scale/2]. Only used if #' has_geo_coords is TRUE. Defaults to 10. #' @return A new \code{BKTRRegressor} object. initialize = function( data_df, spatial_positions_df, temporal_positions_df, rank_decomp = 10, burn_in_iter = 500, sampling_iter = 500, formula = NULL, spatial_kernel = KernelMatern$new(smoothness_factor = 3), temporal_kernel = KernelSE$new(), sigma_r = 1E-2, a_0 = 1E-6, b_0 = 1E-6, has_geo_coords = TRUE, geo_coords_scale = 10 ) { self$has_completed_sampling <- FALSE private$verify_input_labels(data_df, spatial_positions_df, temporal_positions_df) # We don't need to sort since keys in data.table already do so self$data_df <- data_df self$temporal_positions_df <- temporal_positions_df if (has_geo_coords) { self$geo_coords_projector <- GeoMercatorProjector$new(spatial_positions_df, geo_coords_scale) self$spatial_positions_df <- self$geo_coords_projector$scaled_ini_df } else { self$spatial_positions_df <- spatial_positions_df } # Set formula and get model's matrix xy_df_list <- private$get_x_and_y_dfs_from_formula(self$data_df[, -c('location', 'time')], formula) y_df <- xy_df_list$y_df x_df <- xy_df_list$x_df # Set labels self$spatial_labels <- self$spatial_positions_df$location self$temporal_labels <- self$temporal_positions_df$time self$feature_labels <- colnames(x_df) # Tensor Assignation y_matrix <- matrix(y_df[[1]], ncol = length(self$temporal_labels), byrow = TRUE) # Omega is 1 if y is not NA, 0 otherwise self$omega <- TSR$tensor(ifelse(is.na(y_matrix), 0.0, 1.0)) # Y should replace all NA values by 0 y_matrix[is.na(y_matrix)] <- 0.0 self$y <- TSR$tensor(y_matrix) covariates <- TSR$tensor(as.matrix(x_df)) self$tau <- 1 / TSR$tensor(sigma_r) # Params Assignation self$rank_decomp <- rank_decomp self$burn_in_iter <- burn_in_iter self$sampling_iter <- sampling_iter self$max_iter <- burn_in_iter + sampling_iter self$a_0 <- a_0 self$b_0 <- b_0 # Reshape Covariates private$reshape_covariates(covariates, length(self$spatial_labels), length(self$temporal_labels)) #Kernel Assignation self$spatial_kernel <- spatial_kernel self$temporal_kernel <- temporal_kernel self$spatial_kernel$set_positions(self$spatial_positions_df) self$temporal_kernel$set_positions(self$temporal_positions_df) # Create First Kernels self$spatial_kernel$kernel_gen() self$temporal_kernel$kernel_gen() }, #' @description Launch the MCMC sampling process. 
\cr #' For a predefined number of iterations: #' \enumerate{ #' \item{Sample spatial kernel hyperparameters} #' \item{Sample temporal kernel hyperparameters} #' \item{Sample the precision matrix from a wishart distribution} #' \item{Sample a new spatial covariate decomposition} #' \item{Sample a new feature covariate decomposition} #' \item{Sample a new temporal covariate decomposition} #' \item{Calculate respective errors for the iterations} #' \item{Sample a new tau value} #' \item{Collect all the important data for the iteration} #' } #' @return NULL Results are stored and can be accessed via summary() mcmc_sampling = function() { private$initialize_params() for (i in 1:self$max_iter) { private$sample_kernel_hparam() private$sample_precision_wish() private$sample_spatial_decomp() private$sample_covariate_decomp() private$sample_temporal_decomp() private$set_errors_and_sample_precision_tau(i) private$collect_iter_values(i) } private$log_final_iter_results() }, #' @description Use interpolation to predict betas and response values for new data. #' @param new_data_df data.table: New covariates. Must have the same columns as #' the covariates used to fit the model. The index should contain the combination #' of all old spatial coordinates with all new temporal coordinates, the combination #' of all new spatial coordinates with all old temporal coordinates, and the #' combination of all new spatial coordinates with all new temporal coordinates. #' @param new_spatial_positions_df data.table or NULL: A data frame containing the new #' spatial positions. Defaults to NULL. #' @param new_temporal_positions_df data.table or NULL: A data frame containing the new #' temporal positions. Defaults to NULL. #' @param jitter Numeric or NULL: A small value to add to the diagonal of the precision matrix. #' Defaults to NULL. #' #' @examplesIf torch::torch_is_installed() #' ## PREDICTION EXAMPLE ## #' # Create a light version of the BIXI data collection instance #' bixi_data <- BixiData$new(is_light = TRUE) #' # Simplify variable names #' data_df <- bixi_data$data_df #' spa_pos_df <- bixi_data$spatial_positions_df #' temp_pos_df <- bixi_data$temporal_positions_df #' #' # Keep some data aside for prediction #' new_spa_pos_df <- spa_pos_df[1:2, ] #' new_temp_pos_df <- temp_pos_df[1:5, ] #' reg_spa_pos_df <- spa_pos_df[-(1:2), ] #' reg_temp_pos_df <- temp_pos_df[-(1:5), ] #' reg_data_df_mask <- data_df$location %in% reg_spa_pos_df$location & #' data_df$time %in% reg_temp_pos_df$time #' reg_data_df <- data_df[reg_data_df_mask, ] #' new_data_df <- data_df[!reg_data_df_mask, ] #' #' # Launch mcmc sampling on regression data #' bktr_regressor <- BKTRRegressor$new( #' formula = nb_departure ~ 1 + mean_temp_c + area_park, #' data_df = reg_data_df, #' spatial_positions_df = reg_spa_pos_df, #' temporal_positions_df = reg_temp_pos_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Predict response values for new data #' bktr_regressor$predict( #' new_data_df = new_data_df, #' new_spatial_positions_df = new_spa_pos_df, #' new_temporal_positions_df = new_temp_pos_df) #' #' @return List: A list of two dataframes. The first represents the beta #' forecasted for all new spatial locations or temporal points. #' The second represents the forecasted response for all new spatial #' locations or temporal points. 
predict = function( new_data_df, new_spatial_positions_df = NULL, new_temporal_positions_df = NULL, jitter = 1e-5 ) { # private$pred_valid_and_sort_data( # new_data_df, new_spatial_positions_df, new_temporal_positions_df # ) ini_fp_type <- TSR$fp_type TSR$set_params(fp_type = 'float64') if (!is.null(new_spatial_positions_df) && !is.null(self$geo_coords_projector)) { new_spatial_positions_df <- self$geo_coords_projector$project_new_coords(new_spatial_positions_df) } spatial_positions_df <- ( if (!is.null(new_spatial_positions_df)) rbind(self$spatial_positions_df, new_spatial_positions_df) else self$spatial_positions_df ) temporal_positions_df <- ( if (!is.null(new_temporal_positions_df)) rbind(self$temporal_positions_df, new_temporal_positions_df) else self$temporal_positions_df ) data_df <- rbind(self$data_df, new_data_df) spa_order_df <- data.table( location = spatial_positions_df$location, spa_order = seq_len(nrow(spatial_positions_df)) ) temp_order_df <- data.table( time = temporal_positions_df$time, temp_order = seq_len(nrow(temporal_positions_df)) ) data_df <- data_df[spa_order_df, on = 'location'][temp_order_df, on = 'time'] setorder(data_df, spa_order, temp_order) data_df[, c('spa_order', 'temp_order') := list(NULL, NULL)] # private$verify_input_labels( # data_df, # spatial_positions_df, # temporal_positions_df # ) all_betas <- TSR$zeros( c( nrow(spatial_positions_df), nrow(temporal_positions_df), length(self$feature_labels), self$sampling_iter ) ) for (i in seq_len(self$sampling_iter)) { new_spa_decomp <- private$pred_simu_new_decomp( 'spatial', i, spatial_positions_df, new_spatial_positions_df, jitter ) new_temp_decomp <- private$pred_simu_new_decomp( 'temporal', i, temporal_positions_df, new_temporal_positions_df, jitter ) covs_decomp <- TSR$tensor(self$result_logger$covs_decomp_per_iter[, , i]) all_betas[, , , i] <- torch::torch_einsum( 'il,jl,kl->ijk', c(new_spa_decomp, new_temp_decomp, covs_decomp) ) } new_betas <- all_betas$mean(dim = -1) x_df <- private$get_x_and_y_dfs_from_formula(data_df[, -c('location', 'time')], self$formula)$x_df covariates <- TSR$tensor(as.matrix(x_df))$reshape( c(nrow(spatial_positions_df), nrow(temporal_positions_df), -1) ) new_y_est <- torch::torch_einsum('ijk,ijk->ij', c(new_betas, covariates)) new_index_df <- data_df[, c('location', 'time')] new_beta_df <- cbind( new_index_df, data.table(as.matrix(new_betas$flatten(start_dim = 1, end_dim = 2)$cpu())) ) setnames(new_beta_df, c('location', 'time', self$feature_labels)) new_y_df <- cbind(new_index_df, data.table(as.matrix(new_y_est$flatten()$cpu(), byrow = TRUE))) setnames(new_y_df, c('location', 'time', 'y_est')) new_locs <- unique(new_spatial_positions_df$location) new_times <- unique(new_temporal_positions_df$time) new_beta_df <- new_beta_df[, new_beta_df[new_beta_df[, .I[location %in% new_locs | time %in% new_times]], ]] new_y_df <- new_y_df[, new_y_df[new_y_df[, .I[location %in% new_locs | time %in% new_times]], ]] TSR$set_params(fp_type = ini_fp_type) return(list(new_y_df = new_y_df, new_beta_df = new_beta_df)) }, #' @description Return all sampled betas through sampling iterations for a given #' set of spatial, temporal and feature labels. Useful for plotting the #' distribution of sampled beta values. 
#' @param spatial_label String: The spatial label for which we want to get the betas #' @param temporal_label String: The temporal label for which we want to get the betas #' @param feature_label String: The feature label for which we want to get the betas #' @return A list containing the sampled betas through iteration for the given labels get_iterations_betas = function(spatial_label, temporal_label, feature_label) { if (!self$has_completed_sampling) { stop('Beta values can only be accessed after MCMC sampling.') } beta_per_iter_tensor <- self$result_logger$get_iteration_betas_tensor( c(spatial_label), c(temporal_label), c(feature_label) )[1] return(as.array(beta_per_iter_tensor)) }, #' @description Get a summary of estimated beta values. If no labels are given, #' then the summary is for all the betas. If labels are given, then the summary #' is for the given labels. #' @param spatial_labels vector: The spatial labels used in summary. If NULL, #' then all spatial labels are used. Defaults to NULL. #' @param temporal_labels vector: The temporal labels used in summary. If NULL, #' then all temporal labels are used. Defaults to NULL. #' @param feature_labels vector: The feature labels used in summary. If NULL, #' then all feature labels are used. Defaults to NULL. #' @return A new data.table with the beta summary for the given labels. get_beta_summary_df = function( spatial_labels = NULL, temporal_labels = NULL, feature_labels = NULL ) { if (!self$has_completed_sampling) { stop('Beta values can only be accessed after MCMC sampling.') } return(self$result_logger$get_beta_summary_df(spatial_labels, temporal_labels, feature_labels)) } ), private = list( #~ @description Verify if kernel inputs are valid and align with covariates labels. verify_kernel_labels = function( kernel_positions, expected_labels, kernel_type ) { cov_related_indx_name <- ifelse(kernel_type == 'spatial', 'location', 'time') if (key(kernel_positions) != cov_related_indx_name) { stop(sprintf( '`%s_positions_df` must have a `%s` key.', kernel_type, cov_related_indx_name )) } if (!identical(expected_labels, unique(kernel_positions[[cov_related_indx_name]]))) { stop(paste0( '`', kernel_type, '_positions_df` must contain in its ', cov_related_indx_name, ' index the unique values located in `data_df` ', cov_related_indx_name, ' index.' )) } }, #~ @description Verify validity of BKTR dataframe input labels verify_input_labels = function( data_df, spatial_positions_df, temporal_positions_df ) { if (!identical(key(data_df), c('location', 'time'))) { stop(paste( 'The data_df dataframe must have a multi index on location and time.', 'Set the keys on the table with `setkey(data_df, location, time)``.' )) } loc_set <- unique(data_df$location) time_set <- unique(data_df$time) product_set <- CJ(location = loc_set, time = time_set) data_df_index_set <- unique(data_df[, c('location', 'time')]) if (!identical(data_df_index_set, product_set)) { stop(paste( 'The data_df dataframe must have a row for every possible combination of location and time.', 'Even if response values are missing (NaN).' )) } private$verify_kernel_labels(spatial_positions_df, loc_set, 'spatial') private$verify_kernel_labels(temporal_positions_df, time_set, 'temporal') }, #~ @description Use formula to get x and y dataframes. #~ @param data_df data.table: The initial dataframe used to obtain the x and y dataframes. #~ @param formula: Formula to give the y and X dataframes matrix. If formula is #~ None, use the first column as y and all other columns as covariates. 
#~ @return A list containing the y and x dataframes. get_x_and_y_dfs_from_formula = function(data_df, formula = NULL) { if (is.null(formula)) { formula <- paste0(colnames(data_df)[1], ' ~ .') } self$formula <- as.formula(formula) formula_y_name <- as.character(self$formula[[2]]) if (length(formula_y_name) != 1) { stop(paste( 'The formula provided to the regressor is not valid.', 'It must contain one and only one response variable.' )) } mf <- model.frame(self$formula, data = data_df, na.action=na.pass) x_df <- data.table(model.matrix(self$formula, mf)) y_df <- data.table(model.response(mf)) x_colnames <- colnames(x_df) if ('(Intercept)' %in% x_colnames) { colnames(x_df)[x_colnames == '(Intercept)'] <- 'Intercept' } setnames(y_df, formula_y_name) return(list(y_df=y_df, x_df=x_df)) }, #~ @description Reshape the covariate tensors into one single tensor and set this #~ tensor into the \code{covariates} property of the BKTRRegressor object #~ @param temporal_covariate_tensor tensor: Temporal Covariates #~ @param spatial_covariate_tensor tensor: Spatial Covariates reshape_covariates = function(covariate_tensor, nb_locations, nb_times) { nb_covariates <- covariate_tensor$shape[2] self$covariates_dim <- list( nb_spaces = nb_locations, # S nb_times = nb_times, # T nb_covariates = nb_covariates # P ) self$covariates <- covariate_tensor$reshape(c(nb_locations, nb_times, nb_covariates)) }, #~ @description Initialize the CP decomposed covariate tensors #~ using normally distributed random values init_covariate_decomp = function() { rank_decomp <- self$rank_decomp covs_dim <- self$covariates_dim self$spatial_decomp <- TSR$randn(c(covs_dim$nb_spaces, rank_decomp)) self$temporal_decomp <- TSR$randn(c(covs_dim$nb_times, rank_decomp)) self$covs_decomp <- TSR$randn(c(covs_dim$nb_covariates, rank_decomp)) }, create_result_logger = function() { self$result_logger <- ResultLogger$new( y = self$y, omega = self$omega, covariates = self$covariates, nb_burn_in_iter = self$burn_in_iter, nb_sampling_iter = self$sampling_iter, rank_decomp = self$rank_decomp, formula = self$formula, spatial_labels = self$spatial_labels, temporal_labels = self$temporal_labels, feature_labels = self$feature_labels, spatial_kernel = self$spatial_kernel, temporal_kernel = self$temporal_kernel ) }, #~ @description Create and set the evaluators for the spatial and the temporal likelihoods create_likelihood_evaluators = function() { self$spatial_ll_evaluator <- MarginalLikelihoodEvaluator$new( self$rank_decomp, self$covariates_dim$nb_covariates, self$covariates, self$omega, self$y, is_transposed = FALSE ) self$temporal_ll_evaluator <- MarginalLikelihoodEvaluator$new( self$rank_decomp, self$covariates_dim$nb_covariates, self$covariates, self$omega, self$y, is_transposed = TRUE ) }, #~ @description Create and set the hyperparameter samplers #~ for spatial and temporal kernels, tau and the precision matrix create_hparam_samplers = function() { self$spatial_params_sampler <- KernelParamSampler$new( kernel = self$spatial_kernel, marginal_ll_eval_fn = private$calc_spatial_marginal_ll ) self$temporal_params_sampler <- KernelParamSampler$new( kernel = self$temporal_kernel, marginal_ll_eval_fn = private$calc_temporal_marginal_ll ) self$tau_sampler <- TauSampler$new(self$a_0, self$b_0, self$omega$sum()) self$precision_matrix_sampler <- PrecisionMatrixSampler$new( self$covariates_dim$nb_covariates, self$rank_decomp ) }, #~ @description Calculate the spatial marginal likelihood calc_spatial_marginal_ll = function() { 
return(self$spatial_ll_evaluator$calc_likelihood( self$spatial_kernel$covariance_matrix, self$temporal_decomp, self$covs_decomp, self$tau )) }, #~ @description Calculate the temporal marginal likelihood calc_temporal_marginal_ll = function() { return(self$temporal_ll_evaluator$calc_likelihood( self$temporal_kernel$covariance_matrix, self$spatial_decomp, self$covs_decomp, self$tau )) }, #~ @description Sample new kernel hyperparameters sample_kernel_hparam = function() { self$spatial_params_sampler$sample() self$temporal_params_sampler$sample() }, #~ @description Sample the precision matrix from a Wishart distribution sample_precision_wish = function() { self$precision_matrix_sampler$sample(self$covs_decomp) }, #~ @description Sample a new covariate decomposition from a mulivariate normal distribution #~ @param initial_decomp tensor: Decomposition of the previous iteration #~ @param chol_l tensor: The cholesky decomposition of the l tensor #~ @param uu tensor: uu decomposition #~ @return A tensor containing the newly sampled covariate decomposition sample_decomp_norm = function(initial_decomp, chol_l, uu) { precision_mat <- chol_l$t() mean_vec <- self$tau * torch::torch_triangular_solve( uu$unsqueeze(2), precision_mat, upper = TRUE )[[1]]$squeeze() return( sample_norm_multivariate(mean_vec, precision_mat)$reshape_as(initial_decomp$t())$t() ) }, #~ @description Sample a new spatial covariate decomposition sample_spatial_decomp = function() { ll_eval <- self$spatial_ll_evaluator self$spatial_decomp <- private$sample_decomp_norm( self$spatial_decomp, ll_eval$chol_lu, ll_eval$uu ) }, #~ @description Sample a new covariate decomposition sample_covariate_decomp = function() { chol_res <- get_cov_decomp_chol( self$spatial_decomp, self$temporal_decomp, self$covariates, self$rank_decomp, self$omega, self$tau, self$y, self$precision_matrix_sampler$wish_precision_tensor ) self$covs_decomp <- private$sample_decomp_norm( self$covs_decomp, chol_res$chol_lc, chol_res$cc ) }, #~ @description Sample a new temporal covariate decomposition sample_temporal_decomp = function() { # Need to recalculate uu and chol_u since covariate decomp changed private$calc_temporal_marginal_ll() ll_eval <- self$temporal_ll_evaluator self$temporal_decomp <- private$sample_decomp_norm( self$temporal_decomp, ll_eval$chol_lu, ll_eval$uu ) }, #~ @description Set BKTR error values (MAE, RMSE, Total Sq. 
Error) and sample a new tau
        set_errors_and_sample_precision_tau = function(iter) {
            self$result_logger$set_y_and_beta_estimates(self$decomposition_tensors, iter)
            error_metrics <- self$result_logger$set_error_metrics()
            self$tau <- self$tau_sampler$sample(self$result_logger$total_sq_error)
        },

        #~ @description Collect all necessary iteration values
        collect_iter_values = function(iter) {
            self$result_logger$collect_iter_samples(iter, as.numeric(self$tau$cpu()))
        },

        #~ @description Log final iteration results via the result logger
        log_final_iter_results = function() {
            self$result_logger$log_final_iter_results()
            self$has_completed_sampling <- TRUE
        },

        #~ @description Initialize all parameters that are needed before we start the MCMC sampling
        initialize_params = function() {
            private$init_covariate_decomp()
            private$create_result_logger()
            private$create_likelihood_evaluators()
            private$create_hparam_samplers()
            # Calculate first likelihoods
            private$calc_spatial_marginal_ll()
            private$calc_temporal_marginal_ll()
        },

        pred_simu_new_decomp = function(
            pred_type,
            iter_no,
            position_df,
            new_position_df,
            jitter
        ) {
            old_decomp <- (
                if (pred_type == 'spatial') self$result_logger$spatial_decomp_per_iter[, , iter_no]
                else self$result_logger$temporal_decomp_per_iter[, , iter_no]
            )
            old_decomp <- TSR$tensor(old_decomp)
            if (is.null(new_position_df)) {
                return(old_decomp)
            }
            nb_pos <- nrow(position_df)
            nb_new_pos <- nrow(new_position_df)
            nb_old_pos <- nb_pos - nb_new_pos
            old_kernel <- if (pred_type == 'spatial') self$spatial_kernel else self$temporal_kernel
            new_kernel <- old_kernel
            for (param in new_kernel$parameters) {
                if (!param$is_fixed) {
                    param_full_repr <- paste(capitalize_str(pred_type), param$full_name, sep = ' - ')
                    param$value <- as.numeric(
                        self$result_logger$hyperparameters_per_iter_df[iter_no, ..param_full_repr]
                    )
                }
            }
            new_kernel$set_positions(position_df)
            cov_mat <- new_kernel$kernel_gen()
            old_cov <- cov_mat[1:nb_old_pos, 1:nb_old_pos]
            new_old_cov <- cov_mat[-nb_new_pos:nb_pos, 1:nb_old_pos]
            old_new_cov <- cov_mat[1:nb_old_pos, -nb_new_pos:nb_pos]
            new_cov <- cov_mat[-nb_new_pos:nb_pos, -nb_new_pos:nb_pos]
            new_decomp_mus <- new_old_cov$matmul(old_cov$inverse())$matmul(old_decomp)
            new_decomp_cov <- new_cov - new_old_cov$matmul(old_cov$inverse())$matmul(old_new_cov)
            new_decomp_cov <- (new_decomp_cov + new_decomp_cov$t()) / 2
            if (!is.null(jitter)) {
                new_decomp_cov <- new_decomp_cov + jitter * TSR$eye(new_decomp_cov$shape[1])
            }
            new_decomp <- (
                torch::distr_multivariate_normal(new_decomp_mus$t(), new_decomp_cov)$sample()$t()
            )
            return(torch::torch_cat(c(old_decomp, new_decomp), dim = 1))
        }
    ),
    active = list(
        #' @field summary A summary of the BKTRRegressor instance
        summary = function() {
            if (!self$has_completed_sampling) {
                stop('Summary can only be accessed after running the MCMC sampling.')
            }
            return(self$result_logger$summary())
        },
        #' @field beta_covariates_summary A dataframe containing the summary of the beta covariates
        beta_covariates_summary = function() {
            if (!self$has_completed_sampling) {
                stop('Beta covariates summary can only be accessed after running the MCMC sampling.')
            }
            return(self$result_logger$beta_covariates_summary_df)
        },
        #' @field y_estimates A dataframe containing the y estimates
        y_estimates = function() {
            if (!self$has_completed_sampling) {
                stop('Y estimates can only be accessed after running the MCMC sampling.')
            }
            # Use self (not a global object name) so the active binding works on any instance
            y_est <- self$result_logger$y_estimates_df
            y_est[as.array(self$omega$flatten()$cpu()) == 0, 3] <- NaN
            return(y_est)
        },
        #' @field imputed_y_estimates A dataframe
containing the imputed y estimates imputed_y_estimates = function() { if (!self$has_completed_sampling) { stop('Imputed Y estimates can only be accessed after running the MCMC sampling.') } return(self$result_logger$y_estimates_df) }, #' @field beta_estimates A dataframe containing the beta estimates beta_estimates = function() { if (!self$has_completed_sampling) { stop('Beta estimates can only be accessed after running the MCMC sampling.') } return(self$result_logger$beta_estimates_df) }, #' @field hyperparameters_per_iter_df A dataframe containing the beta estimates per iteration hyperparameters_per_iter_df = function() { if (!self$has_completed_sampling) { stop('Hyperparameters trace can only be accessed after running the MCMC sampling.') } return(self$result_logger$hyperparameters_per_iter_df) }, #' @field decomposition_tensors List of all used decomposition tensors decomposition_tensors = function() { return( list( spatial_decomp = self$spatial_decomp, temporal_decomp = self$temporal_decomp, covs_decomp = self$covs_decomp ) ) } ) ) #' @title Summarize a BKTRRegressor instance #' @param object A BKTRRegressor instance #' @param ... Additional arguments to comply with generic function #' @export summary.BKTRRegressor <- function(object, ...) { cat(object$summary) } #' @title Print the summary of a BKTRRegressor instance #' @param x A BKTRRegressor instance #' @param ... Additional arguments to comply with generic function #' @export print.BKTRRegressor <- function(x, ...) { cat(x$summary) }
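# Illustrative sketch (not run): once `mcmc_sampling()` has completed on a fitted
# regressor, the S3 generics defined above and the active bindings of the class can
# be used to inspect the results. The name `bktr_regressor` is a placeholder for any
# fitted BKTRRegressor instance (as in the class examples).
# if (interactive()) {
#     summary(bktr_regressor)                           # formatted summary via summary.BKTRRegressor
#     print(bktr_regressor)                             # same output via print.BKTRRegressor
#     head(bktr_regressor$y_estimates)                  # y estimates, missing responses set to NaN
#     head(bktr_regressor$beta_estimates)               # posterior mean of the betas
#     head(bktr_regressor$hyperparameters_per_iter_df)  # hyperparameter traces per iteration
# }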
/scratch/gouwar.j/cran-all/cranData/BKTR/R/bktr.R
#' @title Distance tensor checks #' #' @description Check that two tensors are valid for distance computation #' #' @noRd check_dist_tensor_dimensions <- function(x1, x2, expected_nb_dim = 2, expected_last_dim_shape = NULL) { if (!(TSR$is_tensor(x1) && TSR$is_tensor(x2))) { stop('Distance params must be tensors') } if (!(x1$ndim == x2$ndim && x2$ndim == expected_nb_dim)) { stop(sprintf('Distance params should have %s dimension(s)', expected_nb_dim)) } if ( !is.null(expected_last_dim_shape) && !( expected_last_dim_shape == x1$shape[-1] && expected_last_dim_shape == x2$shape[-1] ) ) { stop( sprintf('Distance params last dimension should contain %s elements', expected_last_dim_shape) ) } } #' @title Function to compute a tensor's euclidean distance #' #' @description Function to compute the euclidean distance between a tensor and its transpose. #' #' @noRd get_euclidean_dist_tsr <- function(x) { check_dist_tensor_dimensions(x, x) x1 <- x$unsqueeze(1) x2 <- x$unsqueeze(1)$transpose(1, 2) return((x1 - x2)$pow(2)$sum(3)$sqrt()) } #' @title Class to project coordinates with mercator projection on a 2D plane #' #' @description Project coordinates with mercator projection on a 2D plane for #' a given scale. Keep track of the scale and the center of the projection to #' be able to project new coordinates which is useful during interpolation. #' #' @noRd GeoMercatorProjector <- R6::R6Class( 'GeoMercatorProjector', public = list( ini_df = NULL, x_mid_point = NULL, y_mid_point = NULL, coords_scale = NULL, scale = NULL, scaled_ini_df = NULL, EARTH_RADIUM_KM = 6371, initialize = function(df, scale = 10.0) { self$ini_df = df km_df = private$km_from_coords_df(df) lon_x <- km_df$lon_x lat_y <- km_df$lat_y x_max <- max(lon_x) x_min <- min(lon_x) y_max <- max(lat_y) y_min <- min(lat_y) self$x_mid_point <- (x_min + x_max) / 2 self$y_mid_point <- (y_min + y_max) / 2 self$coords_scale = max(x_max - x_min, y_max - y_min) self$scale = scale self$scaled_ini_df = private$scale_and_center_df(km_df) }, project_new_coords = function(df) { km_df <- private$km_from_coords_df(df) return(private$scale_and_center_df(km_df)) } ), private = list( scale_and_center_df = function(df) { new_df <- df scaling_factor <- self$scale / self$coords_scale new_df$lon_x <- (new_df$lon_x - self$x_mid_point) * scaling_factor new_df$lat_y <- (new_df$lat_y - self$y_mid_point) * scaling_factor return(new_df) }, km_from_coords_df = function(df) { if (!('latitude' %in% colnames(df) && 'longitude' %in% colnames(df))) { stop('Dataframe must have columns "latitude" and "longitude"') } new_df <- df lons <- TSR$tensor(df$longitude) lats <- TSR$tensor(df$latitude) x <- (self$EARTH_RADIUM_KM / (2 * pi)) * torch_deg2rad(lons) merc_n_y <- torch_log( torch_tan(pi / 4 + torch_deg2rad(lats) / 2) ) y <- (self$EARTH_RADIUM_KM / (2 * pi)) * merc_n_y new_df$lon_x <- as.numeric(x$cpu()) new_df$lat_y <- as.numeric(y$cpu()) new_df <- new_df[, -c('latitude', 'longitude')] return(new_df) } ) )
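# Illustrative sketch (not run) of the internal helpers above: a small set of
# geographic positions is projected onto the scaled 2D plane, then the euclidean
# distance matrix of the projected points is computed. The column names
# `location`, `latitude` and `longitude` follow what `km_from_coords_df()` expects;
# the position values are arbitrary.
# if (interactive()) {
#     pos_df <- data.table::data.table(
#         location = c('a', 'b', 'c'),
#         latitude = c(45.50, 45.53, 45.55),
#         longitude = c(-73.57, -73.60, -73.55)
#     )
#     projector <- GeoMercatorProjector$new(pos_df, scale = 10)
#     xy <- projector$scaled_ini_df
#     dist_mat <- get_euclidean_dist_tsr(TSR$tensor(as.matrix(xy[, c('lon_x', 'lat_y')])))
#     dist_mat
# }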
/scratch/gouwar.j/cran-all/cranData/BKTR/R/distances.R
#' @importFrom R6 R6Class #' @import data.table # Private function to normalize a vector to [0, 1] normalize_0_1 <- function(x) { return((x - min(x)) / (max(x) - min(x))) } #' @title BIXI Data Class #' #' @description R6 class encapsulating all BIXI dataframes. It is also #' possible to use a light version of the dataset by using the \code{is_light} #' parameter. In this case, the dataset is reduced to its first 25 stations #' and first 50 days. The light version is only used for testing and short examples. #' #' @examples #' # Create a light BIXI data collection instance containing multiple dataframes #' # This only uses the first 25 stations and 50 days of the full dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' # Dataframe containing the position (latitude and longitude) of M stations #' bixi_data$spatial_positions_df #' # Dataframe containing the time position of N days (O to N-1) #' bixi_data$temporal_positions_df #' # Dataframe with spatial and temporal features for each day and station (M x N rows) #' bixi_data$data_df #' #' @export BixiData <- R6::R6Class( 'BixiData', public = list( #' @field departure_df The departure dataframe departure_df = NULL, #' @field spatial_features_df The spatial features dataframe spatial_features_df = NULL, #' @field temporal_features_df The temporal features dataframe temporal_features_df = NULL, #' @field spatial_positions_df The spatial positions dataframe spatial_positions_df = NULL, #' @field temporal_positions_df The temporal positions dataframe temporal_positions_df = NULL, #' @field data_df The data dataframe data_df = NULL, #' @field is_light Whether the light version of the dataset is used is_light = FALSE, #' @description Initialize the BIXI data class #' #' @param is_light Whether the light version of the dataset is used, #' defaults to FALSE. 
#' #' @return A new BIXI data instance initialize = function(is_light = FALSE) { self$is_light <- is_light # Normalize departure counts to [0, 1] self$departure_df <- BKTR::bixi_station_departures max_val <- max(self$departure_df[, !c('location')], na.rm = TRUE) cols <- colnames(self$departure_df[, !c('location')]) self$departure_df[, (cols) := .SD / max_val, .SDcols = cols] # Normalize spatial features column wise to [0, 1] self$spatial_features_df <- BKTR::bixi_spatial_features cols <- colnames(self$spatial_features_df[, !c('location')]) self$spatial_features_df[, (cols) := lapply(.SD, normalize_0_1), .SDcols = cols] # Normalize temporal features column wise to [0, 1] self$temporal_features_df <- BKTR::bixi_temporal_features cols <- colnames(self$temporal_features_df[, !c('time')]) self$temporal_features_df[, (cols) := lapply(.SD, normalize_0_1), .SDcols = cols] self$spatial_positions_df <- BKTR::bixi_spatial_locations self$temporal_positions_df <- BKTR::bixi_temporal_locations # Reduce the dataset to its first 25 stations and first 50 days when used for examples if (is_light) { self$spatial_positions_df <- self$spatial_positions_df[1:25, ] self$temporal_positions_df <- self$temporal_positions_df[1:50, ] kept_locs <- self$spatial_positions_df$location kept_times <- self$temporal_positions_df$time self$spatial_features_df <- self$spatial_features_df[self$spatial_features_df$location %in% kept_locs, ] self$temporal_features_df <- self$temporal_features_df[self$temporal_features_df$time %in% kept_times, ] # Filter rows and columns of departure_df use_dep_rows <- self$departure_df$location %in% kept_locs use_dep_cols <- colnames(self$departure_df) %in% c('location', as.character(kept_times)) self$departure_df <- self$departure_df[use_dep_rows, use_dep_cols, with = FALSE] } self$data_df <- reshape_covariate_dfs( spatial_df = self$spatial_features_df, temporal_df = self$temporal_features_df, y_df = self$departure_df, y_column_name = 'nb_departure' ) } ) )
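# Illustrative note: the private helper `normalize_0_1()` above rescales any numeric
# vector to the [0, 1] interval, which is how every covariate column is normalized
# before being merged into `data_df`. For example (not run):
# normalize_0_1(c(2, 4, 6, 10))   # returns 0.00 0.25 0.50 1.00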
/scratch/gouwar.j/cran-all/cranData/BKTR/R/examples.R
#' @import ggplot2 #' @include tensor_ops.R #' @include distances.R DEFAULT_LBOUND <- 1e-3 DEFAULT_UBOUND <- 1e3 #' @title R6 class for kernel's hyperparameter #' #' @description KernelParameter contains all information and behaviour related to a kernel parameters. #' #' @examplesIf torch::torch_is_installed() #' # A kernel parameter can be a constant value #' const_param <- KernelParameter$new(7, is_fixed = TRUE) #' # It can otherwise be sampled and have its value updated through sampling #' samp_param <- KernelParameter$new(1, lower_bound = 0.1, #' upper_bound = 10, slice_sampling_scale = 4) #' #' # A kernel parameter can be associated with any type of kernel #' KernelPeriodic$new(period_length = const_param, lengthscale = samp_param) #' #' @export KernelParameter <- R6::R6Class( public = list( #' @field value The hyperparameter mean's prior value or its constant value value = 0, #' @field is_fixed Says if the kernel parameter is fixed or not (if fixed, there is no sampling) is_fixed = FALSE, #' @field lower_bound The hyperparameter's minimal value during sampling lower_bound = DEFAULT_LBOUND, #' @field upper_bound The hyperparameter's maximal value during sampling upper_bound = DEFAULT_UBOUND, #' @field slice_sampling_scale The sampling range's amplitude slice_sampling_scale = log(10), #' @field hparam_precision Precision of the hyperparameter hparam_precision = 1.0, #' @field kernel The kernel associated with the parameter (it is set at kernel instanciation) kernel = NULL, #' @field name The kernel parameter's name name = NULL, #' @description Create a new \code{KernelParameter} object. #' @param value Numeric: The hyperparameter mean's prior value (Paper - \eqn{\phi}) or its constant value #' @param is_fixed Boolean: Says if the kernel parameter is fixed or not (if fixed, there is no sampling) #' @param lower_bound Numeric: Hyperparameter's minimal value during sampling (Paper - \eqn{\phi_{min}}) #' @param upper_bound Numeric: Hyperparameter's maximal value during sampling (Paper - \eqn{\phi_{max}}) #' @param slice_sampling_scale Numeric: The sampling range's amplitude (Paper - \eqn{\rho}) #' @param hparam_precision Numeric: The hyperparameter's precision #' @return A new \code{KernelParameter} object. initialize = function( value, is_fixed = FALSE, lower_bound = DEFAULT_LBOUND, upper_bound = DEFAULT_UBOUND, slice_sampling_scale = log(10), hparam_precision = 1.0 ) { self$value <- value self$lower_bound <- lower_bound self$upper_bound <- upper_bound self$is_fixed <- is_fixed self$slice_sampling_scale <- slice_sampling_scale self$hparam_precision <- hparam_precision }, #' @description Set \code{Kernel} for a given \code{KernelParameter} object. 
#' @param kernel Kernel: The kernel to associate with the parameter #' @param param_name String: The parameter's name #' @return NULL, set a new kernel for the parameter set_kernel = function(kernel, param_name) { self$kernel <- kernel self$name <- param_name self$kernel$parameters <- c(self$kernel$parameters, self) } ), active = list( #' @field full_name The kernel parameter's full name full_name = function() { if (is.null(self$kernel)) { return(self$name) } return(sprintf('%s - %s', self$kernel$name, self$name)) } ) ) #' @title Base R6 class for Kernels #' @description Abstract base class for kernels (Should not be instantiated) #' @export Kernel <- R6::R6Class( 'Kernel', public = list( #' @field kernel_variance The variance of the kernel kernel_variance = 1, #' @field jitter_value The jitter value to add to the kernel matrix jitter_value = NULL, #' @field distance_matrix The distance matrix between points in a tensor format distance_matrix = NULL, #' @field name The kernel's name name = NULL, #' @field parameters The parameters of the kernel (list of \code{KernelParameter}) parameters = c(), #' @field covariance_matrix The covariance matrix of the kernel in a tensor format covariance_matrix = NULL, #' @field positions_df The positions of the points in a dataframe format positions_df = NULL, #' @field has_dist_matrix Identify if the kernel has a distance matrix or not has_dist_matrix = NULL, #' @description Kernel abstract base constructor #' @param kernel_variance Numeric: The variance of the kernel #' @param jitter_value Numeric: The jitter value to add to the kernel matrix #' @return A new \code{Kernel} object. initialize = function(kernel_variance, jitter_value) { self$parameters <- c() self$kernel_variance <- kernel_variance self$jitter_value <- jitter_value }, #' @description Abstract method to compute the core kernel's covariance matrix core_kernel_fn = function() { stop('core_kernel_fn() is not implemented') }, #' @description Method to add jitter to the kernel's covariance matrix add_jitter_to_kernel = function() { has_null_jitter <- is.null(self$jitter_value) if (!has_null_jitter && self$jitter_value == 0) { return() } jitter_val <- ifelse(has_null_jitter, TSR$get_default_jitter(), self$jitter_value) self$covariance_matrix <- self$covariance_matrix + jitter_val * TSR$eye(nrow(self$covariance_matrix)) }, #' @description Method to compute the kernel's covariance matrix kernel_gen = function() { if (is.null(self$positions_df)) { stop('Set `positions_df` via `set_positions` before kernel evaluation.') } self$covariance_matrix <- self$kernel_variance * self$core_kernel_fn() self$add_jitter_to_kernel() return(self$covariance_matrix) }, #' @description Method to set the kernel's positions and compute the distance matrix #' @param positions_df Dataframe: The positions of the points in a dataframe format set_positions = function(positions_df) { if (ncol(positions_df) < 2) { stop('`positions_df` must have at least two columns.') } self$positions_df <- positions_df positions_tensor <- TSR$tensor(as.matrix(positions_df[, -1])) if (self$has_dist_matrix) { self$distance_matrix <- get_euclidean_dist_tsr(positions_tensor) } }, #' @description Method to plot the kernel's covariance matrix #' @param show_figure Boolean: If TRUE, the figure is shown, otherwise it is returned #' @return If \code{show_figure} is TRUE, the figure is shown, otherwise it is returned plot = function(show_figure = TRUE) { x_name <- colnames(self$positions_df)[1] y_name <- paste0(x_name, "'") df <- 
data.table(as.matrix(self$covariance_matrix$cpu())) pos_labels <- sapply(self$positions_df[, 1], as.character) colnames(df) <- pos_labels df[[x_name]] <- pos_labels df <- melt(df, id.vars = c(x_name), variable.name = y_name, value.name = 'covariance') fig <- ggplot(df, aes(.data[[x_name]], .data[[y_name]], fill = covariance)) + geom_tile() + theme_minimal() + scale_x_discrete(limits = pos_labels) + scale_y_discrete(limits = rev(pos_labels)) + ggtitle(self$name) if (show_figure) { print(fig) return(NULL) } return(fig) } ) ) #' @title R6 class for White Noise Kernels #' #' @description R6 class for White Noise Kernels #' #' @examplesIf torch::torch_is_installed() #' # Create a new white noise kernel #' k_white_noise <- KernelWhiteNoise$new() #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_white_noise$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_white_noise$kernel_gen() #' #' @export KernelWhiteNoise <- R6::R6Class( 'KernelWhiteNoise', inherit = Kernel, public = list( #' @field has_dist_matrix Identify if the kernel has a distance matrix or not has_dist_matrix = FALSE, #' @field name The kernel's name name = 'White Noise Kernel', # @description Create a new \code{KernelWhiteNoise} object. #' @param kernel_variance Numeric: The variance of the kernel #' @param jitter_value Numeric: The jitter value to add to the kernel matrix #' @return A new \code{KernelWhiteNoise} object. initialize = function( kernel_variance = 1, jitter_value = NULL ) { super$initialize(kernel_variance, jitter_value) }, #' @description Method to compute the core kernel's covariance matrix #' @return The core kernel's covariance matrix core_kernel_fn = function() { return(TSR$eye(nrow(self$positions_df))) } ) ) #' @title R6 class for Square Exponential Kernels #' #' @description R6 class for Square Exponential Kernels #' #' @examplesIf torch::torch_is_installed() #' # Create a new SE kernel #' k_se <- KernelSE$new() #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_se$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_se$kernel_gen() #' #' @export KernelSE <- R6::R6Class( 'KernelSE', inherit = Kernel, public = list( #' @field lengthscale The lengthscale parameter instance of the kernel lengthscale = NULL, #' @field has_dist_matrix Identify if the kernel has a distance matrix or not has_dist_matrix = TRUE, #' @field name The kernel's name name = 'SE Kernel', #' @description Create a new \code{KernelSE} object. #' @param lengthscale KernelParameter: The lengthscale parameter instance of the kernel #' @param kernel_variance Numeric: The variance of the kernel #' @param jitter_value Numeric: The jitter value to add to the kernel matrix #' @return A new \code{KernelSE} object. 
initialize = function(
            lengthscale = KernelParameter$new(2),
            kernel_variance = 1,
            jitter_value = NULL
        ) {
            super$initialize(kernel_variance, jitter_value)
            self$lengthscale <- lengthscale
            self$lengthscale$set_kernel(self, 'lengthscale')
        },

        #' @description Method to compute the core kernel's covariance matrix
        #' @return The core kernel's covariance matrix
        core_kernel_fn = function() {
            return(torch::torch_exp(-self$distance_matrix^2 / (2 * self$lengthscale$value^2)))
        }
    )
)


#' @title R6 class for Rational Quadratic Kernels
#'
#' @description R6 class for Rational Quadratic Kernels
#'
#' @examplesIf torch::torch_is_installed()
#' # Create a new RQ kernel
#' k_rq <- KernelRQ$new()
#' # Set the kernel's positions
#' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2))
#' k_rq$set_positions(positions_df)
#' # Generate the kernel's covariance matrix
#' k_rq$kernel_gen()
#'
#' @export
KernelRQ <- R6::R6Class(
    'KernelRQ',
    inherit = Kernel,
    public = list(
        #' @field lengthscale The lengthscale parameter instance of the kernel
        lengthscale = NULL,
        #' @field alpha The alpha parameter instance of the kernel
        alpha = NULL,
        #' @field has_dist_matrix Identify if the kernel has a distance matrix or not
        has_dist_matrix = TRUE,
        #' @field name The kernel's name
        name = 'RQ Kernel',

        #' @description Create a new \code{KernelRQ} object.
        #' @param lengthscale KernelParameter: The lengthscale parameter instance of the kernel
        #' @param alpha KernelParameter: The alpha parameter instance of the kernel
        #' @param kernel_variance Numeric: The variance of the kernel
        #' @param jitter_value Numeric: The jitter value to add to the kernel matrix
        #' @return A new \code{KernelRQ} object.
        initialize = function(
            lengthscale = KernelParameter$new(2),
            alpha = KernelParameter$new(2),
            kernel_variance = 1,
            jitter_value = NULL
        ) {
            super$initialize(kernel_variance, jitter_value)
            self$lengthscale <- lengthscale
            self$lengthscale$set_kernel(self, 'lengthscale')
            self$alpha <- alpha
            self$alpha$set_kernel(self, 'alpha')
        },

        #' @description Method to compute the core kernel's covariance matrix
        #' @return The core kernel's covariance matrix
        core_kernel_fn = function() {
            # The exponent must be applied inside return(); otherwise the function
            # would return before raising the base to the power -alpha.
            return(
                (
                    1 + self$distance_matrix^2 / (2 * self$lengthscale$value^2 * self$alpha$value)
                ) ** (-self$alpha$value)
            )
        }
    )
)


#' @title R6 class for Periodic Kernels
#'
#' @description R6 class for Periodic Kernels
#'
#' @examplesIf torch::torch_is_installed()
#' # Create a new Periodic kernel
#' k_periodic <- KernelPeriodic$new()
#' # Set the kernel's positions
#' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2))
#' k_periodic$set_positions(positions_df)
#' # Generate the kernel's covariance matrix
#' k_periodic$kernel_gen()
#'
#' @export
KernelPeriodic <- R6::R6Class(
    'KernelPeriodic',
    inherit = Kernel,
    public = list(
        #' @field lengthscale The lengthscale parameter instance of the kernel
        lengthscale = NULL,
        #' @field period_length The period length parameter instance of the kernel
        period_length = NULL,
        #' @field has_dist_matrix Identify if the kernel has a distance matrix or not
        has_dist_matrix = TRUE,
        #' @field name The kernel's name
        name = 'Periodic Kernel',

        #' @description Create a new \code{KernelPeriodic} object.
        #' @param lengthscale KernelParameter: The lengthscale parameter instance of the kernel
        #' @param period_length KernelParameter: The period length parameter instance of the kernel
        #' @param kernel_variance Numeric: The variance of the kernel
        #' @param jitter_value Numeric: The jitter value to add to the kernel matrix
        #' @return A new \code{KernelPeriodic} object.
initialize = function( lengthscale = KernelParameter$new(2), period_length = KernelParameter$new(2), kernel_variance = 1, jitter_value = NULL ) { super$initialize(kernel_variance, jitter_value) self$lengthscale <- lengthscale self$lengthscale$set_kernel(self, 'lengthscale') self$period_length <- period_length self$period_length$set_kernel(self, 'period length') }, #' @description Method to compute the core kernel's covariance matrix #' @return The core kernel's covariance matrix core_kernel_fn = function() { return(torch::torch_exp( -2 * torch::torch_sin(pi * self$distance_matrix / self$period_length$value)^2 / self$lengthscale$value^2 )) } ) ) #' @title R6 class for Matern Kernels #' #' @description R6 class for Matern Kernels #' #' @examplesIf torch::torch_is_installed() #' # Create a new Matern 3/2 kernel #' k_matern <- KernelMatern$new(smoothness_factor = 3) #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_matern$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_matern$kernel_gen() #' #' @export KernelMatern <- R6::R6Class( 'KernelMatern', inherit = Kernel, public = list( #' @field lengthscale The lengthscale parameter instance of the kernel lengthscale = NULL, #' @field smoothness_factor The smoothness factor of the kernel smoothness_factor = NULL, #' @field has_dist_matrix Identify if the kernel has a distance matrix or not has_dist_matrix = TRUE, #' @description Create a new \code{KernelMatern} object. #' @param smoothness_factor Numeric: The smoothness factor of the kernel (1, 3 or 5) #' @param lengthscale KernelParameter: The lengthscale parameter instance of the kernel #' @param kernel_variance Numeric: The variance of the kernel #' @param jitter_value Numeric: The jitter value to add to the kernel matrix initialize = function( smoothness_factor = 5, lengthscale = KernelParameter$new(2), kernel_variance = 1, jitter_value = NULL ) { if (smoothness_factor %in% c(1, 3, 5) == FALSE) { stop('Smoothness factor should be one of the following values 1, 3 or 5') } super$initialize(kernel_variance, jitter_value) self$name <- paste0('Matern ', smoothness_factor, '/2 Kernel') self$smoothness_factor <- smoothness_factor self$lengthscale <- lengthscale self$lengthscale$set_kernel(self, 'lengthscale') }, #' @description Method to the get the smoothness kernel function for a given integer smoothness factor #' @return The smoothness kernel function get_smoothness_kernel_fn = function() { if (self$smoothness_factor == 1) { return(function(t) return(1)) } else if (self$smoothness_factor == 3) { return(function(t) return(1 + t)) } else if (self$smoothness_factor == 5) { return(function(t) return(1 + t * (1 + t / 3))) } else { torch:::value_error('Kernel function for this smoothness factor is not implemented') } }, #' @description Method to compute the core kernel's covariance matrix #' @return The core kernel's covariance matrix core_kernel_fn = function() { temp_kernel <- ( self$distance_matrix * sqrt(self$smoothness_factor) / self$lengthscale$value ) return(self$get_smoothness_kernel_fn()(temp_kernel) * torch::torch_exp(-temp_kernel)) } ), ) #' @title Kernel Composition Operations #' #' @description Kernel Composition Operations Enum. Possibilities of operation between #' two kernels to generate a new composed kernel. The values are: \code{MUL} and \code{ADD}. 
#' #' @export CompositionOps <- list( 'MUL' = 'MUL', 'ADD' = 'ADD' ) #' @title R6 class for Composed Kernels #' #' @description R6 class for Composed Kernels #' #' @examplesIf torch::torch_is_installed() #' # Create a new locally periodic kernel #' k_loc_per <- KernelComposed$new( #' left_kernel = KernelSE$new(), #' right_kernel = KernelPeriodic$new(), #' new_name = 'Locally Periodic Kernel', #' composition_operation = CompositionOps$MUL #' ) #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_loc_per$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_loc_per$kernel_gen() #' #' @export KernelComposed <- R6::R6Class( 'KernelComposed', inherit = Kernel, public = list( #' @field name The kernel's name name = '', #' @field parameters The parameters of the kernel (list of \code{KernelParameter}) parameters = c(), #' @field left_kernel The left kernel to use for composition left_kernel = NULL, #' @field right_kernel The right kernel to use for composition right_kernel = NULL, #' @field composition_operation The operation to use for composition composition_operation = NULL, #' @field has_dist_matrix Identify if the kernel has a distance matrix or not has_dist_matrix = TRUE, #' @description Create a new \code{KernelComposed} object. #' @param left_kernel Kernel: The left kernel to use for composition #' @param right_kernel Kernel: The right kernel to use for composition #' @param new_name String: The name of the composed kernel #' @param composition_operation CompositionOps: The operation to use for composition initialize = function( left_kernel, right_kernel, new_name, composition_operation ) { composed_variance <- 1 new_jitter_val <- max( left_kernel$jitter_value, right_kernel$jitter_value, TSR$get_default_jitter() ) super$initialize(composed_variance, new_jitter_val) self$left_kernel <- left_kernel self$right_kernel <- right_kernel self$name <- new_name self$parameters <- c( left_kernel$parameters, right_kernel$parameters ) self$composition_operation <- composition_operation }, #' @description Method to compute the core kernel's covariance matrix #' @return The core kernel's covariance matrix core_kernel_fn = function() { if (self$composition_operation == CompositionOps$MUL) { return(self$left_kernel$core_kernel_fn() * self$right_kernel$core_kernel_fn()) } else if (self$composition_operation == CompositionOps$ADD) { return(self$left_kernel$core_kernel_fn() + self$right_kernel$core_kernel_fn()) } else { torch:::value_error('Composition operation is not implemented') } }, #' @description Method to set the kernel's positions and compute the distance matrix #' @param positions_df Dataframe: The positions of the points in a dataframe format #' @return NULL, set the kernel's positions and compute the distance matrix set_positions = function(positions_df) { super$set_positions(positions_df) self$left_kernel$set_positions(positions_df) self$right_kernel$set_positions(positions_df) } ) ) #' @title R6 class for Kernels Composed via Addition #' #' @description R6 class automatically generated when #' adding two kernels together. 
#' #' @examplesIf torch::torch_is_installed() #' # Create a new additive kernel #' k_rq_plus_per <- KernelAddComposed$new( #' left_kernel = KernelRQ$new(), #' right_kernel = KernelPeriodic$new(), #' new_name = 'SE + Periodic Kernel' #' ) #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_rq_plus_per$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_rq_plus_per$kernel_gen() #' #' @export KernelAddComposed <- R6::R6Class( 'KernelAddComposed', inherit = KernelComposed, public = list( #' @description Create a new \code{KernelAddComposed} object. #' @param left_kernel Kernel: The left kernel to use for composition #' @param right_kernel Kernel: The right kernel to use for composition #' @param new_name String: The name of the composed kernel #' @return A new \code{KernelAddComposed} object. initialize = function(left_kernel, right_kernel, new_name) { super$initialize(left_kernel, right_kernel, new_name, CompositionOps$ADD) } ) ) #' @title R6 class for Kernels Composed via Multiplication #' #' @description R6 class automatically generated when #' multiplying two kernels together. #' #' @examplesIf torch::torch_is_installed() #' # Create a new locally periodic kernel #' k_loc_per <- KernelMulComposed$new( #' left_kernel = KernelSE$new(), #' right_kernel = KernelPeriodic$new(), #' new_name = 'Locally Periodic Kernel' #' ) #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_loc_per$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_loc_per$kernel_gen() #' #' @export KernelMulComposed <- R6::R6Class( 'KernelMulComposed', inherit = KernelComposed, public = list( #' @description Create a new \code{KernelMulComposed} object. #' @param left_kernel Kernel: The left kernel to use for composition #' @param right_kernel Kernel: The right kernel to use for composition #' @param new_name String: The name of the composed kernel #' @return A new \code{KernelMulComposed} object. initialize = function(left_kernel, right_kernel, new_name) { super$initialize(left_kernel, right_kernel, new_name, CompositionOps$MUL) } ) ) #' @title Operator overloading for kernel addition #' @description Operator overloading for kernel addition #' @param k1 Kernel: The left kernel to use for composition #' @param k2 Kernel: The right kernel to use for composition #' @return A new \code{KernelAddComposed} object. #' #' @examplesIf torch::torch_is_installed() #' # Create a new additive kernel #' k_rq_plus_per <- KernelRQ$new() + KernelPeriodic$new() #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_rq_plus_per$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_rq_plus_per$kernel_gen() #' #' @export `+.Kernel` <- function(k1, k2) { composed_kernel <- KernelAddComposed$new(k1, k2, paste0(k1$name, ' + ', k2$name)) return(composed_kernel) } #' @title Operator overloading for kernel multiplication #' @description Operator overloading for kernel multiplication #' @param k1 Kernel: The left kernel to use for composition #' @param k2 Kernel: The right kernel to use for composition #' @return A new \code{KernelMulComposed} object. 
#' #' @examplesIf torch::torch_is_installed() #' # Create a new locally periodic kernel #' k_loc_per <- KernelSE$new() * KernelPeriodic$new() #' # Set the kernel's positions #' positions_df <- data.frame(x=c(-4, 0, 3), y=c(-2, 0, 2)) #' k_loc_per$set_positions(positions_df) #' # Generate the kernel's covariance matrix #' k_loc_per$kernel_gen() #' #' @export `*.Kernel` <- function(k1, k2) { composed_kernel <- KernelMulComposed$new(k1, k2, paste0(k1$name, ' * ', k2$name)) return(composed_kernel) }
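# Illustrative sketch (assumptions: all classes and methods used here are the ones
# defined in this file; the position values are arbitrary). It shows that the
# overloaded `*` and `+` operators can be chained to build more complex composed
# kernels. Wrapped in `if (FALSE)` so it is parsed but never executed when this
# file is sourced.
if (FALSE) {
    # (Matern 3/2 x Periodic) + SE, built through the S3 operator overloads above
    k_composed <- KernelMatern$new(smoothness_factor = 3) * KernelPeriodic$new() + KernelSE$new()
    positions_df <- data.frame(x = c(-4, 0, 3), y = c(-2, 0, 2))
    k_composed$set_positions(positions_df)
    k_composed$kernel_gen()
    # Name and parameter list are assembled recursively from the child kernels
    print(k_composed$name)
    length(k_composed$parameters)
}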
# ---- End of source file: BKTR/R/kernels.R ----
#' @import torch #' @importFrom R6 "R6Class" #' @include tensor_ops.R #' @title R6 class to evaluate the marginal likelihood of the hyperparameter #' #' @description MarginalLikelihoodEvaluator enable the calculation of the marginal #' likelihood of the kernel hyperparameters. This likelihood is used during the sampling #' process. #' #' @noRd MarginalLikelihoodEvaluator <- R6::R6Class( 'MarginalLikelihoodEvaluator', public = list( axis_permutation = c(), rank_decomp = NULL, nb_covariates = NULL, covariates = NULL, omega = NULL, y_masked = NULL, inv_k = NULL, chol_k = NULL, chol_lu = NULL, uu = NULL, likelihood = NULL, initialize = function(rank_decomp, nb_covariates, covariates, omega, y, is_transposed) { self$rank_decomp <- rank_decomp self$nb_covariates <- nb_covariates self$covariates <- covariates self$omega <- omega self$axis_permutation <- if (is_transposed) c(2, 1) else c(1, 2) self$y_masked <- y * omega }, calc_likelihood = function(kernel_values, decomp_values, covs_decomp, tau) { rank_decomp <- self$rank_decomp kernel_size <- kernel_values$shape[1] lambda_size <- kernel_size * self$rank_decomp psi_u <- torch::torch_einsum("ijk,jkl->ilj", c( self$covariates$permute(c(self$axis_permutation, 3)), TSR$khatri_rao_prod(decomp_values, covs_decomp)$reshape(c(-1, self$nb_covariates, rank_decomp)) )) psi_u_mask <- psi_u * self$omega$permute(c(self$axis_permutation))$unsqueeze(2)$expand_as(psi_u) self$chol_k <- torch::linalg_cholesky(kernel_values) kernel_inverse <- torch::linalg_solve( self$chol_k$t(), torch::linalg_solve(self$chol_k, TSR$eye(kernel_size)) ) stabilized_kernel_inv <- (kernel_inverse$t() + kernel_inverse) / 2 self$inv_k <- TSR$kronecker_prod( TSR$eye(rank_decomp), stabilized_kernel_inv ) # I_R Kron inv(Ks) lambda_u <- tau * torch::torch_einsum('ijk,ilk->ijl', c(psi_u_mask, psi_u_mask)) # tau * H_T * H_T' lambda_u <- ( lambda_u$transpose(1, -1)$unsqueeze(-1) * TSR$eye(kernel_size) )$transpose(2, 3)$reshape(c(lambda_size, lambda_size)) lambda_u <- lambda_u + self$inv_k self$chol_lu <- torch::linalg_cholesky(lambda_u) uu <- torch::torch_triangular_solve( torch::torch_einsum( 'ijk,ik->ji', c(psi_u_mask, self$y_masked$permute(c(self$axis_permutation))) )$flatten()$unsqueeze(2), self$chol_lu, upper = FALSE )[[1]]$squeeze() self$likelihood <- as.numeric(( TSR$tensor(0.5 * tau ** 2) * uu$t()$matmul(uu) - self$chol_lu$diag()$log()$sum() - TSR$tensor(rank_decomp) * self$chol_k$diag()$log()$sum() )$cpu()) self$uu <- uu return(self$likelihood) } ) )
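# Illustrative sketch of how this internal evaluator is driven during sampling. The
# tensor shapes and values below are arbitrary toys (M locations, N time points, C
# covariates, decomposition rank R); in practice BKTRRegressor constructs and calls
# the evaluator itself. Wrapped in `if (FALSE)` so it never executes on source.
if (FALSE) {
    M <- 3; N <- 4; C <- 2; R <- 2
    ml_eval <- MarginalLikelihoodEvaluator$new(
        rank_decomp = R, nb_covariates = C,
        covariates = TSR$randn(c(M, N, C)),
        omega = TSR$ones(c(M, N)),
        y = TSR$randn(c(M, N)),
        is_transposed = FALSE  # evaluate along the spatial axis
    )
    # kernel_values is the spatial covariance matrix of the current iteration;
    # decomp_values and covs_decomp are the temporal and covariate decompositions
    ml_eval$calc_likelihood(
        kernel_values = TSR$eye(M),
        decomp_values = TSR$randn(c(N, R)),
        covs_decomp = TSR$randn(c(C, R)),
        tau = 1
    )
}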
# ---- End of source file: BKTR/R/likelihood_evaluator.R ----
#' @import ggplot2 #' @import ggmap #' @importFrom stats reshape #' @title Print ggplot figure #' @description Utility function to print a ggplot figure #' @param fig ggplot: ggplot figure to print #' @param fig_width Numeric: Figure width. #' @param fig_height Numeric: Figure height. #' @param fig_resolution Numeric: Figure resolution PPI. #' @return NULL #' #' @noRd print_ggplot_fig <- function(fig, fig_width, fig_height, fig_resolution) { # The following options are mainly for notebooks rendering (like Colab) options(repr.plot.width = fig_width, repr.plot.height = fig_height, repr.plot.res = fig_resolution) # The following sizes are for RStudio and other rendering print(fig, vp = grid::viewport(width = unit(fig_width, 'inches'), height = unit(fig_height, 'inches'))) } #' @title Plot Temporal Beta Coefficients #' @description Create a plot of the beta values through time for a given #' spatial point and a set of feature labels. #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param plot_feature_labels Array: Array of feature labels to plot. #' @param spatial_point_label String: Spatial point label to plot. #' @param date_format String: Format of the date to use in bktr dataframes for the time. #' Defaults to '\%Y-\%m-\%d'. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Numeric: Figure width when figure is shown. Defaults to 8.5. #' @param fig_height Numeric: Figure height when figure is shown. Defaults to 5.5. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. #' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot temporal beta coefficients for the first station and the two features #' plot_temporal_betas( #' bktr_regressor, #' plot_feature_labels = c('mean_temp_c', 'area_park'), #' spatial_point_label = bixi_data$spatial_positions_df$location[1]) #' #' @export plot_temporal_betas <- function( bktr_reg, plot_feature_labels, spatial_point_label, date_format = '%Y-%m-%d', show_figure = TRUE, fig_width = 8.5, fig_height = 5.5, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } # Verify all labels are valid get_label_index_or_raise(spatial_point_label, bktr_reg$spatial_labels, 'spatial') sapply(plot_feature_labels, function(feature_label) { get_label_index_or_raise(feature_label, bktr_reg$feature_labels, 'feature') }) beta_est_df <- bktr_reg$get_beta_summary_df( c(spatial_point_label), NULL, plot_feature_labels ) plot_title <- paste('Location:', spatial_point_label) if (!is.null(date_format)) beta_est_df$time <- as.Date(beta_est_df$time, format = date_format) fig <- ( ggplot(beta_est_df, aes(.data$time, .data$Mean, group = .data$feature, color = .data$feature)) + geom_line() + geom_ribbon( aes(ymin = .data$Low2.5p, ymax = .data$Up97.5p, fill = .data$feature), alpha = 0.3, color = NA ) + ggtitle(plot_title) + theme_bw() + ylab('Beta Value') + xlab('Time') + labs(fill = 'Feature', color = 'Feature') ) if (!show_figure) { return(fig) } 
print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Spatial Beta Coefficients #' @description Create a plot of beta values through space for a given #' temporal point and a set of feature labels. #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param plot_feature_labels Array: Array of feature labels to plot. #' @param temporal_point_label String: Temporal point label to plot. #' @param nb_cols Integer: The number of columns to use in the facet grid. #' @param use_dark_mode Boolean: Whether to use a dark mode for the geographic map or not. #' @param zoom Integer: Zoom level for the geographic map. Defaults to 11. #' @param google_token String or NULL: Google API token to use for the geographic map. Defaults to NULL. #' If NULL, use Stamen maps. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Numeric: Figure width when figure is shown. Defaults to 8.5. #' @param fig_height Numeric: Figure height when figure is shown. Defaults to 5.5. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. #' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot spatial beta coefficients for the first time point and the two features #' plot_spatial_betas( #' bktr_regressor, #' plot_feature_labels = c('mean_temp_c', 'area_park'), #' temporal_point_label = bixi_data$temporal_positions_df$time[1]) #' #' # We can also use light mode and plot the maps side by side #' plot_spatial_betas( #' bktr_regressor, #' plot_feature_labels = c('mean_temp_c', 'area_park', 'total_precip_mm'), #' temporal_point_label = bixi_data$temporal_positions_df$time[10], #' use_dark_mode = FALSE, nb_cols = 3) #' #' @export plot_spatial_betas <- function( bktr_reg, plot_feature_labels, temporal_point_label, nb_cols = 1, use_dark_mode = TRUE, show_figure = TRUE, zoom = 11, google_token = NULL, fig_width = 8.5, fig_height = 5.5, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } # Verify all labels are valid point_label_index <- get_label_index_or_raise(temporal_point_label, bktr_reg$temporal_labels, 'temporal') sapply(plot_feature_labels, function(feature_label) { get_label_index_or_raise(feature_label, bktr_reg$feature_labels, 'feature') }) # Get only spatial estimates beta_df <- bktr_reg$beta_estimates out_col_names <- c('location', plot_feature_labels) beta_df <- beta_df[beta_df$time == temporal_point_label, out_col_names, with = FALSE] coords_projector <- bktr_reg$geo_coords_projector is_map <- !is.null(coords_projector) coord_df <- if (is_map) coords_projector$ini_df else bktr_reg$spatial_positions_df if (ncol(coord_df[, -1]) != 2) { stop('Spatial coordinates must be 2 dimensions to be plotted.') } full_df <- beta_df[coord_df, on = 'location', nomatch = NULL] full_df <- reshape( full_df, direction = 'long', idvar = colnames(coord_df), v.names = 'value', timevar = 'feature', varying = plot_feature_labels, times = plot_feature_labels ) plot_title <- paste0('Estimated Beta at 
Time Point : ', temporal_point_label) longitude <- latitude <- value <- NULL # Used for CRAN global binding checks if (is_map) { is_google <- !is.null(google_token) map_source <- ifelse(is_google, 'google', 'stamen') map_color <- ifelse(is_google && use_dark_mode, 'bw', 'color') if (is_google) { ggmap::register_google(google_token) map_type <- 'roadmap' } else if (use_dark_mode) { map_type <- 'toner' } else { map_type <- 'toner-lite' } fig <- ( qmplot( x = longitude, y = latitude, color = value, data = full_df, source = map_source, maptype = map_type, mapcolor = map_color, zoom = zoom ) + facet_wrap(~feature, ncol = nb_cols) + theme_bw() + theme( axis.ticks.x = element_blank(), axis.ticks.y = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), ) + labs(x = NULL, y = NULL) + scale_color_viridis_c() + ggtitle(plot_title) ) } else { x_col_name <- colnames(full_df)[2] y_col_name <- colnames(full_df)[3] fig <- ( ggplot(full_df, aes(x = .data[[x_col_name]], y = .data[[y_col_name]], color = .data$value)) + geom_point() + facet_wrap(~feature, ncol = nb_cols) + theme_bw() + scale_color_viridis_c() + ggtitle(plot_title) ) } if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Beta Coefficients Distribution #' @description Plot the distribution of beta values for a given list of labels. #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param labels_list List: List of labels tuple (spatial, temporal, feature) for #' which to plot the beta distribution through iterations #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Integer: Figure width. Defaults to 9. #' @param fig_height Integer: Figure height. Defaults to 6. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. 
#' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot temporal beta coefficients for the first station and the first feature #' spa_lab <- bixi_data$spatial_positions_df$location[3] #' plot_beta_dists( #' bktr_regressor, #' labels_list = list( #' c(spa_lab, '2019-04-15', 'area_park'), #' c(spa_lab, '2019-04-16', 'area_park'), #' c(spa_lab, '2019-04-16', 'mean_temp_c') #' ), #' ) #' #' @export plot_beta_dists <- function( bktr_reg, labels_list, show_figure = TRUE, fig_width = 9, fig_height = 6, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } plot_title <- 'Posterior distribution of beta values per given spatial point, temporal point and feature' df <- data.table(sapply(labels_list, function(x) bktr_reg$get_iterations_betas(x[1], x[2], x[3]))) col_names <- sapply(labels_list, function(x) paste(x, collapse = '\n')) setnames(df, col_names) df <- reshape( df, direction = 'long', v.names = 'value', timevar = 'labels', varying = col_names, times = col_names ) fig <- ( ggplot(df, aes(x = .data$labels, y = .data$value, fill = .data$labels)) + geom_violin(trim = FALSE) + ggtitle(plot_title) + ylab('Beta Value') + xlab('Labels') + labs(fill = 'Labels', color = 'Labels') ) if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Beta Coefficients Distribution Regrouped by Covariates #' @description Plot the distribution of beta estimates regrouped by covariates. #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param feature_labels Array or NULL: Array of feature labels for #' which to plot the beta estimates distribution. If NULL plot for all features. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Integer: Figure width. Defaults to 9. #' @param fig_height Integer: Figure height. Defaults to 6. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. 
#' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' bktr_regressor <- BKTRRegressor$new( #' formula = 'nb_departure ~ 1 + area_park + mean_temp_c + total_precip_mm', #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot beta estimates distribution for all features #' plot_covariates_beta_dists(bktr_regressor) #' # Or plot for a subset of features #' plot_covariates_beta_dists(bktr_regressor, c('area_park', 'mean_temp_c')) #' #' @export plot_covariates_beta_dists <- function( bktr_reg, feature_labels = NULL, show_figure = TRUE, fig_width = 9, fig_height = 6, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } if (is.null(feature_labels)) { feature_labels <- bktr_reg$feature_labels } else { sapply(feature_labels, function(feature_label) { get_label_index_or_raise(feature_label, bktr_reg$feature_labels, 'feature') }) } plot_title <- 'Distribution of beta estimates by feature across time and space' full_df <- reshape( bktr_reg$beta_estimates[, feature_labels, with = FALSE], direction = 'long', v.names = 'value', timevar = 'feature', varying = feature_labels, times = feature_labels ) fig <- ( ggplot(full_df, aes(x = .data$feature, y = .data$value, fill = .data$feature)) + geom_violin() + ggtitle(plot_title) + ylab('Beta Value') + xlab('Feature') + labs(fill = 'Feature', color = 'Feature') ) if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Hyperparameters Distributions #' @description Plot the distribution of hyperparameters through iterations #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param hyperparameters Array or NULL: Array of hyperparameters to plot. #' If NULL, plot all hyperparameters. Defaults to NULL. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Integer: Figure width. Defaults to 9. #' @param fig_height Integer: Figure height. Defaults to 6. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. 
#' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' k_matern <- KernelMatern$new() #' k_periodic <- KernelPeriodic$new() #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_kernel = k_matern, #' temporal_kernel = k_periodic, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot the distribution of all hyperparameters #' plot_hyperparams_dists(bktr_regressor) #' #' # Plot the distribution of the spatial kernel hyperparameters #' spa_par_name <- paste0('Spatial - ', k_matern$parameters[[1]]$full_name) #' plot_hyperparams_dists(bktr_regressor, spa_par_name) #' #' # Plot the distribution of the temporal kernel hyperparameters #' temp_par_names <- sapply(k_periodic$parameters, function(x) x$full_name) #' temp_par_names <- paste0('Temporal - ', temp_par_names) #' plot_hyperparams_dists(bktr_regressor, temp_par_names) #' #' @export plot_hyperparams_dists <- function( bktr_reg, hyperparameters = NULL, show_figure = TRUE, fig_width = 9, fig_height = 6, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } df <- bktr_reg$result_logger$hyperparameters_per_iter_df all_hparams <- colnames(df[, -1]) hparams <- if (is.null(hyperparameters)) all_hparams else hyperparameters hparam_diff <- setdiff(hparams, all_hparams) if (length(hparam_diff) > 0) { formatted_available_params <- paste(all_hparams, collapse = ',\n\t') formatted_hparam_diff <- paste(hparam_diff, collapse = ', ') stop(sprintf( 'Hyperparameter(s) %s not found. Available hyperparameters are:\n\t%s', formatted_hparam_diff, formatted_available_params )) } df <- reshape( df[, -1], direction = 'long', v.names = 'value', timevar = 'hyperparameter', varying = hparams, times = hparams ) fig <- ( ggplot(df, aes(x = .data$hyperparameter, y = .data$value, fill = .data$hyperparameter)) + geom_violin(trim = FALSE) + ggtitle('Posterior Distribution of BKTR Hyperparameters') + ylab('Hyperparameter Value') + xlab('Hyperparameter') + labs(fill = 'Hyperparameter', color = 'Hyperparameter') ) if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Hyperparameters Traceplot #' @description Plot the evolution of hyperparameters through iterations. (Traceplot) #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param hyperparameters Array or NULL: Array of hyperparameters to plot. #' If NULL, plot all hyperparameters. Defaults to NULL. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Integer: Figure width. Defaults to 9. #' @param fig_height Integer: Figure height. Defaults to 5.5. #' @param fig_resolution Numeric: Figure resolution PPI. Defaults to 200. #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. 
#' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' k_matern <- KernelMatern$new() #' k_periodic <- KernelPeriodic$new() #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_kernel = k_matern, #' temporal_kernel = k_periodic, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot the traceplot of all hyperparameters #' plot_hyperparams_traceplot(bktr_regressor) #' #' # Plot the traceplot of the spatial kernel hyperparameters #' spa_par_name <- paste0('Spatial - ', k_matern$parameters[[1]]$full_name) #' plot_hyperparams_traceplot(bktr_regressor, spa_par_name) #' #' # Plot the traceplot of the temporal kernel hyperparameters #' temp_par_names <- sapply(k_periodic$parameters, function(x) x$full_name) #' temp_par_names <- paste0('Temporal - ', temp_par_names) #' plot_hyperparams_traceplot(bktr_regressor, temp_par_names) #' #' @export plot_hyperparams_traceplot <- function( bktr_reg, hyperparameters = NULL, show_figure = TRUE, fig_width = 9, fig_height = 5.5, fig_resolution = 200 ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } df <- bktr_reg$result_logger$hyperparameters_per_iter_df all_hparams <- colnames(df[, -1]) hparams <- if (is.null(hyperparameters)) all_hparams else hyperparameters hparam_diff <- setdiff(hparams, all_hparams) if (length(hparam_diff) > 0) { formatted_available_params <- paste(all_hparams, collapse = ',\n\t') formatted_hparam_diff <- paste(hparam_diff, collapse = ', ') stop(sprintf( 'Hyperparameter(s) %s not found. Available hyperparameters are:\n\t%s', formatted_hparam_diff, formatted_available_params )) } df_cols <- c('iter', hparams) df <- reshape( df[, df_cols, with = FALSE], direction = 'long', v.names = 'value', idvar = 'iter', timevar = 'hyperparameter', varying = hparams, times = hparams ) fig <- ( ggplot(df, aes(.data$iter, .data$value, group = .data$hyperparameter, color = .data$hyperparameter)) + geom_line() + ggtitle('Hyperparameter values through sampling iterations (Traceplot)') + theme_bw() + ylab('Hyperparameter Value') + xlab('Sampling Iter') + labs(fill = 'Hyperparameter', color = 'Hyperparameter') + theme(legend.position = 'bottom') ) if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) } #' @title Plot Y Estimates #' @description Plot y estimates vs observed y values. #' @param bktr_reg BKTRRegressor: BKTRRegressor object. #' @param show_figure Boolean: Whether to show the figure. Defaults to True. #' @param fig_width Numeric: Figure width when figure is shown. Defaults to 5. #' @param fig_height Numeric: Figure height when figure is shown. Defaults to 5. #' @param fig_resolution Numeric: Figure resolution PPI when figure is shown. Defaults to 200. #' @param fig_title String or NULL: Figure title if provided. Defaults to 'y estimates vs observed y values' #' @return ggplot or NULL: ggplot object or NULL if show_figure is set to FALSE. 
#' #' @examplesIf torch::torch_is_installed() #' # Launch MCMC sampling on a light version of the BIXI dataset #' bixi_data <- BixiData$new(is_light = TRUE) #' bktr_regressor <- BKTRRegressor$new( #' data_df <- bixi_data$data_df, #' spatial_positions_df = bixi_data$spatial_positions_df, #' temporal_positions_df = bixi_data$temporal_positions_df, #' burn_in_iter = 5, sampling_iter = 10) # For example only (too few iterations) #' bktr_regressor$mcmc_sampling() #' #' # Plot Y estimates vs observed y values #' plot_y_estimates(bktr_regressor) #' #' @export plot_y_estimates <- function( bktr_reg, show_figure = TRUE, fig_width = 5, fig_height = 5, fig_resolution = 200, fig_title = 'y estimates vs observed y values' ) { if (!bktr_reg$has_completed_sampling) { stop('Plots can only be accessed after MCMC sampling.') } # Verify all labels are valid omega_list <- as.numeric(bktr_reg$omega$flatten()$cpu()) != 0 y_est_list <- bktr_reg$y_estimates[omega_list]$y_est y_list <- as.numeric(bktr_reg$y$flatten()$cpu()[omega_list]) min_y <- min(y_list) max_y <- max(y_list) df <- data.table(y = y_list, y_est = y_est_list) fig <- ( ggplot(df, aes(x = .data$y, y = .data$y_est)) + geom_point(color = '#39a7d0', alpha = 0.6, shape = 21, fill = '#20a0d0') + geom_segment(aes(x = min_y, y = min_y, xend = max_y, yend = max_y), color = 'black', linetype = 'twodash', linewidth = 1) + theme_bw() + ylab('Estimated y') + xlab('Observed y') ) if (!is.null(fig_title)) { fig <- fig + ggtitle(fig_title) } if (!show_figure) { return(fig) } print_ggplot_fig(fig, fig_width, fig_height, fig_resolution) }
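# Illustrative usage note: every plotting helper above returns the ggplot object when
# `show_figure = FALSE`, so figures can be customised or saved with standard ggplot2
# tooling. `bktr_regressor` is assumed to be a fitted BKTRRegressor as in the roxygen
# examples above; wrapped in `if (FALSE)` so it never executes on source.
if (FALSE) {
    fig <- plot_y_estimates(bktr_regressor, show_figure = FALSE)
    fig <- fig + ggplot2::theme_minimal()
    ggplot2::ggsave('y_estimates.png', fig, width = 5, height = 5, dpi = 200)
}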
# ---- End of source file: BKTR/R/plots.R ----
#' @importFrom R6 "R6Class" #' @import torch #' @include tensor_ops.R #' @title R6 class for Logging BKTR Results #' #' @description The ResultLogger encapsulate all the behavior related to #' logging iteration information during the BKTR algorithm #' #' @noRd ResultLogger <- R6::R6Class( 'ResultLogger', public = list( y = NULL, omega = NULL, covariates = NULL, nb_burn_in_iter = NULL, nb_sampling_iter = NULL, logged_params_map = NULL, beta_estimates = NULL, y_estimates = NULL, total_elapsed_time = NULL, formula = NULL, rank_decomp = NULL, spatial_labels = NULL, temporal_labels = NULL, feature_labels = NULL, spatial_kernel = NULL, temporal_kernel = NULL, hparam_labels = NULL, hparam_per_iter = NULL, spatial_decomp_per_iter = NULL, temporal_decomp_per_iter = NULL, covs_decomp_per_iter = NULL, sum_beta_est = NULL, sum_y_est = NULL, beta_estimates_df = NULL, y_estimates_df = NULL, beta_covariates_summary_df = NULL, hyperparameters_per_iter_df = NULL, last_time_stamp = NULL, error_metrics = NULL, total_sq_error = NULL, # Metrics used to create beta summaries moment_metrics = c('Mean', 'SD'), quantile_metrics = c( 'Min', 'Q1', 'Median', 'Q3', 'Max', 'Low2.5p', 'Up97.5p' ), quantile_values = c(0, 0.25, 0.5, 0.75, 1, 0.025, 0.975), # Summary parameters LINE_NCHAR = 70, TAB_STR = ' ', MAIN_COL_WIDTH = 18, OTHER_COL_WIDTH = 8, OTHER_COL_FMT = '%8.3f', MAIN_SUMMARY_COLS = c('Mean', 'Median', 'SD'), DISTRIB_COLS = c('Mean', 'Median', 'SD', 'Low2.5p', 'Up97.5p'), initialize = function( y, omega, covariates, nb_burn_in_iter, nb_sampling_iter, rank_decomp, formula, spatial_labels, temporal_labels, feature_labels, spatial_kernel, temporal_kernel ) { # Create a tensor dictionary holding scalar data gathered through all iterations self$logged_params_map <- list() # Create tensors that accumulate values needed for estimates self$spatial_labels <- spatial_labels self$temporal_labels <- temporal_labels self$feature_labels <- feature_labels nofix_spa_params <- Filter(function(p) !p$is_fixed, spatial_kernel$parameters) nofix_temp_params <- Filter(function(p) !p$is_fixed, temporal_kernel$parameters) self$hparam_labels <- c( 'Tau', paste('Spatial', sapply(nofix_spa_params, function(x) x$full_name), sep = ' - '), paste('Temporal', sapply(nofix_temp_params, function(x) x$full_name), sep = ' - ') ) self$spatial_decomp_per_iter <- TSR$zeros( c(length(spatial_labels), rank_decomp, nb_sampling_iter) ) self$temporal_decomp_per_iter <- TSR$zeros( c(length(temporal_labels), rank_decomp, nb_sampling_iter) ) self$covs_decomp_per_iter <- TSR$zeros( c(length(feature_labels), rank_decomp, nb_sampling_iter) ) self$hparam_per_iter <- TSR$zeros(c(length(self$hparam_labels), nb_sampling_iter)) self$sum_beta_est <- TSR$zeros(covariates$shape) self$sum_y_est <- TSR$zeros(y$shape) self$total_elapsed_time <- 0 self$y <- y self$omega <- omega self$covariates <- covariates self$formula <- formula self$rank_decomp <- rank_decomp self$spatial_kernel <- spatial_kernel self$temporal_kernel <- temporal_kernel self$nb_burn_in_iter <- nb_burn_in_iter self$nb_sampling_iter <- nb_sampling_iter # Set initial timer value to calculate iterations' processing time self$last_time_stamp <- Sys.time() }, #~ @description Collect current iteration values inside the historical data tensor #~list. Note that errors have already been calculated before tau sampling. 
collect_iter_samples = function(iter, tau_value) { elapsed_time <- private$get_elapsed_time() if (iter > self$nb_burn_in_iter) { self$sum_beta_est <- self$sum_beta_est + self$beta_estimates self$sum_y_est <- self$sum_y_est + self$y_estimates # Collect hyperparameters s_iter <- iter - self$nb_burn_in_iter s_params <- Filter(function(p) !p$is_fixed, self$spatial_kernel$parameters) t_params <- Filter(function(p) !p$is_fixed, self$temporal_kernel$parameters) self$hparam_per_iter[1, s_iter] <- tau_value self$hparam_per_iter[2:(1 + length(s_params)), s_iter] <- sapply( s_params, function(p) p$value ) self$hparam_per_iter[(2 + length(s_params)):length(self$hparam_labels), s_iter] <- sapply( t_params, function(p) p$value ) } total_logged_params <- c( list( iter = iter, is_burn_in = ifelse(iter <= self$nb_burn_in_iter, 1, 0), elapsed_time = elapsed_time ), self$error_metrics ) for (p_name in names(total_logged_params)) { self$logged_params_map[[p_name]] <- c( self$logged_params_map[[p_name]], total_logged_params[[p_name]] ) } private$print_iter_result(iter, elapsed_time) }, set_error_metrics = function() { nb_observ <- self$omega$sum() err_matrix <- (self$y_estimates - self$y) * self$omega total_sq_error <- err_matrix$norm() ** 2 mae <- err_matrix$abs()$sum() / nb_observ rmse <- (total_sq_error / nb_observ)$sqrt() self$total_sq_error <- as.numeric(total_sq_error$cpu()) self$error_metrics <- list( MAE = as.double(mae$cpu()), RMSE = as.double(rmse$cpu()) ) }, set_y_and_beta_estimates = function(decomp_tensors_map, iter) { # Calculate Coefficient Estimation if (iter > self$nb_burn_in_iter) { iter_indx <- iter - self$nb_burn_in_iter self$spatial_decomp_per_iter[, , iter_indx] <- decomp_tensors_map[['spatial_decomp']] self$temporal_decomp_per_iter[, , iter_indx] <- decomp_tensors_map[['temporal_decomp']] self$covs_decomp_per_iter[, , iter_indx] <- decomp_tensors_map[['covs_decomp']] } self$beta_estimates <- torch::torch_einsum( 'im,jm,km->ijk', c( decomp_tensors_map[['spatial_decomp']], decomp_tensors_map[['temporal_decomp']], decomp_tensors_map[['covs_decomp']] ) ) self$y_estimates <- torch::torch_einsum('ijk,ijk->ij', c(self$covariates, self$beta_estimates)) }, log_final_iter_results = function() { self$beta_estimates <- self$sum_beta_est / self$nb_sampling_iter self$y_estimates <- self$sum_y_est / self$nb_sampling_iter beta_covariates_summary <- private$create_distrib_values_summary( self$beta_estimates$reshape(c(-1, length(self$feature_labels))), dim = 1 ) self$beta_covariates_summary_df <- cbind( data.table(self$feature_labels), data.table(as.matrix(beta_covariates_summary$t()$cpu())) ) setnames(self$beta_covariates_summary_df, c('feature', self$moment_metrics, self$quantile_metrics)) y_beta_index <- CJ(location = self$spatial_labels, time = self$temporal_labels) self$y_estimates_df <- cbind( y_beta_index, data.table(as.matrix(self$y_estimates$cpu()$flatten())) ) setnames(self$y_estimates_df, c('location', 'time', 'y_est')) self$beta_estimates_df <- cbind( y_beta_index, data.table(as.matrix(self$beta_estimates$reshape(c(-1, length(self$feature_labels)))$cpu())) ) setnames(self$beta_estimates_df, c('location', 'time', self$feature_labels)) self$hyperparameters_per_iter_df <- cbind( data.table(1:self$nb_sampling_iter), data.table(as.matrix(self$hparam_per_iter$t()$cpu())) ) setnames(self$hyperparameters_per_iter_df, c('iter', self$hparam_labels)) self$set_error_metrics() private$print_iter_result('TOTAL', self$total_elapsed_time) }, # Print a summary of the BKTR regressor instance after MCMC 
sampling. summary = function() { line_sep <- strrep('=', self$LINE_NCHAR) summary_str <- c( '', line_sep, format('BKTR Regressor Summary', width = self$LINE_NCHAR, justify = 'centre'), line_sep, private$get_formula_str(), '', sprintf('Burn-in iterations: %i', self$nb_burn_in_iter), sprintf('Sampling iterations: %i', self$nb_sampling_iter), sprintf('Rank decomposition: %i', self$rank_decomp), sprintf('Nb Spatial Locations: %i', length(self$spatial_labels)), sprintf('Nb Temporal Points: %i', length(self$temporal_labels)), sprintf('Nb Covariates: %i', length(self$feature_labels)), line_sep, 'In Sample Errors:', sprintf('%sRMSE: %.3f', self$TAB_STR, self$error_metrics['RMSE']), sprintf('%sMAE: %.3f', self$TAB_STR, self$error_metrics['MAE']), sprintf('Computation time: %.2fs.', self$total_elapsed_time), line_sep, '-- Spatial Kernel --', private$kernel_summary(self$spatial_kernel, 'spatial'), '', '-- Temporal Kernel --', private$kernel_summary(self$temporal_kernel, 'temporal'), line_sep, private$beta_summary(), line_sep, '' ) return(paste(summary_str, collapse = '\n')) }, get_beta_summary_df = function( spatial_labels = NULL, temporal_labels = NULL, feature_labels = NULL ) { spatial_labs <- if (is.null(spatial_labels)) self$spatial_labels else spatial_labels temporal_labs <- if (is.null(temporal_labels)) self$temporal_labels else temporal_labels feature_labs <- if (is.null(feature_labels)) self$feature_labels else feature_labels iteration_betas <- self$get_iteration_betas_tensor(spatial_labs, temporal_labs, feature_labs) beta_summary <- private$create_distrib_values_summary(iteration_betas, dim = 2)$t()$cpu() index_cols <- CJ(location = spatial_labs, time = temporal_labs, feature = feature_labs) df <- cbind( index_cols, data.table(as.matrix(beta_summary)) ) setnames(df, c('location', 'time', 'feature', self$moment_metrics, self$quantile_metrics)) return(df) }, get_iteration_betas_tensor = function(spatial_labels, temporal_labels, feature_labels) { spatial_indexes <- get_label_indexes(spatial_labels, self$spatial_labels, 'spatial') temporal_indexes <- get_label_indexes(temporal_labels, self$temporal_labels, 'temporal') feature_indexes <- get_label_indexes(feature_labels, self$feature_labels, 'feature') betas_per_iterations <- torch::torch_einsum( 'sri,tri,cri->stci', c( self$spatial_decomp_per_iter[spatial_indexes, , , drop = FALSE], self$temporal_decomp_per_iter[temporal_indexes, , , drop = FALSE], self$covs_decomp_per_iter[feature_indexes, , , drop = FALSE] ) ) return(betas_per_iterations$reshape(c(-1, self$nb_sampling_iter))) } ), private = list( get_elapsed_time = function() { iter_elapsed_time <- Sys.time() - self$last_time_stamp self$total_elapsed_time <- self$total_elapsed_time + iter_elapsed_time self$last_time_stamp <- Sys.time() return(iter_elapsed_time) }, print_iter_result = function(iter, elapsed_time) { formatted_err_vals <- sprintf('%7.4f', unlist(self$error_metrics)) formatted_errors <- paste0(names(self$error_metrics), ': ', formatted_err_vals, collapse = ' | ') iter_format <- paste('Iter', ifelse(is.character(iter), '%s', '%-5d')) result_items <- c( sprintf(iter_format, iter), sprintf('Elapsed Time: %8.2fs', elapsed_time), formatted_errors ) print(paste0(result_items, collapse = ' | ')) }, #~ @description Create a summary for a given tensor of beta values across a given dimension #~ for the metrics set in the class. #~ @param values Tensor: Values to summarize #~ @param dim Integer: Dimension of the tensor we want to summarize. 
If NULL, #~ we want to summarize the whole tensor and flatten it. Defaults to NULL. #~ @return A tensor with summaries for the given beta values create_distrib_values_summary = function(values, dim) { all_metrics <- c(self$moment_metrics, self$quantile_metrics) summary_shape <- c(length(all_metrics)) if (!is.null(dim)) { beta_val_shape <- values$shape summary_shape <- c( summary_shape, beta_val_shape[-dim] ) } beta_summaries <- TSR$zeros(summary_shape) # In the advent of having no values, we return the empty tensor if (values$numel() == 0) { return(beta_summaries) } # Dimension for moment calculations are a bit different than for quantile moment_dim <- ifelse(is.null(dim), c(), dim) beta_summaries[1] <- values$mean(dim = moment_dim) beta_summaries[2] <- values$std(dim = moment_dim) beta_summaries[(length(self$moment_metrics) + 1):length(all_metrics)] <- torch::torch_quantile( values, TSR$tensor(self$quantile_values), dim = dim ) return(beta_summaries) }, get_formula_str = function() { formula_str <- paste(self$formula[2], self$formula[3], sep = " ~ ") formula_str <- paste('Formula:', formula_str) f_wrap <- strwrap(formula_str, width = self$LINE_NCHAR) return(paste(f_wrap, collapse = paste0('\n', self$TAB_STR))) }, #~ @description Get a string representation of a given kernel. Since the kernel can be #~ composed, this function needs to be recursive. #~ @param kernel Kernel: The kernel we want summarize. #~ @param kernel_type ('spatial' or 'temporal'): The type of kernel. #~ @param indent_count Integer: Indentation level (related to the depth of composition). #~ Defaults to 0. #~ @return A string representation of the kernel. Containing the name of the kernel, #~ the estimated parameters distribution and the fixed parameters. kernel_summary = function(kernel, kernel_type, indent_count = 0) { params <- kernel$parameters if (class(kernel)[1] %in% c('KernelAddComposed', 'KernelMulComposed')) { new_ind_nb <- indent_count + 1 op_str <- capitalize_str(kernel$composition_operation) kernel_elems <- c( paste0('Composed Kernel (', op_str, ')'), private$kernel_summary(kernel$left_kernel, kernel_type, new_ind_nb), paste0(self$TAB_STR, ifelse(op_str == 'Add', '+', '*')), private$kernel_summary(kernel$right_kernel, kernel_type, new_ind_nb) ) } else { fixed_params <- params[sapply(params, function(x) x$is_fixed)] sampled_params <- params[sapply(params, function(x) !x$is_fixed)] sampled_par_indexes <- sapply( sampled_params, function(x) which(self$hparam_labels == paste0(capitalize_str(kernel_type), ' - ', x$full_name)) ) sampled_par_tsr <- self$hparam_per_iter[sampled_par_indexes, drop = FALSE] sampled_par_summary <- private$create_distrib_values_summary(sampled_par_tsr, dim = 2) sampled_par_df <- cbind( data.table(sapply(sampled_params, function(x) x$name)), data.table(as.matrix(sampled_par_summary$t()$cpu())) ) setnames(sampled_par_df, c('Parameter', self$moment_metrics, self$quantile_metrics)) out_cols <- c('Parameter', self$DISTRIB_COLS) sampled_par_df <- sampled_par_df[, ..out_cols] sampled_par_strs <- private$get_formatted_df_rows(sampled_par_df) fixed_par_strs <- sapply( fixed_params, function(x) sprintf('%-20s Fixed Value: %.3f', x$name, x$value) ) kernel_elems <- c( kernel$name, 'Parameter(s):', sampled_par_strs, fixed_par_strs ) } kernel_elems <- paste0(rep(self$TAB_STR, indent_count), kernel_elems) return(paste(kernel_elems, collapse = '\n')) }, #~ @description Get a string representation of the beta estimates aggregated per #~ covariates. 
(This shows the distribution of the beta hats per covariates) #~ @return A string representation of the beta estimates. beta_summary = function() { beta_est_cols <- c('feature', self$MAIN_SUMMARY_COLS) distrib_df <- self$beta_covariates_summary_df[, ..beta_est_cols] beta_distrib_str_rows <- private$get_formatted_df_rows(distrib_df) beta_summary_strs <- c( 'Beta Estimates Summary (Aggregated Per Covariates)', '', beta_distrib_str_rows ) return(paste(beta_summary_strs, collapse = '\n')) }, format_df_row = function(df_row) { df_cols <- c( format(trunc_str(df_row[, 1], self$MAIN_COL_WIDTH), width = self$MAIN_COL_WIDTH, justify = 'left'), sapply(unlist(df_row[, -1]), function(x) sprintf(self$OTHER_COL_FMT, x)) ) return(paste(df_cols, collapse = ' ')) }, # Format a dataframe to be printed in a table. get_formatted_df_rows = function(df) { df_header_cols <- c( strrep(' ', self$MAIN_COL_WIDTH), sapply(colnames(df)[-1], function(x) format(x, width = self$OTHER_COL_WIDTH, justify = 'right')) ) df_header_row <- paste(df_header_cols, collapse = ' ') df_str_rows <- by(df, seq_len(nrow(df)), private$format_df_row) return(c(df_header_row, df_str_rows)) } ) )
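# Illustrative sketch: the ResultLogger is created and filled internally by
# BKTRRegressor, but its outputs can be inspected after MCMC sampling through the
# regressor instance (as the plotting helpers above do). `bktr_regressor` is assumed
# to be a regressor that has completed sampling; wrapped in `if (FALSE)` so it never
# executes on source.
if (FALSE) {
    logger <- bktr_regressor$result_logger
    cat(logger$summary())                     # formatted text summary of the run
    head(logger$hyperparameters_per_iter_df)  # sampled hyperparameters per iteration
    logger$get_beta_summary_df()              # distribution summary of all beta estimates
}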
# ---- End of source file: BKTR/R/result_logger.R ----
#' @importFrom R6 "R6Class" #' @import torch #' @title R6 class for kernel's hyperparameter sampling #' #' @description The KernelParamSampler encapsulate all the behavior related to #' the sampling of the kernel hyperparameters #' #' @noRd KernelParamSampler <- R6::R6Class( 'KernelParamSampler', public = list( kernel = NULL, marginal_ll_eval_fn = NULL, initialize = function( kernel, marginal_ll_eval_fn ) { self$kernel <- kernel self$marginal_ll_eval_fn <- marginal_ll_eval_fn }, initialize_theta_bounds = function(param) { theta_range <- param$slice_sampling_scale * as.numeric(TSR$rand(1)$cpu()) theta_min <- max(log(param$value) - theta_range, log(param$lower_bound)) theta_max <- min(theta_min + param$slice_sampling_scale, log(param$upper_bound)) return(list(min = theta_min, max = theta_max)) }, prior_fn = function(param) { return(-0.5 * param$hparam_precision * log(param$value) ** 2) }, sample_rand_theta_value = function(theta_min, theta_max) { return(theta_min + (theta_max - theta_min) * as.numeric(TSR$rand(1)$cpu())) }, sample_param = function(param) { theta_bounds <- self$initialize_theta_bounds(param) theta_min <- theta_bounds$min theta_max <- theta_bounds$max initial_theta <- log(param$value) self$kernel$kernel_gen() initial_marginal_likelihood <- self$marginal_ll_eval_fn() + self$prior_fn(param) density_threshold <- as.numeric(TSR$rand(1)$cpu()) while (TRUE) { new_theta <- self$sample_rand_theta_value(theta_min, theta_max) param$value <- exp(new_theta) self$kernel$kernel_gen() new_marginal_likelihood <- self$marginal_ll_eval_fn() + self$prior_fn(param) marg_ll_diff <- new_marginal_likelihood - initial_marginal_likelihood if (exp(marg_ll_diff) > density_threshold) { return(param$value) } if (new_theta < initial_theta) { theta_min <- new_theta } else { theta_max <- new_theta } } }, sample = function() { for (param in self$kernel$parameters) { if (!param$is_fixed) { self$sample_param(param) } } } ) ) #' @title Sample a tensor of random values from a normal multivariate distribution #' #' @description The sampling use a tensor of mean and the upper triangular portion of the precision matrix #' #' @noRd sample_norm_multivariate <- function(mean_vec, precision_upper_tri) { # TODO Open PR & Issue for https://github.com/mlverse/torch/blob/main/R/distributions-multivariate_normal.R L:86 # Not Able to use the precision matrix because of priority of ops (!is.null(NULL) + !is.null(1) + !is.null(1)) == F # ERROR comes from torch::distr_multivariate_normal(torch::torch_zeros(2), precision_matrix = torch::torch_eye(2)) return( torch::torch_triangular_solve( TSR$tensor(torch::torch_randn_like(mean_vec))$unsqueeze(2), precision_upper_tri, upper = TRUE )[[1]]$squeeze() + mean_vec ) } #' @noRd get_cov_decomp_chol <- function( spatial_decomp, time_decomp, covs, rank_cp, omega, tau, y, wish_precision_tensor ) { y_masked <- omega * y # TODO Merge some parts with marginal ll of spatial and temporal # get corresponding norm multivariate mean b <- TSR$khatri_rao_prod(spatial_decomp, time_decomp)$reshape( c(spatial_decomp$shape[1], time_decomp$shape[1], rank_cp) ) psi_c <- torch::torch_einsum('ijk,ijl->ijlk', c(covs, b)) psi_c_mask <- psi_c * omega$unsqueeze(3)$unsqueeze(4)$expand_as(psi_c) psi_c_mask <- psi_c_mask$permute(c(2, 1, 3, 4))$reshape( c(psi_c$shape[1] * psi_c$shape[2], psi_c$shape[3] * psi_c$shape[4]) ) inv_s <- TSR$kronecker_prod(TSR$eye(rank_cp), wish_precision_tensor) lambda_c <- tau * psi_c_mask$t()$matmul(psi_c_mask) + inv_s chol_lc <- torch::linalg_cholesky(lambda_c) cc <- 
torch::linalg_solve(chol_lc, psi_c_mask$t()$matmul(y_masked$t()$flatten())) return(list(chol_lc = chol_lc, cc = cc)) } #' @title R6 class for the Tau precision hyperparameter sampling #' #' @description Encapsulate all the behavior that allows to generate new tau values #' #' @noRd TauSampler <- R6::R6Class( 'TauSampler', public = list( b_0 = NULL, a_tau = NULL, initialize = function(a_0, b_0, nb_observations) { self$b_0 <- b_0 self$a_tau <- TSR$tensor(a_0 + 0.5 * nb_observations) }, sample = function(total_sq_error) { b_tau <- self$b_0 + 0.5 * total_sq_error return(TSR$tensor( torch::distr_gamma(self$a_tau$cpu(), b_tau)$sample() )) } ) ) #' @title R6 class to sample new precision matrices #' #' @description Encapsulate all the behavior that allows to sample new precision matrices from #' a Wishart distribution #' #' @noRd # TODO create a PR to add rand wishart in R Torch PrecisionMatrixSampler <- R6::R6Class( 'PrecisionMatrixSampler', public = list( nb_covariates = NULL, wish_df = NULL, wish_precision_tensor = NULL, initialize = function(nb_covariates, rank_cp) { self$nb_covariates <- nb_covariates self$wish_df <- nb_covariates + rank_cp }, sample = function(covs_decomp) { w <- covs_decomp$matmul(covs_decomp$t()) + TSR$eye(self$nb_covariates) wish_sigma <- as.matrix(((w + w$t()) * 0.5)$inverse()$cpu()) wish_precision_matrix <- rWishart(1, self$wish_df, wish_sigma)[, , 1] self$wish_precision_tensor <- TSR$tensor(wish_precision_matrix) return(self$wish_precision_tensor) } ) )
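# Illustrative sketch of the two lightweight samplers above, using arbitrary toy
# values (in practice they are constructed and driven by the MCMC loop of
# BKTRRegressor). Wrapped in `if (FALSE)` so it never executes on source.
if (FALSE) {
    tau_sampler <- TauSampler$new(a_0 = 1e-6, b_0 = 1e-6, nb_observations = 120)
    new_tau <- tau_sampler$sample(total_sq_error = 42)

    prec_sampler <- PrecisionMatrixSampler$new(nb_covariates = 3, rank_cp = 2)
    wish_precision <- prec_sampler$sample(covs_decomp = TSR$randn(c(3, 2)))
}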
# ---- End of source file: BKTR/R/samplers.R ----
#' @import torch #' @importFrom R6 R6Class #' @importFrom R6P Singleton #' @title R6 singleton that contains the configuration for the tensor backend #' #' @description Tensor backend configuration and methods for all the tensor operations #' in BKTR #' #' @examplesIf torch::torch_is_installed() #' # Set the seed, setup the tensor floating point type and device #' TSR$set_params(fp_type='float64', fp_device='cpu', seed=42) #' # Create a tensor from a vector #' TSR$tensor(c(1, 2, 3)) #' # Create a tensor from a matrix #' TSR$tensor(matrix(c(1, 2, 3, 4), nrow=2)) #' # Create a 3x3 tensor with a diagonal of ones and zeros elsewhere #' TSR$eye(3) #' # Create a tensor of ones (with 6 elements, 2 rows and 3 columns) #' TSR$ones(c(2, 3)) #' # Create a tensor of zeros (with 12 elements, 3 rows and 4 columns) #' TSR$zeros(c(3, 4)) #' # Create a tensor of random uniform values (with 6 elements) #' TSR$rand(c(2, 3)) #' # Create a tensor of random normal values (with 6 elements) #' TSR$randn(c(2, 3)) #' # Create a tensor of random normal values with the same shape as a given tensor #' tsr_a <- TSR$randn(c(2, 3)) #' TSR$randn_like(tsr_a) #' # Create a tensor of a range of values (1, 2, 3, 4) #' TSR$arange(1, 4) #' # Choose two random values from a given tensor without replacement #' tsr_b <- TSR$rand(6) #' TSR$rand_choice(tsr_b, 2) #' # Use the tensor operator to compute the kronecker product of two 2x2 matrices #' tsr_c <- TSR$tensor(matrix(c(1, 2, 3, 4), nrow=2)) #' tsr_d <- TSR$tensor(matrix(c(5, 6, 7, 8), nrow=2)) #' TSR$kronecker_prod(tsr_c, tsr_d) # Returns a 4x4 tensor #' # Use the tensor operator to compute the khatri rao product of two 2x2 matrices #' TSR$khatri_rao_prod(tsr_c, tsr_d) # Returns a 4x2 tensor #' # Check if a given object is a tensor #' TSR$is_tensor(tsr_d) # Returns TRUE #' TSR$is_tensor(TSR$eye(2)) # Returns TRUE #' TSR$is_tensor(1) # Returns FALSE #' #' @export TensorOperator <- R6::R6Class( 'TensorOperator', inherit = R6P::Singleton, private = list( # Private values used to store the config of the underlying tensor library dtype = NULL, device = NULL ), public = list( # Public facing config values used streamlined for TensorOperator #' @field fp_type The floating point type to use for the tensor operations fp_type = NULL, #' @field fp_device The device to use for the tensor operations fp_device = NULL, #' @description Initialize the tensor operator with the given floating point type #' and device #' @param fp_type The floating point type to use for the tensor operations (either #' "float64" or "float32") #' @param fp_device The device to use for the tensor operations (either "cpu" or #' "cuda") #' @return A new tensor operator instance initialize = function( fp_type = 'float64', fp_device = 'cpu' ) { self$set_params(fp_type, fp_device) }, #' @description Set the tensor operator parameters #' @param fp_type The floating point type to use for the tensor operations (either #' "float64" or "float32") #' @param fp_device The device to use for the tensor operations (either "cpu" or #' "cuda") #' @param seed The seed to use for the random number generator set_params = function( fp_type = NULL, fp_device = NULL, seed = NULL ) { if (!is.null(fp_type)) { if (fp_type == 'float64') { private$dtype <- torch::torch_float64 } else if (fp_type == 'float32') { private$dtype <- torch::torch_float32 } else { stop('`fp_type` must be either "float64" or "float32"') } self$fp_type <- fp_type } if (!is.null(fp_device)) { private$device <- fp_device self$fp_device <- fp_device } if (!is.null(seed)) 
{ torch::torch_manual_seed(seed) # This is for rWishart until it is implemented in R Torch set.seed(seed) } }, #' @description Get the default jitter value for the floating point type used by the tensor operator #' @return The default jitter value for the floating point type used by the tensor operator get_default_jitter = function() { # I wanted this to be an active binding but it is compiled at build time # See: https://github.com/r-lib/R6/issues/152 if (private$dtype() == torch::torch_float64()) { return(1e-8) } else if (private$dtype() == torch::torch_float32()) { return(1e-4) } stop('The dtype used by TSR has no default mapped jitter value') }, #' @description Create a tensor from a vector or matrix of data with the tensor operator dtype and device #' @param tensor_data The vector or matrix of data to create the tensor from #' @return A new tensor with the tensor operator dtype and device tensor = function(tensor_data) { return( torch::torch_tensor( tensor_data, dtype = private$dtype(), device = private$device ) ) }, #' @description Check if a provided object is a tensor #' @param tensor The object to check #' @return A boolean indicating if the object is a tensor is_tensor = function(tensor) { return(is(tensor, 'torch_tensor')) }, #' @description Create a tensor with a diagonal of ones and zeros with the tensor operator dtype and device #' for a given dimension #' @param eye_dim The dimension of the tensor to create #' @return A new tensor with a diagonal of ones and zeros with the tensor operator dtype and device eye = function(eye_dim) { return( torch::torch_eye( eye_dim, dtype = private$dtype(), device = private$device ) ) }, #' @description Create a tensor of ones with the tensor operator dtype and device for a given dimension #' @param tsr_dim The dimension of the tensor to create #' @return A new tensor of ones with the tensor operator dtype and device ones = function(tsr_dim) { return( torch::torch_ones( tsr_dim, dtype = private$dtype(), device = private$device ) ) }, #' @description Create a tensor of zeros with the tensor operator dtype and device for a given dimension #' @param tsr_dim The dimension of the tensor to create #' @return A new tensor of zeros with the tensor operator dtype and device zeros = function(tsr_dim) { return( torch::torch_zeros( tsr_dim, dtype = private$dtype(), device = private$device ) ) }, #' @description Create a tensor of random uniform values with the tensor operator dtype and #' device for a given dimension #' @param tsr_dim The dimension of the tensor to create #' @return A new tensor of random values with the tensor operator dtype and device rand = function(tsr_dim) { return(torch::torch_rand(tsr_dim, dtype = private$dtype(), device = private$device)) }, #' @description Create a tensor of random normal values with the tensor operator dtype and device #' for a given dimension #' @param tsr_dim The dimension of the tensor to create #' @return A new tensor of random normal values with the tensor operator dtype and device randn = function(tsr_dim) { return(torch::torch_randn(tsr_dim, dtype = private$dtype(), device = private$device)) }, #' @description Create a tensor of random uniform values with the same shape as a given tensor #' with the tensor operator dtype and device #' @param input_tensor The tensor to use as a shape reference #' @return A new tensor of random uniform values with the same shape as a given tensor randn_like = function(input_tensor) { return(torch::torch_randn_like(input_tensor, dtype = private$dtype(), device = 
private$device)) }, #' @description Create a tensor of a range of values with the tensor operator dtype and device #' for a given start and end #' @param start The start of the range #' @param end The end of the range #' @return A new tensor of a range of values with the tensor operator dtype and device arange = function(start, end) { return(self$tensor(torch::torch_arange(start, end))) }, #' @description Choose random values from a tensor for a given number of samples #' @param choices_tsr The tensor to choose values from #' @param nb_sample The number of samples to choose #' @param use_replace A boolean indicating if the sampling should be done with replacement. #' Defaults to FALSE #' @param weights_tsr The weights to use for the sampling. If NULL, the sampling is uniform. #' Defaults to NULL #' @return A new tensor of randomly chosen values from a tensor rand_choice = function(choices_tsr, nb_sample, use_replace = FALSE, weights_tsr = NULL) { if (is.null(weights_tsr)) { weights_tsr <- self$ones(choices_tsr$shape) } if (choices_tsr$shape != weights_tsr$shape) { stop('Choices and weights tensors must have the same shape') } choices_indx <- torch::torch_multinomial(weights_tsr, nb_sample, use_replace) return(choices_tsr[choices_indx]) }, #' @description Efficiently compute the kronecker product of two matrices in tensor format #' @param a The first tensor #' @param b The second tensor #' @return The kronecker product of the two matrices kronecker_prod = function(a, b) { return(torch::torch_kron(a, b)) }, #' @description Efficiently compute the khatri rao product of two matrices in tensor format #' having the same number of columns #' @param a The first tensor #' @param b The second tensor #' @return The khatri rao product of the two matrices khatri_rao_prod = function(a, b) { if (a$shape[2] != b$shape[2]) { stop( sprintf( 'Matrices must have the same number of columns to perform khatri rao product, got %i and %i', a$shape[2], b$shape[2] ) ) } return(torch::torch_reshape( torch::torch_einsum("ac,bc->abc", c(a, b)), c(-1, a$shape[2]) )) } ), ) #' @title Tensor Operator Singleton #' #' @description Singleton instance of the \code{TensorOperator} class that contains #' all informations related the tensor API; tensor methods, used data type and used device. #' #' @export TSR <- TensorOperator$new()
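# Illustrative note: the default jitter added to kernel matrices follows the floating
# point type configured on the TSR singleton (see get_default_jitter above). Wrapped
# in `if (FALSE)` so it never executes on source.
if (FALSE) {
    TSR$set_params(fp_type = 'float32')
    TSR$get_default_jitter()  # 1e-4
    TSR$set_params(fp_type = 'float64')
    TSR$get_default_jitter()  # 1e-8
}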
/scratch/gouwar.j/cran-all/cranData/BKTR/R/tensor_ops.R
#' @import torch #' @include tensor_ops.R #' @importFrom stats rWishart # Private utility function to do a cross join between two data.tables. # Taken from https://github.com/Rdatatable/data.table/issues/1717 cross_join_dt <- function(...) { rows <- do.call(CJ, lapply( list(...), function(x) if (is.data.frame(x)) seq_len(nrow(x)) else seq_along(x) )) do.call(data.table, Map(function(x, y) x[y], list(...), rows)) } #' Function used to transform covariates coming from two dataframes one for spatial and #' one for temporal into a single dataframe with the right shape for the BKTR Regressor. #' This is useful when the temporal covariates do not vary trough space and the spatial #' covariates do not vary trough time (Like in the BIXI example). The function also adds #' a column for the target variable at the beginning of the dataframe. #' @param spatial_df data.table: Spatial covariates dataframe with an index named #' location and a shape of (n_locations, n_spatial_covariates) #' @param temporal_df data.table: Temporal covariates dataframe with an index named #' time and a shape of (n_times, n_temporal_covariates) #' @param y_df data.table: The dataframe containing the target variable. It should have #' a shape of (n_locations, n_times). The columns and index names of this dataframe #' should be correspond to the one of the spatial_df and temporal_df. #' @param y_column_name string: The name of the target variable column in y_df. Default #' to 'y'. #' @return data.table: The reshaped covariates dataframe with a shape of #' (n_locations * n_times, 1 + n_spatial_covariates + n_temporal_covariates). #' The first two columns are the indexes (location, time), the following column #' is the target variable and the other columns are the covariates. #' #' @examplesIf torch::torch_is_installed() #' # Let's reshape the BIXI dataframes without normalization #' new_data_df <- reshape_covariate_dfs( #' spatial_df = BKTR::bixi_spatial_features, #' temporal_df = BKTR::bixi_temporal_features, #' y_df = BKTR::bixi_station_departures, #' y_column_name = 'whole_nb_departure') #' # The resulting dataframe has the right shape to be a BKTRRegressor data_df #' head(new_data_df) #' #' @export reshape_covariate_dfs <- function( spatial_df, temporal_df, y_df, y_column_name = 'y' ) { spa_index_name <- 'location' temp_index_name <- 'time' if ( is.null(key(spatial_df)) || is.null(key(y_df)) || key(spatial_df) != c(spa_index_name) || key(y_df) != c(spa_index_name) ) { stop(paste('Key column names of spatial_df and y_df must be', spa_index_name)) } if (is.null(key(temporal_df)) || key(temporal_df) != c(temp_index_name)) { stop(paste('Key column name of temporal_df must be', temp_index_name)) } spatial_df_cp <- spatial_df temporal_df_cp <- temporal_df y_df_cp <- y_df # Sort indexes and columns # Indexes (keys) are already sorted by data.table y_df_col_names <- sort(colnames(y_df_cp)[!colnames(y_df_cp) == spa_index_name]) setcolorder(y_df_cp, c(spa_index_name, y_df_col_names)) # Compare indexes values if (!identical(spatial_df_cp[[key(spatial_df_cp)[1]]], y_df_cp[[key(y_df_cp)[1]]])) { stop('Index values of spatial_df and y_df must be the same') } if (!identical(as.character(temporal_df_cp[[key(temporal_df_cp)[1]]]), y_df_col_names)) { stop('temporal_df index values and y_df columns names must be the same') } data_df <- cross_join_dt(spatial_df_cp, temporal_df_cp) setkeyv(data_df, c(spa_index_name, temp_index_name)) y_vals_mat <- as.matrix(y_df_cp[, y_df_col_names, with = FALSE]) y_flat_values <- as.vector(t(y_vals_mat)) 
data_df[, (y_column_name) := y_flat_values] setcolorder(data_df, c(spa_index_name, temp_index_name, y_column_name)) return(data_df) } #' @title Simulate Spatiotemporal Data Using Kernel Covariances. #' #' @param nb_locations Integer: Number of spatial locations #' @param nb_time_points Integer: Number of time points #' @param nb_spatial_dimensions Integer: Number of spatial dimensions #' @param spatial_scale Numeric: Spatial scale #' @param time_scale Numeric: Time scale #' @param spatial_covariates_means Vector: Spatial covariates means #' @param temporal_covariates_means Vector: Temporal covariates means #' @param spatial_kernel Kernel: Spatial kernel #' @param temporal_kernel Kernel: Temporal kernel #' @param noise_variance_scale Numeric: Noise variance scale # #' @return A list containing 4 dataframes: #' - `data_df` contains the response variable and the covariates #' - `spatial_positions_df` contains the spatial locations and their coordinates #' - `temporal_positions_df` contains the time points and their coordinates #' - `beta_df` contains the true beta coefficients #' @examplesIf torch::torch_is_installed() #' # Simulate data with 20 locations, 30 time points, in 2D spatial dimensions #' # with 3 spatial covariates and 2 temporal covariates #' simu_data <- simulate_spatiotemporal_data( #' nb_locations=20, #' nb_time_points=30, #' nb_spatial_dimensions=2, #' spatial_scale=10, #' time_scale=10, #' spatial_covariates_means=c(0, 2, 4), #' temporal_covariates_means=c(1, 3), #' spatial_kernel=KernelMatern$new(), #' temporal_kernel=KernelSE$new(), #' noise_variance_scale=1) #' #' # The dataframes are similar to bixi_data, we have: #' # - data_df #' head(simu_data$data_df) #' # - spatial_positions_df #' head(simu_data$spatial_positions_df) #' # - temporal_positions_df #' head(simu_data$temporal_positions_df) #' #' # We also obtain the true beta coefficients used to simulate the data #' head(simu_data$beta_df) #' #' @export simulate_spatiotemporal_data <- function( nb_locations, nb_time_points, nb_spatial_dimensions, spatial_scale, time_scale, spatial_covariates_means, temporal_covariates_means, spatial_kernel, temporal_kernel, noise_variance_scale ) { # Saving last fp_type to restore it at the end of the function # Using float64 to avoid numerical errors in simulation ini_fp_type <- TSR$fp_type TSR$set_params(fp_type = 'float64') spa_pos <- TSR$rand(c(nb_locations, nb_spatial_dimensions)) * spatial_scale temp_pos <- TSR$arange(0, nb_time_points - 1) * time_scale / (nb_time_points - 1) temp_pos <- temp_pos$reshape(c(nb_time_points, 1)) # Dimension labels s_dims <- get_dim_labels('s_dim', nb_spatial_dimensions) s_locs <- get_dim_labels('s_loc', nb_locations) t_points <- get_dim_labels('t_point', nb_time_points) s_covs <- get_dim_labels('s_cov', length(spatial_covariates_means)) t_covs <- get_dim_labels('t_cov', length(temporal_covariates_means)) spa_pos_df <- cbind(data.table(s_locs), data.table(as.matrix(spa_pos$cpu()))) setnames(spa_pos_df, c('location', s_dims)) setkeyv(spa_pos_df, 'location') temp_pos_df <- cbind(data.table(t_points), data.table(as.matrix(temp_pos$cpu()))) setnames(temp_pos_df, c('time', 'time_val')) setkeyv(temp_pos_df, 'time') spa_means <- TSR$tensor(spatial_covariates_means) nb_spa_covariates <- length(spa_means) spa_covariates <- TSR$randn(c(nb_locations, nb_spa_covariates)) spa_covariates <- spa_covariates + spa_means temp_means <- TSR$tensor(temporal_covariates_means) nb_temp_covariates <- length(temp_means) temp_covariates <- TSR$randn(c(nb_time_points, 
nb_temp_covariates)) temp_covariates <- temp_covariates + temp_means intercept_covariates <- TSR$ones(c(nb_locations, nb_time_points, 1)) covs <- torch::torch_cat( c( intercept_covariates, spa_covariates$unsqueeze(2)$expand(c(nb_locations, nb_time_points, nb_spa_covariates)), temp_covariates$unsqueeze(1)$expand(c(nb_locations, nb_time_points, nb_temp_covariates)) ), dim = 3 ) nb_covs <- 1 + nb_spa_covariates + nb_temp_covariates covs_covariance_mat <- rWishart(1, nb_covs, diag(nb_covs))[,,1] covs_covariance <- TSR$tensor(covs_covariance_mat) spatial_kernel$set_positions(spa_pos_df) spatial_covariance <- spatial_kernel$kernel_gen() temporal_kernel$set_positions(temp_pos_df) temporal_covariance <- temporal_kernel$kernel_gen() # Use Matrix Normal distribution to sample beta values (to reduce memory usage) # the second covariance matrix is the Kronecker product of temporal and covariates covariances chol_spa <- torch::linalg_cholesky(spatial_covariance) chol_temp_covs <- torch::linalg_cholesky( TSR$kronecker_prod(temporal_covariance, covs_covariance) ) temp_vals <- TSR$randn(c(nb_locations, nb_time_points * nb_covs)) beta_values <- ( chol_spa$matmul(temp_vals)$matmul(chol_temp_covs$t()) )$reshape(c(nb_locations, nb_time_points, nb_covs)) y_val <- torch::torch_einsum('ijk,ijk->ij', c(covs, beta_values)) err <- TSR$randn(c(nb_locations, nb_time_points)) * (noise_variance_scale ** 0.5) y_val <- y_val + err y_val <- y_val$reshape(c(nb_locations * nb_time_points, 1)) # We remove the intercept from the covariates covs <- covs$reshape(c(nb_locations * nb_time_points, nb_covs))[, 2:nb_covs] index_cols_df <- CJ(spa_pos_df[['location']], temp_pos_df[['time']]) setnames(index_cols_df, c('location', 'time')) data_df <- data.table(cbind(as.matrix(y_val$cpu()), as.matrix(covs$cpu()))) setnames(data_df, c('y', s_covs, t_covs)) data_df <- cbind(index_cols_df, data_df) setkeyv(data_df, c('location', 'time')) beta_df <- data.table(as.matrix(beta_values$reshape(c(nb_locations * nb_time_points, nb_covs))$cpu())) setnames(beta_df, c('Intercept', s_covs, t_covs)) beta_df <- cbind(index_cols_df, beta_df) setkeyv(beta_df, c('location', 'time')) TSR$set_params(fp_type = ini_fp_type) return(list( data_df = data_df, spatial_positions_df = spa_pos_df, temporal_positions_df = temp_pos_df, beta_df = beta_df )) } # Following are private utility functions #' @description Private utility function to get the dimension labels for a #' given dimension prefix and max value. #' @param dim_prefix String: The prefix of the dimension labels #' @param max_value Integer: The maximum value of the dimension labels #' @return String: The dimension labels #' #' @noRd get_dim_labels <- function(dim_prefix, max_value) { max_digits <- nchar(as.character(max_value - 1)) formatted_numbers <- formatC(0:(max_value - 1), width = max_digits, flag = "0") return(paste(dim_prefix, formatted_numbers, sep = "_")) } #' @description Get the index of a label in a list of labels. If the #' label is not in the list, raise an error. #' @param label Any: The label for which we want to get the index #' @param label_list Vector[Any]: The list of labels #' @param label_type String: The label type either 'spatial', 'temporal', 'feature'. 
#' @return Integer: The index of the label in the list #' #' @noRd get_label_index_or_raise <- function(label, label_list, label_type) { match_indx <- match(as.character(label), as.character(label_list)) if (is.na(match_indx)) { stop(sprintf('Label `%s` does not exist in %s labels.', label, label_type)) } return(match_indx) } #' @description return the indexes of a given set of labels that can #' be found in a list of available labels. #' @param labels vector: The labels for which we want to get the indexes #' @param available_labels vector: A vector of available labels #' @param label_type (spatial, temporal, feature): Type of label for #' which we want to get indexes #' @return The indexes of the labels in the vector of available labels #' #' @noRd get_label_indexes <- function(labels, available_labels, label_type) { if (length(labels) == 0) { stop(sprintf('No %s labels provided.', label_type)) } return(sapply(labels, function(x) get_label_index_or_raise(x, available_labels, label_type))) } #' @description Utility function to capitalize a string (only the first letter) #' @param str_val String: The string to capitalize #' @return String: The capitalized string #' #' @noRd capitalize_str <- function(str_val) { return( paste0( toupper(substr(str_val, 1, 1)), tolower(substr(str_val, 2, nchar(str_val))) ) ) } #' @description Utility function to truncate a string with ellipsis #' @param str_val String: The string to truncate #' @param trunc_len Integer: The maximum length of the string #' @return String: The truncated string #' #' @noRd trunc_str <- function(str_val, trunc_len) { if (trunc_len < 3) { stop('trunc_len must be at least 3') } if (nchar(str_val) <= trunc_len) { return(str_val) } return(paste0(substring(str_val, 1, trunc_len - 3), "...")) }
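
## Illustrative sketch (added example, not original package code): behaviour of the private label
## helpers defined above, i.e. zero-padded dimension labels, ellipsis truncation and capitalisation.
## These helpers are internal and not exported.
get_dim_labels('s_cov', 12)           # "s_cov_00" ... "s_cov_11"
trunc_str('a fairly long label', 10)  # "a fairl..."
capitalize_str('intercept')           # "Intercept"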
/scratch/gouwar.j/cran-all/cranData/BKTR/R/utils.R
###############################################################################
# Mango Solutions, Chippenham SN14 0SQ 2008
# posteriorFeasibility
# Author: Francisco
###############################################################################
# DESCRIPTION: Tries to assess the "feasibility" of a set of Black-Litterman views using the method described by Meucci and Fusai
#   in "Assessing Views". This method is based on the Mahalanobis distance between the posterior and prior mean
# KEYWORDS: math
# TODO: Appears not to be completely correct at the moment
###############################################################################

posteriorFeasibility <- function
(
    result    # BLResult class object
)
{
    views <- result@views
    qv <- views@qv
    P <- views@P
    numAssets <- length(assetSet(views))
    sigmaInv <- solve(result@priorCovar)
    # calculates the Mahalanobis distance as described by the paper
    mahal <- mahalanobis(result@posteriorMean, result@priorMean, cov = result@priorCovar, inverted = FALSE)
    mahalProb <- 1 - pchisq(mahal, df = numAssets)
    # omega is formed as in posteriorEst: view confidences are used unless kappa is non-zero
    if(result@kappa == 0)
        omega <- diag(1 / views@confidences)
    else
        omega <- result@kappa * tcrossprod(P %*% result@priorCovar, P)
    # if(result@tau != 1) warning("This function is not yet implemented for tau != 1, so the calculation of view sensitivities will proceed assuming tau = 1")
    # sensitivities <- -2 * dchisq(mahal, df = numAssets) * (solve(tcrossprod(P %*% result@priorCovar, P)
    #     + omega) %*% P %*% (result@posteriorMean - result@priorMean))
    list("mahalDist" = mahal, "mahalDistProb" = mahalProb, sensitivities = "Not implemented yet")
}
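
## Illustrative usage sketch (added example, not original package code): feasibility of a simple
## equal-weighted view, using the example data shipped with the package (monthlyReturns,
## sp500Returns, US13wTB); the q value and confidence below are made up for demonstration.
pick <- matrix(0, 1, ncol(monthlyReturns), dimnames = list(NULL, colnames(monthlyReturns)))
pick[1, 1:3] <- 1/3
views <- BLViews(pick, q = 0.05, confidences = 100, colnames(monthlyReturns))
post <- BLPosterior(monthlyReturns, views, tau = 1, sp500Returns, US13wTB)
posteriorFeasibility(post)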
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/BLAnalyzePost.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # createBLViews # Author: Francisco ############################################################################### # DESCRIPTION: Creates a set of B-L views with a GUI interface. Currently the only interface that can be used is the data editor # KEYWORDS: datagen ############################################################################### createBLViews <- function ( allAssets, # A vector of strings holding the names of all of the assets in one's universe numAssetViews = 1, # # Number of views to form assetSubset = NULL, # subset of one's universe that one actually wants to form views on mode = c("editor", "Window") # GUI to use, only editor used at the moment ) { mode <- match.arg(mode) .createBLViews.Editor(allAssets, numAssetViews, assetSubset) } ############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # createViews # Author: Francisco ############################################################################### # DESCRIPTION: Creates a set of B-L views using the data editor as a GUI # KEYWORDS: datagen ############################################################################### .createBLViews.Editor <- function ( allAssets, numViews = 1, stockSubset = NULL ) { #extract if( is.null(stockSubset) ) stockSubset <- allAssets if(length(stockSubset) < numViews) stop("The number of views to be formed should be greater than or equal to the number of stocks") viewsMatInit <- matrix(0, nrow = numViews, ncol = length(stockSubset) + 2, dimnames = list(NULL, c( stockSubset, "q", "confidence"))) viewsMat <- edit(viewsMatInit) P <- viewsMat[, -((ncol(viewsMat)-1):ncol(viewsMat)), drop = FALSE] qv <- viewsMat[,ncol(viewsMat) - 1 ] conf <- viewsMat[, ncol(viewsMat)] views <- try(BLViews(P,q = qv, conf, assetNames = allAssets)) if(inherits(views, "try-error")) stop("Incorrectly entered views, unable to initialize object") views } updateBLViews <- function ( views, includeNullViews = FALSE, numNewViews = 0, assets = NULL ) { if(!is.null(assets)) assets <- assetSet(views) # extract the "P" matrix and then remove those stocks for which we have not formed # any views at all, i.e. those with only 0 column entries if(!includeNullViews) P <- .removeZeroColumns(views@P) else P <- views@P temp <- colnames(P) newAssets <- setdiff(assets, temp) addCols <- matrix(0, nrow = nrow(P), ncol = length(newAssets), dimnames = list(NULL, newAssets)) # aggregate pre-existing views P <- cbind(P, addCols) if(numNewViews > 0) P <- rbind(P, matrix(0, ncol = ncol(P), nrow = numNewViews)) qv <- c(views@qv, rep(0, numNewViews)) confidences <- c(views@confidences, rep(0, numNewViews)) viewsMatInit <- cbind(P, "qv" = qv, "confidences" = confidences) viewsMat <- edit(viewsMatInit) v <- viewsMat[, -ncol(viewsMat)] conf <- viewsMat[, ncol(viewsMat)] BLViews(P = v[, -ncol(v)], q = v[,ncol(v)], confidences = conf, assetNames = colnames(P)) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/BLGUI.r
###############################################################################
# Mango Solutions, Chippenham SN14 0SQ 2008
# BLCOPOptions
# Author: Francisco
###############################################################################
# DESCRIPTION: Sets or retrieves the package's global settings. See the online help for these.
# KEYWORDS: environment
###############################################################################

BLCOPOptions <- function
(
    opt,       # string with the option to be retrieved or changed
    setting    # new setting for the value. Note: currently not error checked!
)
{
    if(missing(opt) && missing(setting))
        return(.BLEnv$settings)
    if(missing(setting))
        return(.BLEnv$settings[[opt]])
    .BLEnv$settings[[opt]] <- setting
}
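
## Illustrative usage sketch (added example, not original package code):
BLCOPOptions("numSimulations")           # query the current default number of simulations
BLCOPOptions("numSimulations", 10000)    # override it, e.g. for quicker experiments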
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/BLOptions.R
############################################################################### # Mango Solutions # posteriorEst # Author: Francisco # $Rev: 4763 $ # $LastChangedDate: 2010-02-23 20:32:39 +0000 (Tue, 23 Feb 2010) $ # ############################################################################### # DESCRIPTION: Computes the Black-Litterman posterior estimate # KEYWORDS: math ############################################################################### #' This function performs the "core" calculation of the Black-Litterman model. #' @param views An object of class BLViews #' @param mu A vector of mean equilibrium returns #' @param tau The "tau" parameter in the Black-Litterman model. #' @param sigma The variance-covariance matrix of the returns of the assets #' @param kappa if greater than 0, the confidences in each view are replaced. See the online help for details #' @return An object of class BLResult holding the updated Black-Litterman posterior #' @author Francisco #' @export posteriorEst <- function ( views, # full BLview object. mu, # Equilibrium expected returns tau = 0.5, # Degree of uncertainty in prior sigma, # variance-covariance matrix of asset returns kappa = 0 # if greater than 0, the view confidences will be ignored and the # omega matrix in the BL model will be replaced by kappa * P %*% sigma %*% t(P) ) { # preallocate the return numAssets <- length(assetSet(views)) P <- views@P if(kappa == 0) { if(length(views@confidences) > 1) omega <- diag( 1/ views@confidences) else omega <- matrix(1 / views@confidences, 1,1) } else { omega <- kappa * tcrossprod(P %*% sigma, P) omegaInv <- solve(omega) } qv <- views@qv sigmaInv <- solve(sigma) # The following steps are the core Black-Litterman calculations temp <- tcrossprod(sigma, P) postMu <- mu + tau * temp %*% solve(tau * P %*% temp + omega, qv - P %*% mu) postMu <- as.numeric(postMu) postSigma <- (1 + tau) * sigma - tau^2 * temp %*% solve(tau * P %*% temp + omega, P %*% sigma) names(mu) <- assetSet(views) names(postMu) <- assetSet(views) new("BLResult", views = views, tau = tau, priorMean = mu, priorCovar = sigma, posteriorMean = postMu, posteriorCovar = postSigma, kappa = kappa ) } #' BLposterior #' @param returns A matrix of time series of returns. The columns should correspond to individual assets. #' @param views An object of class BLViews #' @param tau The "tau" parameter in the Black-Litterman model. #' @param marketIndex A set of returns of a market index. #' @param riskFree A time series of risk-free rates of return. Defaults to 0 #' @param kappa if greater than 0, the confidences in each view are replaced. See the online help for details #' @param covEstimator A string holding the name of the function that should be used to estimate the variance-covariance matrix. #' This function should simply return a matrix. #' @return An object of class BLResult #' @author Francisco #' @export BLPosterior <- function ( returns, views, tau = 1, marketIndex, riskFree = NULL, kappa = 0, covEstimator = "cov" ) { covEstimator <- match.fun(covEstimator) alphaInfo <- CAPMList(returns, marketIndex, riskFree = riskFree) post <- posteriorEst(views, tau = tau, mu = alphaInfo[["alphas"]], sigma = unclass(covEstimator(returns)), kappa = kappa) post }
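
## Illustrative usage sketch (added example, not original package code): a direct call to
## posteriorEst() on a toy two-asset problem. All figures are made up purely for demonstration.
sigma <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2, dimnames = list(c("A", "B"), c("A", "B")))
mu <- c(A = 0.05, B = 0.07)
pick <- matrix(c(1, -1), nrow = 1, dimnames = list(NULL, c("A", "B")))
views <- BLViews(pick, q = 0.02, confidences = 100, assetNames = c("A", "B"))
posteriorEst(views, mu = mu, tau = 0.5, sigma = sigma)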
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/BLPost.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # addBLViews # Author: Francisco ############################################################################### # DESCRIPTION: Adds a new set of views # KEYWORDS: datagen # TODO: Add the ability to add entirely new assets ############################################################################### addBLViews <- function ( pickMatrix, # pickMatrix matrix. q, # mean vector confidences, # vector of confidences in the new views views # pre-existing views to add to ) { # try to force the view input to be a matrix just in case if its a vector if(!is(pickMatrix, "matrix")) pickMatrix <- matrix(pickMatrix, nrow = 1, ncol = length(pickMatrix), dimnames = list(NULL, names(pickMatrix)) ) if(is.null(colnames(pickMatrix))) { warning("Missing asset names in the pickMatrix matrix, assigning them automatically") dimnames(pickMatrix) <- list(rownames(pickMatrix), assetSet(views)[1:ncol(pickMatrix)]) } sNames <- colnames(pickMatrix) assetNames <- assetSet(views) # find the indices of the names of the assets which occur in the new views # within the vector of asset names of the already-existing views positions <- match(sNames, assetNames) if(any(is.na(positions))) stop("Some asset names in the new views matrix did not have matches to the assetnames in the first object") P <- matrix(0, ncol = length(assetNames), nrow = nrow(pickMatrix), dimnames = list(NULL, assetNames)) P[, positions ] <- pickMatrix # create a new set of views q <- c(views@qv, q) names(q) <- NULL views <- new("BLViews", "P" = rbind(views@P, P), "qv" = q, "confidences" = c(views@confidences, confidences), "assets" = assetNames) }
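
## Illustrative usage sketch (added example, not original package code): appending a second view
## to an existing toy BLViews object; the figures are made up.
views <- BLViews(matrix(c(1, 0), nrow = 1, dimnames = list(NULL, c("A", "B"))),
                 q = 0.03, confidences = 50, assetNames = c("A", "B"))
newPick <- matrix(c(0, 1), nrow = 1, dimnames = list(NULL, c("A", "B")))
views <- addBLViews(newPick, q = 0.01, confidences = 80, views)
views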
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/BLbuildViews.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # addCOPViews # Author: Francisco ############################################################################### # DESCRIPTION: Adds views specified in a pick matrix, set of confidences and view distributions to a pre-existing # view object # KEYWORDS: datagen, manip ############################################################################### addCOPViews <- function ( pickMatrix, # pick matrix viewDist, # list of distribution of views confidences, # vector of confidences in views views # pre-existing object to add views to ) { sNames <- colnames(pickMatrix) if(is.null(sNames)) stop("Missing asset names in pick matrix, cannot input views") assets <- assetSet(views) positions <- match(sNames, assets) if(any(is.na(positions))) stop("Some asset names in the pick matrix did not have matches to list of provided assets") # construct a new pick matrix to incorporate old and new views P <- matrix(0, ncol = length(assets), nrow = nrow(pickMatrix), dimnames = list(NULL, assets)) # insert the new views P[, positions ] <- pickMatrix new("COPViews", "pick" = rbind(views@pick, P), viewDist = c(views@viewDist, viewDist), "confidences" = c(views@confidences, confidences), "assets" = assets) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/COPBuildViews.r
###############################################################################
# Mango Solutions, Chippenham SN14 0SQ 2008
# COPPosterior
# Author: Francisco
###############################################################################
# DESCRIPTION: Calculates the posterior distribution of "the market" given a set of views. The posterior is returned
#   in the form of a set of simulations.
# KEYWORDS: math
###############################################################################

COPPosterior <- function
(
    marketDist,       # mvdistribution object that defines the distribution of "the market"
    views,            # COPViews object
    numSimulations = BLCOPOptions("numSimulations")    # number of samples to use for each monte-carlo simulation
)
{
    # generate simulations of the market distribution and order them.
    marketSimulations <- t(sampleFrom(marketDist, numSimulations))
    # now simulate from each subjective view
    subjSimulations <- sapply(views@viewDist, sampleFrom, n = numSimulations)
    numViews <- length(views@viewDist)

    # compute the orthogonal complement of the pick matrix
    nullPick <- t(Null(t(views@pick)))
    pick <- views@pick
    # calculate the product of "the market" with the pick matrix
    impliedViews <- pick %*% marketSimulations
    # calculate the orthogonal complement of the above product
    complement <- nullPick %*% marketSimulations

    # Now generate samples from blended views and "implied market views".
    .innerChoiceSample <- function(conf)
    {
        sample(0:1, prob = c(1 - conf, conf), numSimulations, replace = TRUE)
    }
    choices <- t(sapply(views@confidences, .innerChoiceSample))

    combinedSimulations <- matrix(0, nrow = numViews, ncol = numSimulations)
    combinedSimulations[choices == 0] <- impliedViews[choices == 0]
    combinedSimulations[choices == 1] <- t(subjSimulations)[choices == 1]
    # combinedSimulations <- (1-views@confidences) * impliedViews + views@confidences * t(subjSimulations)

    impliedCopula <- array(dim = dim(impliedViews))
    pooledSimulations <- array(dim = dim(combinedSimulations))
    # compute the copula of the implied views
    for(i in 1:nrow(impliedViews))
    {
        cdf <- .empCDF(impliedViews[i,])
        impliedCopula[i,] <- cdf(impliedViews[i,])
        quant <- .empQuantile(combinedSimulations[i,])
        pooledSimulations[i,] <- quant(impliedCopula[i,])
    }
    # rotate back to "market coordinates"
    rotMatrix <- solve(rbind(pick, nullPick))
    result <- t(rotMatrix %*% rbind(pooledSimulations, complement))
    colnames(result) <- assetSet(views)

    new("COPResult", views = views, marketDist = marketDist, posteriorSims = result)
}
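
## Illustrative usage sketch (added example, not original package code): posterior simulations for
## a single bearish view on the DAX, following the set-up used in the package unit tests.
## Requires the mnormt package (for rmt()) to be attached.
library(mnormt)
dispersion <- c(.376, .253, .360, .333, .360, .600, .397, .396, .578, .775) / 1000
sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4)
mu <- as.numeric(2.5 * sigma %*% rep(1/4, 4))
marketDist <- mvdistribution("mt", mean = mu, S = sigma, df = 5)
pick <- matrix(0, 1, 4, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX")))
pick[1, 4] <- 1
views <- COPViews(pick, list(distribution("unif", min = -0.02, max = 0)), 0.2,
                  c("SP", "FTSE", "CAC", "DAX"))
posterior <- COPPosterior(marketDist, views, numSimulations = 1000)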
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/COPPosterior.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # createCOPViews # Author: Francisco ############################################################################### # DESCRIPTION: Creates _part_ of a set of COP views with a GUI interface. Currently the only interface that can be used is the # data editor, and no method exists for specifying the views graphically. Only the pick matrices and confidences can be set with this. # KEYWORDS: datagen ############################################################################### createCOPViews <- function( allAssets, # Asset universe numAssetViews = 1, # # Number of views to form assetSubset = NULL, # Subset of asset universe to form views on mode = c("editor", "Window") # Currently unused. ) { # stopifnot((is.null(stockSet) && numFactorViews == 0) || ( !is.null(stockSet) && numFactorViews > 0)) mode <- match.arg(mode) .createCOPViews.Editor(allAssets, numAssetViews, assetSubset) } .createCOPViews.Editor <- function ( allAssets, numViews = 1, assetSubset = NULL ) { DEFAULTCONFIDENCE <- 1/10000 #extract if( is.null(assetSubset) ) assetSubset <- allAssets if(length(assetSubset) < numViews) stop("The number of views to be formed should be greater than or equal to the number of assets") viewsMatInit <- matrix(0, nrow = numViews, ncol = length(assetSubset) + 1, dimnames = list(NULL, c( assetSubset, "confidence"))) viewsMat <- edit(viewsMatInit) # the pick matrix excludes the last column, which has the confidences P <- viewsMat[, -ncol(viewsMat), drop = FALSE] conf <- viewsMat[, "confidence"] viewDist <- lapply(rep("norm", numViews), distribution, mean = 0, sd = DEFAULTCONFIDENCE) views <- try(COPViews(P, viewDist, conf,assetNames = allAssets)) if(inherits(views, "try-error")) stop("Incorrectly entered views, unable to initialize object") views }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/COPUI.r
###############################################################################
# Mango Solutions, Chippenham SN14 0SQ 2008
# CAPMList
# Author: Francisco
###############################################################################
# DESCRIPTION: Computes the CAPM alphas and betas of a set of returns with a market index and a given risk-free rate
# KEYWORDS: math
###############################################################################

CAPMList <- function
(
    returns,                             # matrix of returns
    marketIndex,                         # vector or time series of market index
    riskFree = NULL,                     # risk-free rate of return
    regFunc = BLCOPOptions("regFunc"),   # function to use to perform regression
    coeffExtractFunc = NULL,             # function to extract intercept and betas of regression
    ...                                  # additional parameters to the regression function
)
{
    CAPMfits <- vector(mode = "list", length = ncol(returns))
    regFunc <- match.fun(regFunc)
    # if risk-free rate is missing, replace it with 0
    if(is.null(riskFree))
        riskFree <- rep(0, length(marketIndex))
    for(i in 1:ncol(returns))
        CAPMfits[[i]] <- regFunc(returns[,i] - riskFree ~ I(marketIndex - riskFree), ...)
    coeffs <- lapply(CAPMfits, coef)
    # Note: this is not a generic function as it does not work for all results
    # from linear regression functions
    if(is.null(coeffExtractFunc))
    {
        coeffExtractFunc <- function(fit)
        {
            c(fit["(Intercept)"], fit["I(marketIndex - riskFree)"])
        }
    }
    results <- try(sapply(coeffs, coeffExtractFunc))
    if(inherits(results, "try-error"))
        stop("Unable to extract coefficients from regression results")
    else
    {
        data.frame("alphas" = results["(Intercept)",], "betas" = results["I(marketIndex - riskFree)",],
            row.names = colnames(returns))
    }
}
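
## Illustrative usage sketch (added example, not original package code): CAPM alphas and betas for
## the example monthly returns shipped with the package, regressed against the S&P 500 with the
## 13-week T-bill as the risk-free rate (as in the unit tests).
CAPMList(monthlyReturns, marketIndex = sp500Returns, riskFree = US13wTB)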
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/calcalphas.r
######################
# validity functions
######################

.BLViews.valid <- function(object)
{
    numViews <- c(nrow(object@P), length(object@qv), length(object@confidences))
    if(length(unique(numViews)) != 1)
        return("Inconsistent number of views implied\n")
    if(any(object@confidences < 0))
        return("Negative confidences")
    if(!setequal(colnames(object@P), object@assets))
        return("asset names not consistent with P's column names")
    return(TRUE)
}

.BLResult.valid <- function(object)
{
    if(length(object@posteriorMean) != length(object@priorMean))
    {
        return(FALSE)
    }
    # the prior and posterior covariance matrices must have the same dimensions
    if(!all(dim(object@priorCovar) == dim(object@posteriorCovar)))
        return(FALSE)
    TRUE
}

.COPViews.valid <- function(object)
{
    # All of these quantities should be equal
    numViews <- c(nrow(object@pick), length(object@viewDist), length(object@confidences))
    # check if any don't match
    if(length(unique(numViews)) != 1)
        return("Inconsistent number of views implied!\n")
    if(any(object@confidences < 0 | object@confidences > 1))
        return("Confidences must lie between 0 and 1\n")
    if(!setequal(colnames(object@pick), object@assets))
        return("asset names not consistent with pick matrix's column names\n")
    return(TRUE)
}

.COPResult.valid <- function(object)
{
    if(length(object@views@assets) != ncol(object@posteriorSims))
        return(FALSE)
    return(TRUE)
}

setClass("BLViews", representation(P = "matrix", qv = "numeric", confidences = "numeric", assets = "character"),
    validity = .BLViews.valid)

setClass("BLResult", representation(views = "BLViews", tau = "numeric", priorMean = "numeric", priorCovar = "matrix",
    posteriorMean = "numeric", posteriorCovar = "matrix", kappa = "numeric"), validity = .BLResult.valid)

setClass("distribution", representation(RName = "character", parameters = "numeric"))

setClass("mvdistribution", representation(RName = "character", parameters = "list"))

setClass("COPViews", representation(pick = "matrix", viewDist = "list", confidences = "numeric", assets = "character"),
    validity = .COPViews.valid)

setClass("COPResult", representation(views = "COPViews", marketDist = "mvdistribution", posteriorSims = "matrix"),
    validity = .COPResult.valid)
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/classesmethods.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # COPViews # Author: Francisco ############################################################################### # DESCRIPTION: Constructor function for the COPViews object # KEYWORDS: utilities ############################################################################### COPViews <- function ( pickMatrix, # View matrix viewDist, # list of marginal distributions of views confidences, # vector confidences in views assetNames # names of assets in one's "universe" ) { if(is.null(colnames(pickMatrix))) colnames(pickMatrix) <- assetNames new("COPViews", pick = pickMatrix, viewDist = viewDist, confidences = confidences, assets = assetNames) } ############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # distribution # Author: Francisco ############################################################################### # DESCRIPTION: Constructor function for a distribution class object # KEYWORDS: utilities ############################################################################### distribution <- function ( RName, # string holding the "R name" / suffix for an R distribution. e.g. for a normal distribution this would be "norm" ... # additional parameters for defining a distribution ) { # construct the names of the associated distribution functions distFunctions <- paste(c("r" ,"d", "p", "q"), RName, sep = "") # at least the sampling function must exist for COP to be used if(!exists(distFunctions[1])) stop("Sampling function for this distribution does not exist!") samplingFun <- try(match.fun(distFunctions[1])) if("try-error" %in% class(samplingFun)) stop(paste(samplingFun, "seems not to be a function!" )) if(!all(vapply(distFunctions[-1], exists, logical(1)))) warning("Some functions associated to this distribution could not be found") # extract the parameters passed in through the ellipsis as a named vector. Then check that the sampling function actually accepts # all of these parameters distParams <- as.numeric(list(...)) names(distParams) <- names(list(...)) expectedDistParams <- names(formals(distFunctions[1])) stopifnot(all(names(distParams) %in% expectedDistParams)) new("distribution", RName = RName, parameters = distParams) } ############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # mvdistribution # Author: Francisco ############################################################################### # DESCRIPTION: Constructor function for the multivariate distribution class object # KEYWORDS: utilities ############################################################################### mvdistribution <- function ( RName, #string holding the "R name" / suffix for an R distribution. e.g. for a normal distribution this would be "norm" ... # additional parameters for defining a distribution ) { distFunctions <- paste(c("r" ,"d", "p", "q"), RName, sep = "") if(!exists(distFunctions[1])) stop("Sampling function for this distribution does not exist!") if(!all(vapply(distFunctions[-1], exists, logical(1)))) warning("Some functions associated to this distribution could not be found") samplingFun <- try(match.fun(distFunctions[1])) if("try-error" %in% class(samplingFun)) stop(paste(samplingFun, "seems not to be a function!" )) distParams <- list(...) 
names(distParams) <- names(distParams) expectedDistParams <- names(formals(distFunctions[1])) stopifnot(all(names(distParams) %in% expectedDistParams)) new("mvdistribution", RName = RName, parameters = distParams) } ############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # BLViews # Author: Francisco ############################################################################### # DESCRIPTION: Constructor function for a BLViews object. # KEYWORDS: datagen ############################################################################### BLViews <- function ( P, # Pick matrix q, # vector of "q" values in the Black-Litterman model confidences, # vector of confidences in views assetNames # names of assets ) { if(is.null(colnames(P))) dimnames(P) <- list(rownames(P), assetNames) names(q) <- NULL new("BLViews", P = P, qv = q, confidences = confidences, assets = assetNames) }
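
## Illustrative usage sketch (added example, not original package code): defining view
## distributions. distribution() only checks that the matching r<name> sampler exists and
## accepts the supplied parameters.
d1 <- distribution("unif", min = -0.02, max = 0)
d2 <- distribution("norm", mean = 0.01, sd = 0.005)
## A multivariate Student-t market distribution; needs mnormt attached so that rmt() is found.
# m <- mvdistribution("mt", mean = rep(0, 4), S = diag(4), df = 5)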
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/constructors.r
###############################################################################
# Mango Solutions, Chippenham SN14 0SQ 2008
# deleteViews
# Author: Francisco
###############################################################################
# DESCRIPTION: Generic function that deletes a vector of views (which correspond to rows of the pick matrix) from a view object. Returns
#   the view object with the views deleted
# KEYWORDS: manip, utilities
###############################################################################

deleteViews <- function(views, viewsToDel)
{
    invisible(NULL)
}

setGeneric("deleteViews")

deleteViews.BLViews <- function(views, viewsToDel)
{
    if(any(viewsToDel > nrow(views@P)))
    {
        warning("Attempting to delete some non-existent views, ignoring")
        # keep only the indices that refer to existing views
        viewsToDel <- viewsToDel[viewsToDel <= nrow(views@P)]
    }
    views@P <- views@P[-viewsToDel, , drop = FALSE]
    views@qv <- views@qv[-viewsToDel]
    views@confidences <- views@confidences[-viewsToDel]
    views
}

setMethod("deleteViews", signature(views = "BLViews"), deleteViews.BLViews)

deleteViews.COPViews <- function(views, viewsToDel)
{
    if(any(viewsToDel > nrow(views@pick)))
    {
        warning("Attempting to delete some non-existent views, ignoring")
        # keep only the indices that refer to existing views
        viewsToDel <- viewsToDel[viewsToDel <= nrow(views@pick)]
    }
    views@pick <- views@pick[-viewsToDel, , drop = FALSE]
    views@viewDist <- views@viewDist[-viewsToDel]
    views@confidences <- views@confidences[-viewsToDel]
    views
}

setMethod("deleteViews", signature(views = "COPViews"), deleteViews.COPViews)
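
## Illustrative usage sketch (added example, not original package code): dropping the second of
## two toy views; the figures are made up.
pick <- matrix(c(1, 0, 0, 1), 2, 2, byrow = TRUE, dimnames = list(NULL, c("A", "B")))
views <- BLViews(pick, q = c(0.05, 0.02), confidences = c(10, 20), assetNames = c("A", "B"))
deleteViews(views, 2)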
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/deleteViews.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # densityPlots # Author: Francisco ############################################################################### # DESCRIPTION: Generic function that displays density plots of the prior and posterior distributions # KEYWORDS: hplot ############################################################################### densityPlots <- function(result, assetsSel = NULL, numSimulations = BLCOPOptions("numSimulations"), ...) { stop("Not implemented for this class") } setGeneric("densityPlots") densityPlots.COPViews <- function(result, assetsSel = seq(along = result@views@assets) , numSimulations = BLCOPOptions("numSimulations") ,...) { marketSims <- sampleFrom(result@marketDist, numSimulations)[,drop=FALSE] colnames(marketSims) <- assetSet(result@views) for(i in seq(along = assetsSel)) { sims <- tail(result@posteriorSims[, assetsSel[i]], numSimulations) plot(density(sims), col = "blue", xlab = result@views@assets[assetsSel[i]], main = "Kernel density estimates of posterior and prior", ...) lines(density(marketSims[,assetsSel[i]]), col = "black", ...) abline(v = mean(sims, lty = 2, col = "blue")) abline(v = mean(marketSims[,i]), lty = 2, col = "black") } legend(x = "topright", legend = c("Posterior", "Prior"), lty = c(1,1), col = c("blue", "black")) } setMethod("densityPlots", signature(result = "COPResult"), densityPlots.COPViews) densityPlots.BLResult <- function(result, assetsSel = seq(along = assetSet(result@views)) , numSimulations = BLCOPOptions("numSimulations"), ...) { for(i in seq(along = assetsSel)) { postMean <- result@posteriorMean[assetsSel[i]] priorMean <- result@priorMean[assetsSel[i]] postStDev <- sqrt(result@posteriorCovar[assetsSel[i],assetsSel[i]] ) priorStDev <- sqrt(result@priorCovar[assetsSel[i],assetsSel[i]]) plotDispersion <- max(postStDev, priorStDev) x <- seq(from = min(priorMean,postMean) - 2.5 * abs(plotDispersion), to = max(priorMean,postMean) + 2.5 * abs(plotDispersion), length = 200) xLabel <- if(is.character(assetsSel)) assetsSel[i] else result@views@assets[i] if(dnorm(postMean, mean = postMean, sd = postStDev) < dnorm(priorMean, mean = priorMean, sd = priorStDev)) { plot(x, dnorm(x, mean = priorMean, sd = priorStDev), col = "black", type = "l",..., ylab = "Density", xlab = xLabel) abline(v = priorMean, lty = 2, col = "black") lines(x, dnorm(x, mean = postMean, sd = postStDev), col = "blue", type = "l",...) abline(v = postMean, lty = 2, col = "blue") legend(x = "topright", legend = c("Prior", "Posterior"), lty = c(1,1), col = c("black", "blue")) } else { plot(x, dnorm(x, mean = postMean, sd = postStDev), col = "blue", type = "l",..., ylab = "Density", xlab = xLabel) abline(v = postMean, lty = 2, col = "blue") lines(x, dnorm(x, mean = priorMean, sd = priorStDev), col = "black", type = "l",...) abline(v = priorMean, lty = 2, col = "black") legend(x = "topright", legend = c("Prior", "Posterior"), lty = c(1,1), col = c("black", "blue")) } } } setMethod("densityPlots", signature(result = "BLResult"), densityPlots.BLResult) biDensityPlots <- function(result, assetsSel , numSimulations = BLCOPOptions("numSimulations"), nBins, ...) 
{ .assertClass(result, "COPResult") stopifnot(length(assetsSel) == 2) marketSims <- sampleFrom(result@marketDist, numSimulations) colnames(marketSims) <- assetSet(result@views) marketSims <- marketSims[,assetsSel,drop=FALSE] sims <- tail(result@posteriorSims, numSimulations)[,assetsSel] hexBin <- hexBinning(sims, bins = nBins) par(mfrow = c(1,2)) plot(hexBin, xlab = assetsSel[1], ylab = assetsSel[2], main = "Posterior", col = rev(greyPalette(nBins))) hexBin <- hexBinning(marketSims, bins = nBins) plot(hexBin, xlab = assetsSel[1], ylab = assetsSel[2], main = "Prior", col = seqPalette(nBins)) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/densityPlots.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # extractreplace.R # Author: Francisco ############################################################################### # DESCRIPTION: A set of utility functions for extracting and replacing data in COPViews and BLViews objects # KEYWORDS: hplot ############################################################################### viewMatrix <- function(views, dropZeroColumns = TRUE) { .assertClass(views, "BLViews") P <- views@P if(dropZeroColumns) { isZeroColumn <- apply(P==0, 2, all) P <- P[,!isZeroColumn, drop = FALSE] } cbind(P, "q" = views@qv) } "PMatrix<-" <- function(views, value) { stopifnot(nrow(views@P) >= nrow(value)) # stopifnot() views } PMatrix <- function(views) { .assertClass(views, c("BLViews", "COPViews")) # TODO: rename "P" to "pick" in BLViews if(class(views) %in% "BLViews") views@P else views@pick } assetSet <- function(views) { views@assets } "qv<-" <- function(views, value) { .assertClass(views, "BLViews") if(length(value) != length(views@qv)) { warning("Vector qv is of incorrect length, will not replace") return(views) } views@qv <- value views } "confidences<-" <- function(views, value) { .assertClass(views, c("BLViews", "COPViews")) if(length(value) != length(views@confidences)) { warning("value is of incorrect length, will not replace") return(views) } views@confidences <- value views } confidences <- function(views) { .assertClass(views, c("BLViews", "COPViews")) views@confidences } # TODO: unit test posteriorMeanCov <- function(posterior) { .assertClass(posterior, "BLResult") list("covariance" = posterior@posteriorCovar, "mean" = posterior@posteriorMean) } ## Extracts the matrix of posterior simulations from a COPPosterior object. ## Return A matrix with named columns. posteriorSimulations <- function(posterior) { .assertClass(posterior, "COPResult") posterior@posteriorSims } numSimulations <- function(posterior) { .assertClass(posterior, "COPResult") nrow(posterior@posteriorSims) } priorViews <- function(posterior) { .assertClass(posterior, "COPResult") posterior@views }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/extractreplace.r
################
# Methods
################

show.BLViews <- function(object)
{
    .innerViewString <- function(p)
    {
        # p should be a row in the "P" matrix with the q value appended;
        # only nonzero elements really constitute "views"
        qv <- tail(p, 1)
        x <- head(p, -1)[head(p, -1) != 0]
        tmp <- paste(as.character(x), names(x), sep = "*", collapse = "+")
        tmp <- paste(tmp, qv, sep = "=")
    }
    viewStrings <- apply(viewMatrix(object), 1, .innerViewString)
    for(i in seq(along = viewStrings))
        cat(i, ":", viewStrings[i], " + eps. Confidence:", object@confidences[i], " \n")
}

show.BLResult <- function(object)
{
    cat("Prior means:\n")
    show(object@priorMean)
    cat("Posterior means:\n")
    show(object@posteriorMean)
    cat("Posterior covariance:\n")
    show(object@posteriorCovar)
}

show.COPResult <- function(object)
{
    cat(paste("Asset set: ", paste(assetSet(priorViews(object)), collapse = ","), "\n"))
    cat("Views used to generate this posterior: \n")
    show(priorViews(object))
    cat("Number of simulations:", numSimulations(object), "\n")
}

show.COPViews <- function(object)
{
    for(i in 1:nrow(object@pick))
    {
        x <- object@pick[i, object@pick[i,] != 0]
        tmp <- paste("(", paste(names(object@viewDist[[i]]@parameters), object@viewDist[[i]]@parameters,
            collapse = ",", sep = "="), ")", sep = "")
        distString <- paste(object@viewDist[[i]]@RName, tmp, sep = ":")
        tmp <- paste(as.character(x), names(x), sep = "*", collapse = "+")
        print(paste(tmp, distString, sep = "~"))
    }
}

setMethod("show", signature(object = "BLViews"), show.BLViews)
setMethod("show", signature(object = "BLResult"), show.BLResult)
setMethod("show", signature(object = "COPViews"), show.COPViews)
setMethod("show", signature(object = "COPResult"), show.COPResult)
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/methods.R
.optimalWeights.simpleMV <- function(mu, sigma, constraints=NULL, tol = 1e-6) { if(is.null(constraints)) { numAssets <- length(mu) Amat <- rbind(rep(1, numAssets), diag(length(mu))) constraints <- list("Amat" = t(Amat), "bvec" = NULL, "meq" = 1) constraints$bvec <- c(1, rep(0, length(mu))) } stopifnot(class(constraints) == "list") stopifnot(all(c("Amat", "bvec", "meq") %in% names(constraints))) wts <- solve.QP(sigma, mu, constraints$Amat, constraints$bvec, constraints$meq) # else # wts <- solve.QP(sigma, mu, constraints$Amat, meq = constraints$meq) wts$solution[abs(wts$solution) < tol] <- 0 names(wts$solution) <- names(mu) wts$solution } ## A utility function that calculates "optimal" portfolios with respect to a prior and (Black-Litterman) posterior distribution, ## and then returns the weights and optionally plots them with barplots. The optimizer is provided by the user, but there is a "toy" ## Markowitz optimizer that is used by default. optimalPortfolios <- function ( result, optimizer = .optimalWeights.simpleMV, ..., doPlot = TRUE, beside = TRUE ) { BARWIDTH <- 1 .assertClass(result, "BLResult") optimizer <- match.fun(optimizer) # calculate the optimal prior and posterior weigths priorPortfolioWeights <- optimizer(result@priorMean, result@priorCovar, ...) postPortfolioWeights <- optimizer(result@posteriorMean, result@posteriorCovar, ...) if(doPlot) { if(beside) { plotData <- .removeZeroColumns(rbind(prior = priorPortfolioWeights, posterior = postPortfolioWeights)) barplot(plotData, beside = TRUE,col = c("lightblue", "cyan"), border = "blue", legend.text = c("Prior", "Posterior"), horiz = FALSE, ylab = "Weights", main = "Optimal weights") } else { plotData <- postPortfolioWeights - priorPortfolioWeights plotData <- plotData[plotData != 0] barplot(plotData, col = c("lightblue"), ylab = "Difference", border = "blue", main = "Differences in weights", horiz = FALSE) } } return(list(priorPfolioWeights = priorPortfolioWeights, postPfolioWeights = postPortfolioWeights )) } ## A utility function that calculates "optimal" portfolios with respect to a prior and (Black-Litterman) ## posterior distribution using the functionality of the Rmetrics fPortfolio package, and then returns the weights optimalPortfolios.fPort <- function(result, spec = NULL, constraints = "LongOnly", optimizer = "minriskPortfolio", inputData = NULL, numSimulations = BLCOPOptions("numSimulations")) { stop("Not implemented for this class of result") } setGeneric("optimalPortfolios.fPort") optimalPortfolios.fPort.BL <- function(result, spec = NULL ,constraints = "LongOnly", optimizer = "minriskPortfolio", inputData = NULL, numSimulations = BLCOPOptions("numSimulations")) { # if(!require("fPortfolio")) stop("The package fPortfolio is required to execute this function, but you do not have it installed.") assets <- assetSet(result@views) # create a "dummy" series that will only be used because the "optimizer" function requires it (but the mean and # covariance will not be calculated using it) dmySeries <- as.timeSeries(matrix(0, nrow = 1, ncol = length(assets), dimnames = list(NULL, assets))) numAssets <- length(assets) if(is.null(spec)) { spec <- portfolioSpec() # # setType(spec) <- "MV" # setWeights(spec) <- rep(1 / numAssets, times = numAssets) #setSolver(spec) <- "solveRquadprog" } # calculate prior and posterior mean and covariance. These are then stored in the scope of the # package environment and then accessed via a wrapper function when called by the optimiser. 
# This replaces the original code which stored value in the global environment and was therefore # in breach of the CRAN policies. .BLEnv$prior <- list(mu = result@priorMean, Sigma = result@priorCovar) # posterior mean and covariance estimates come from the BL calculations postMeanCov <- posteriorMeanCov(result) .BLEnv$post <- list(mu = postMeanCov$mean, Sigma = postMeanCov$covariance) priorSpec <- spec posteriorSpec <- spec setEstimator(priorSpec) <- "getPriorEstim" setEstimator(posteriorSpec) <- "getPosteriorEstim" optimizer <- match.fun(optimizer) # calculate optimal portfolios priorOptimPortfolio <- optimizer(dmySeries, priorSpec, constraints) posteriorOptimPortfolio <- optimizer(dmySeries, posteriorSpec, constraints) .BLEnv$prior <- NULL .BLEnv$post <- NULL x <- list(priorOptimPortfolio = priorOptimPortfolio, posteriorOptimPortfolio = posteriorOptimPortfolio) class(x) <- "BLOptimPortfolios" x } ## A wrapper function which returns estimates of the prior mean and covariance calculated ## stored in the package environment. It is not intended to be directly run by the user but ## needs to be exported in order to be run by the third party optimizer. getPriorEstim <- function(x, spec=NULL, ...) { return(.BLEnv$prior) } ## A wrapper function which returns estimates of the posterior mean and covariance calculated ## stored in the package environment. It is not intended to be directly run by the user but ## needs to be exported in order to be run by the third party optimizer. getPosteriorEstim <- function(x, spec=NULL, ...) { return(.BLEnv$post) } setMethod("optimalPortfolios.fPort", signature(result = "BLResult"), optimalPortfolios.fPort.BL ) # plot methods, not yet exposed plot.BLOptimPortfolios <- function(x,...) { plotData <- getWeights(x[[2]]@portfolio) - getWeights(x[[1]]@portfolio) plotData <- plotData[plotData != 0] barplot(plotData, col = c("lightblue"), ylab = "Difference", border = "blue", main = "Differences in weights", horiz = FALSE) } ## A utility function that calculates "optimal" portfolios with respect to a prior and (COP) ## posterior distribution using the functionality of the Rmetrics fPortfolio package, and then returns the weights optimalPortfolios.fPort.COP <- function(result, spec = NULL, constraints = "LongOnly", optimizer = "minriskPortfolio", inputData = NULL, numSimulations = BLCOPOptions("numSimulations")) { # if(!require("fPortfolio")) stop("The package fPortfolio is required to execute this function, but you do not have it installed.") if(is.null(inputData)) { # no input time series provided, so simulate the asset returns inputData <- sampleFrom(result@marketDist, n = numSimulations) colnames(inputData) <- assetSet(result@views) inputData <- as.timeSeries(inputData) } numAssets <- length(assetSet(result@views)) # missing portfolio spec, create mean-CVaR portfolio if(is.null(spec)) { spec <- portfolioSpec() setType(spec) <- "CVAR" setWeights(spec) <- rep(1 / numAssets, times = numAssets) setSolver(spec) <- "solveRglpk.CVAR" } # the "output" series data for the CVaR optimization will be taken from the simulations outData <- tail(posteriorSimulations(result), numSimulations) colnames(outData) <- assetSet(result@views) # kill row names to prevent errors when coercing to timeSeries object rownames(outData) <- NULL outData <- as.timeSeries(outData) optimizer <- match.fun(optimizer) # calculate prior and posterior optimal portfolios priorOptimPortfolio <- optimizer(inputData,spec, constraints) posteriorOptimPortfolio <- optimizer(outData, spec, constraints) x <- 
list(priorOptimPortfolio = priorOptimPortfolio, posteriorOptimPortfolio = posteriorOptimPortfolio) class(x) <- "COPOptimPortfolios" x } plot.COPOptimPortfolios <- function(x,...) { plotData <- getWeights(x[[2]]@portfolio) - getWeights(x[[1]]@portfolio) plotData <- plotData[plotData != 0] barplot(plotData, col = c("lightblue"), ylab = "Difference", border = "blue", main = "Differences in weights", horiz = FALSE) } setMethod("optimalPortfolios.fPort", signature(result = "COPResult"), optimalPortfolios.fPort.COP )
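
## Illustrative usage sketch (added example, not original package code): prior vs. posterior
## mean-variance weights for a toy two-asset BLResult; the figures are made up and the default
## optimizer needs the quadprog package for solve.QP().
sigma <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2, dimnames = list(c("A", "B"), c("A", "B")))
views <- BLViews(matrix(c(1, -1), nrow = 1, dimnames = list(NULL, c("A", "B"))),
                 q = 0.02, confidences = 100, assetNames = c("A", "B"))
res <- posteriorEst(views, mu = c(A = 0.05, B = 0.07), tau = 0.5, sigma = sigma)
optimalPortfolios(res, doPlot = FALSE)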
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/portfolioopt.R
probDistance <- function(result, numSimulations = BLCOPOptions("numSimulations")) { monteCarloSample <- rmnorm(numSimulations, result@priorMean, result@priorCovar) mean(abs(dmnorm(monteCarloSample, result@priorMean, result@priorCovar,log=TRUE) - dmnorm(monteCarloSample, result@posteriorMean, result@posteriorCovar, log = TRUE))) } setGeneric("probDistance") probDistance.COPResult <- function(result, numSimulations = BLCOPOptions("numSimulations") ) { show("Not implemented yet...") } setMethod("probDistance", signature(result = "COPResult"), probDistance.COPResult)
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/probDistance.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 2008 # sampleFrom # Author: Francisco ############################################################################### # DESCRIPTION: Function to sample from a distribution or mvdistribution class object # KEYWORDS: utilities ############################################################################### sampleFrom <- function ( dstn, # distribution or mvdistribution class object n = 1 # number of samples to generate ) { .assertClass(dstn, c("distribution", "mvdistribution")) sampleFun <- match.fun(paste("r", dstn@RName, sep = "")) do.call(sampleFun, c(n, as.list(dstn@parameters))) } # empirical CDF utility function .empCDF <- function(x, ordered = FALSE) { if(!ordered) x <- sort(x, decreasing = FALSE) probs <- seq(from = 0, to = 1, along = x) approxfun(x, probs) } # empirical quantile utility function .empQuantile <- function(x, ordered = FALSE) { if(!ordered) x <- sort(x, decreasing = FALSE) probs <- seq(from = 0, to = 1, along = x) approxfun(probs, x) }
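
## Illustrative usage sketch (added example, not original package code): drawing a handful of
## samples from a univariate view distribution.
sampleFrom(distribution("norm", mean = 0, sd = 0.01), n = 5)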
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/simulation.r
runBLCOPTests <- function(testPath = BLCOPOptions("unitTestPath"), protocolFile = "BLCOPTests.html", writeProtocol = FALSE) { BLTestSuite <- defineTestSuite(name = "Black-Litterman / COP unit tests", dirs = testPath) testResults <- runTestSuite(BLTestSuite) if(writeProtocol) printHTMLProtocol(testResults, fileName = protocolFile) testResults }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/unittests.r
############################################################################### # Mango Solutions, Chippenham SN14 0SQ 11/08/2008 09:16:51 # newPMatrix # Author: Francisco ############################################################################### # DESCRIPTION: Creates a new pick matrix with appropriate column names ############################################################################### newPMatrix <- function ( assetNames, # set of assets referred to by the matrix numViews, # number of views defaultValue = 0 # default value to use ) { stopifnot(length(assetNames) > 0 && numViews > 0) matrix(defaultValue, ncol = length(assetNames), nrow = numViews, dimnames = list(NULL, assetNames)) } .padMatrix <- function(x, targetRows, fillVal = 0) { if(nrow(x) < targetRows) { fillRows <- matrix(fillVal, ncol = ncol(x), nrow = targetRows - nrow(x) ) return(rbind(x, fillRows)) } warning("x has enough rows") x } .padVector <- function(x, targetLength, fillVal = 0) { if(length(x) < targetLength) { return(c(x, rep(fillVal, targetLength - length(x)))) } return(x) } .blockDiag <- function(A,B) { stopifnot(is(A, "matrix") && is(B, "matrix")) x <- ncol(A) + ncol(B) y <- nrow(A) + nrow(B) z <- matrix(0, ncol = x, nrow = y) z[1:nrow(A), 1:ncol(A)] <- A yOffset <- nrow(A) xOffset <- ncol(A) z[1:ncol(B)+xOffset, 1:nrow(B)+yOffset] <- B z } .assertClass <- function(object, classNames) { if(! any(classNames %in% class(object)) ) stop(paste("None of the classes:", classNames, ",were inherited by object")) } .removeZeroColumns <- function(mat) { isZeroColumn <- apply(mat == 0, 2, all) mat[,!isZeroColumn, drop = FALSE] } .correlationMatrix <- function(upperTriangle, dim) { sigma <- matrix(0, nrow = dim, ncol = dim) sigma[upper.tri(sigma)] <- upperTriangle diag(sigma) <- 1 sigma <- t(sigma) sigma[upper.tri(sigma)] <- upperTriangle sigma } .varcovMatrix <- function(stdDeviations, correlations, dim) { x <- .correlationMatrix(correlations, dim) x <- x * stdDeviations x <- t(t(x) * stdDeviations) x } .symmetricMatrix <- function(upperTriangle, dim) { result <- matrix(NA, nrow = dim, ncol = dim) result[upper.tri(result, diag = TRUE)] <- upperTriangle result <- t(result) result[upper.tri(result, diag = TRUE)] <- upperTriangle result }
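
## Illustrative usage sketch (added example, not original package code): an empty pick matrix for
## two views on three assets, ready to be filled in.
newPMatrix(c("A", "B", "C"), numViews = 2)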
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/utils.R
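An illustrative sketch (not from the package sources) of the triangular-fill helpers above: `.symmetricMatrix()` fills the upper triangle, including the diagonal, in column-major order and mirrors it, which is exactly how the 10-element dispersion vector used in the vignette and unit tests becomes a 4x4 matrix; `.varcovMatrix()` rescales a correlation matrix by standard deviations.

dispersion <- c(.376, .253, .360, .333, .360, .600, .397, .396, .578, .775) / 1000
sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4)
isSymmetric(sigma)                                   # TRUE
# .correlationMatrix() takes only the strictly upper-triangular entries and puts 1s on the diagonal
corr <- BLCOP:::.correlationMatrix(c(0.2, 0.1, 0.3), dim = 3)
sds  <- c(0.05, 0.1, 0.2)
vc   <- BLCOP:::.varcovMatrix(sds, c(0.2, 0.1, 0.3), dim = 3)
all.equal(vc, diag(sds) %*% corr %*% diag(sds))      # TRUE: vc = D %*% corr %*% D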
.BLEnv <- new.env() .onLoad <- function(libname, pkgname) { .BLEnv$settings <- list(gWidgetsToolkit = "tcltk", regFunc = "lm", numSimulations = 50000, unitTestPath = system.file("RUnit", package = "BLCOP")) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/R/zzz.r
test.BLPosterior <- function() { #entries <- c(0.001005,0.001328,-0.000579,-0.000675,0.000121,0.000128,-0.000445,-0.000437 , # 0.001328,0.007277,-0.001307,-0.000610,-0.002237,-0.000989,0.001442,-0.001535 , # -0.000579,-0.001307,0.059852,0.027588,0.063497,0.023036,0.032967,0.048039 , #-0.000675,-0.000610,0.027588,0.029609,0.026572,0.021465,0.020697,0.029854 , # 0.000121,-0.002237,0.063497,0.026572,0.102488,0.042744,0.039943,0.065994 , # 0.000128,-0.000989,0.023036,0.021465,0.042744,0.032056,0.019881,0.032235 , #-0.000445,0.001442,0.032967,0.020697,0.039943,0.019881,0.028355,0.035064 , #-0.000437,-0.001535,0.048039,0.029854,0.065994,0.032235,0.035064,0.079958 ) # #myVarcov2 <- matrix(entries, ncol = 8, nrow = 8) #mu <- c(0.08, 0.67,6.41, 4.08, 7.43, 3.70, 4.80, 6.60) / 100 #pick <- matrix(0, ncol = 8, nrow = 3, dimnames = list(NULL, letters[1:8])) #pick[1,7] <- 1 #pick[2,1] <- -1; pick[2,2] <- 1 #pick[3, 3:6] <- c(0.9, -0.9, .1, -.1) #confidences <- 1 / c(0.00709, 0.000141, 0.000866) #myViews <- BLViews(pick, c(0.0525, 0.0025, 0.02), confidences, letters[1:8]) #myPosterior <- posteriorEst(myViews, tau = 0.025, mu = mu, myVarcov2 ) BLEx <- get(load( file.path(BLCOPOptions("unitTestPath"), "BLExample.RData") )) myPosterior <- BLEx$posterior checkEquals(myPosterior@posteriorMean, c(0.0006845523,0.0049366147,0.0624770045,0.0412809077,0.0728581285, 0.0375468518, 0.0469830868, 0.0655370385 ), checkNames = FALSE ) } test.posteriorFeasibility <- function() { # at the moment this test requires corpcor, due to the expected availability of the shrinkage estimator. # TODO: remove this dependency if(!require("corpcor", quiet = TRUE)) { warning("Could not load the corpcor package, these tests will not be run") return() } pick <- matrix(0, 2, 6) pick[1,1:3] <- 1/3 pick[2,4:6] <- 1/3 views <- BLViews(pick, q = c(0.2, -0.1), confidences = c(100,100), colnames(monthlyReturns)) post <- BLPosterior(monthlyReturns, views, 1, sp500Returns, US13wTB, 0.9, covEstimator = "cov.shrink") feasibility <- posteriorFeasibility(post) checkEquals(2.261890, feasibility$mahalDist, tolerance = 1e-03) checkEquals(0.8941064, feasibility$mahalDistProb, tolerance = 1e-06) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.BLPosterior.R
test.CAPMList <- function() { x <- CAPMList(monthlyReturns, marketIndex = sp500Returns, riskFree = US13wTB) .unitTestPath <- BLCOPOptions("unitTestPath") expected <- read.csv(file.path(.unitTestPath, "CAPMRes_lm.csv"), row.names = 1 ) checkEquals( x, expected) x <- CAPMList(monthlyReturns, marketIndex = sp500Returns, riskFree = US13wTB, regFunc = "rlm") expected <- read.csv(file.path(.unitTestPath, "CAPMRes_rlm.csv"), row.names = 1) checkEquals( x, expected) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.CAPM.R
test.COPPosterior <- function() { if(!require("mnormt", quiet = TRUE)) { warning("This test relies on the mnormt package which is not available \n") return() } NUMTESTSIMULATIONS <- 1000 .unitTestPath <- BLCOPOptions("unitTestPath") testEnvironment <- new.env() dispersion <- c(.376,.253,.360,.333,.360,.600,.397,.396,.578,.775) / 1000 sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4) caps <- rep(1/4, 4) mu <- 2.5 * sigma %*% caps dim(mu) <- NULL marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5 ) pick <- matrix(0, ncol = 4, nrow = 1, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX"))) pick[1,4] <- 1 vdist <- list(distribution("unif", min = -0.02, max = 0)) views <- COPViews(pick, vdist, 0.2, c("SP", "FTSE", "CAC", "DAX")) tmp <- load(file.path(.unitTestPath, "posteriorsims.RData")) assign(tmp, value = get(tmp), envir = testEnvironment) set.seed(3) posterior <- COPPosterior(marketDistribution, views, numSimulations = NUMTESTSIMULATIONS) checkEquals(posterior@posteriorSims, testEnvironment$posteriorSims, tolerance=1.5) checkEquals(colnames(posterior@posteriorSims), assetSet(views)) rm(testEnvironment) } test.COPPosteriorExtractors <- function() { if(!require("mnormt", quiet = TRUE)) { warning("This test relies on the mnormt package which is not available \n") return() } NUMTESTSIMULATIONS <- 1000 dispersion <- c(.376,.253,.360,.333,.360,.600,.397,.396,.578,.775) / 1000 sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4) caps <- rep(1/4, 4) mu <- 2.5 * sigma %*% caps dim(mu) <- NULL marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5 ) pick <- matrix(0, ncol = 4, nrow = 1, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX"))) pick[1,4] <- 1 vdist <- list(distribution("unif", min = -0.02, max = 0)) views <- COPViews(pick, vdist, 0.2, c("SP", "FTSE", "CAC", "DAX")) # test posterior extractor functions posterior <- COPPosterior(marketDistribution, views, numSimulations = NUMTESTSIMULATIONS) checkEquals(posteriorSimulations(posterior), posterior@posteriorSims) checkEquals(numSimulations(posterior), NUMTESTSIMULATIONS) checkEquals(priorViews(posterior), views) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.COPPosterior.r
test.addBLViews <- function() { # very simple case first stocks <- colnames(monthlyReturns) viewInput <- matrix(0, ncol = 6, nrow = 2, dimnames = list(NULL, stocks)) viewInput[1,"IBM"] <- 1 viewInput[1, "DELL"] <- 0.04 viewInput[2, "C"] <- 1 viewInput[2, "JPM"] <- 0.6 confidences <- 1 / c(0.7, 0.1) views <- new("BLViews", P = viewInput, qv = c(1,1) , confidences = confidences, assets = stocks) x <- BLViews(P = viewInput, confidences = confidences, q = c(1,1), assetNames = stocks) checkEquals(x, views) pick <- matrix(0, ncol = 2, nrow = 1, dimnames = list(NULL, c("MS", "BAC"))) pick[1, "MS"] <- 1 pick[1, "BAC"] <- 0.01 x <- addBLViews(pick, 0.15, 1/ 0.03 , views = x) views2 <- BLViews(P = matrix(0,nrow = 3, ncol = 6, dimnames=list(NULL,stocks)), q = rep(0, 3), confidences = 1 / c(0.7,0.1,0.03),stocks) views2@P[1:2,] <- viewInput views2@P[3, c("MS", "BAC")] <- c(1, 0.01) views2@qv <- c(1,1,0.15) checkEquals(x, views2) } test.addCOPViews <- function() { stocks <- colnames(monthlyReturns) viewInput <- matrix(0, ncol = 6, nrow = 2, dimnames = list(NULL, stocks)) viewInput[1,"IBM"] <- 1 viewInput[1, "DELL"] <- 0.04 viewInput[2, "C"] <- 1 viewInput[2, "JPM"] <- 0.6 confidences <- c(0.7, 0.1) viewDist <- list(distribution("unif", min = 0.04, max = 0.1), distribution("norm", sd = 10, mean = 0.05)) views1 <- COPViews(viewInput, viewDist, confidences, stocks) views2 <- new("COPViews", pick = viewInput, viewDist = viewDist, confidences = confidences, assets = stocks) checkEquals(views1, views2) pick <- matrix(0, ncol = 2, nrow = 1, dimnames = list(NULL, c("MS", "BAC"))) pick[1, "MS"] <- 1 pick[1, "BAC"] <- 0.01 if(!require("sn", quiet = TRUE)) { warning("The next tests require the sn package, which could not be loaded \n") return() } viewDist2 <- list(distribution("sn", xi = 0.05, omega = 10, alpha = 0.001)) views1 <- addCOPViews(pick, viewDist2, 0.4, views1 ) views2 <- COPViews(pickMatrix = matrix(0, ncol = 6, nrow = 3, dimnames = list(NULL, stocks)), c(viewDist, viewDist2), c(confidences, 0.4), stocks ) views2@pick[1:2,] <- viewInput views2@pick[3, c("MS", "BAC")] <- c(1, 0.01) checkEquals(views1, views2) } test.newPMatrix <- function() { x <- newPMatrix(c("DAX", "FTSE", "CAC"), 2) checkEquals(x, matrix(0, ncol = 3, nrow = 2, dimnames = list(NULL, c("DAX", "FTSE", "CAC")))) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.buildviews.R
test.deleteViews <- function() { stocks <- colnames(monthlyReturns) viewInput <- matrix(0, ncol = 6, nrow = 2, dimnames = list(NULL, c(stocks))) viewInput[1,"IBM"] <- 1 viewInput[1, "DELL"] <- -1 viewInput[2, "C"] <- 1 viewInput[2, "JPM"] <- -1 confidences <- c(70, 10) x <- BLViews(viewInput, c(-0.1, 0.1), confidences, stocks) x <- deleteViews(x,1) y <- BLViews(viewInput[-1,,drop=FALSE], c(0.1), confidences[2], stocks) checkEquals(x, y) viewInput <- matrix(0, ncol = 6, nrow = 2, dimnames = list(NULL,stocks)) viewInput[1,"IBM"] <- 1 viewInput[1, "DELL"] <- -1 viewInput[2, "C"] <- 1 viewInput[2, "JPM"] <- -1 confidences <- c(0.7, 0.1) viewDist <- list(distribution("unif", min = 0.04, max = 0.1), distribution("norm", sd = 10, mean = 0.05)) x <- COPViews(viewInput, viewDist, confidences, stocks) x <- deleteViews(x,1) y <- COPViews(viewInput[2,,drop=FALSE], viewDist[2], confidences[2], stocks) checkEquals(x,y) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.deleteviews.r
# $LastChangedDate: 2010-02-28 13:45:31 +0000 (Sun, 28 Feb 2010) $ # $Rev: 4767 $ # Author: Francisco ############################################################################### BLExample <- function() { entries <- c(0.001005,0.001328,-0.000579,-0.000675,0.000121,0.000128,-0.000445,-0.000437 , 0.001328,0.007277,-0.001307,-0.000610,-0.002237,-0.000989,0.001442,-0.001535 , -0.000579,-0.001307,0.059852,0.027588,0.063497,0.023036,0.032967,0.048039 , -0.000675,-0.000610,0.027588,0.029609,0.026572,0.021465,0.020697,0.029854 , 0.000121,-0.002237,0.063497,0.026572,0.102488,0.042744,0.039943,0.065994 , 0.000128,-0.000989,0.023036,0.021465,0.042744,0.032056,0.019881,0.032235 , -0.000445,0.001442,0.032967,0.020697,0.039943,0.019881,0.028355,0.035064 , -0.000437,-0.001535,0.048039,0.029854,0.065994,0.032235,0.035064,0.079958 ) myVarcov2 <- matrix(entries, ncol = 8, nrow = 8) mu <- c(0.08, 0.67,6.41, 4.08, 7.43, 3.70, 4.80, 6.60) / 100 pick <- matrix(0, ncol = 8, nrow = 3, dimnames = list(NULL, letters[1:8])) pick[1,7] <- 1 pick[2,1] <- -1; pick[2,2] <- 1 pick[3, 3:6] <- c(0.9, -0.9, .1, -.1) confidences <- 1 / c(0.00709, 0.000141, 0.000866) myViews <- BLViews(pick, c(0.0525, 0.0025, 0.02), confidences, letters[1:8]) myPosterior <- posteriorEst(myViews, tau = 0.025, mu = mu, myVarcov2 ) list(prior = myViews, posterior = myPosterior) } # check the basic "toy" portfolio optimizer and the portfolio optimizer that uses fPortfolio test.optimalPortfolios.BL <- function() { BLEx <- get(load( file.path(BLCOPOptions("unitTestPath"), "BLExample.RData") )) #BLEx <- get(load(BLExample.RData)) #BLEx <- BLCOP:::BLExample() myPosterior <- BLEx$posterior res <- optimalPortfolios(myPosterior, doPlot = TRUE) expected <- c(0.00000000,0.00000000,0.38204176, 0.00000000, 0.08198505,0.00000000, 0.36548138,0.17049181) checkEquals(res$priorPfolioWeights, expected, checkNames = FALSE, tolerance = 1e-06) expected <- c(0.00000000, 0.00000000, 0.32444891, 0.08071719, 0.09377903, 0.00000000 ,0.32478427 ,0.17627060) checkEquals(res$postPfolioWeights, expected, checkNames = FALSE, tolerance = 1e-06) # now try the fPortfolio optimizer #optimalPortfolios.fPort.BL <- function(result, spec,constraints = "LongOnly", optimizer = "minriskPortfolio", # inputData = NULL, numSimulations = NA) if(!require("fPortfolio")) { warning("The fPortfolio package is required to run these tests, but you don't have it installed") return() } res2 <- optimalPortfolios.fPort(myPosterior, spec = portfolioSpec(), optimizer = "tangencyPortfolio") # there should be two portfolios, each of class checkEquals(c(class(res2[[1]]), class(res2[[2]])), c("fPORTFOLIO", "fPORTFOLIO"), check.names = FALSE) # posterior weights should be similar to those in Idzorek's paper checkEquals(round(getWeights(res2$"posteriorOptimPortfolio"), 2), c(0.28, 0.17, 0.1, 0.14, 0.01, 0.01, 0.25, 0.04), check.names=FALSE) # try another optimizer res3 <- optimalPortfolios.fPort(myPosterior, spec = portfolioSpec(), optimizer = "minriskPortfolio") checkEquals(round(sapply(res3, getWeights), 2), structure(c(0.94, 0, 0, 0.04, 0, 0, 0.02, 0, 0.94, 0, 0, 0.04, 0, 0, 0.02, 0), .Dim = c(8L, 2L), .Dimnames = list(c("a", "b", "c", "d", "e", "f", "g", "h"), c("priorOptimPortfolio", "posteriorOptimPortfolio" ))) ,msg = " |minimum risk portfolios as expected") } # tests optimalPortfolios.fPort for COPResults objects test.optimalPortfolios.COP <- function() { if(!require("fPortfolio", quiet = TRUE)) { warning("This test relies on the fPortfolio package which is not available \n") return() } COPEx <- 
get(load( file.path(BLCOPOptions("unitTestPath"), "copexample.RData") )) # Check optimization with COP myPosterior <- COPEx$posterior res4 <- optimalPortfolios.fPort(myPosterior, spec = NULL, optimizer = "minriskPortfolio", inputData = NULL, numSimulations = 100 ) checkEqualsNumeric(round(getWeights(res4$posteriorOptimPortfolio),3), c(0.535, 0.465, 0, 0) ) # second example, using input data COPEx2 <- get(load( file.path(BLCOPOptions("unitTestPath"), "copexample2.RData") )) spec <- portfolioSpec() setType(spec) <- "CVaR" setWeights(spec) <- rep(1 / 6, times = 6) setSolver(spec) <- "solveRglpk.CVAR" setTargetReturn(spec) <- 0.005 res5 <- optimalPortfolios.fPort( COPEx2, spec = spec, inputData = as.timeSeries(monthlyReturns), numSimulations = nrow(monthlyReturns)) checkEqualsNumeric(round(getWeights(res5$priorOptimPortfolio),3), c(0.071, 0, 0.011, 0.522, 0, 0.396)) checkEqualsNumeric(round(getWeights(res5$posteriorOptimPortfolio),3), c(0.513, 0, 0, 0, 0, 0.487)) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.portfolioopt.R
# $LastChangedDate: 2010-02-13 19:41:48 +0000 (Sat, 13 Feb 2010) $ # $Rev: 4745 $ # # Unit test for "show" methods # # Author: Francisco ############################################################################### test.show.COPPosterior <- function() { x <- get(load( file.path(BLCOPOptions("unitTestPath"), "copexample.RData") )) checkEquals(capture.output(show(x$posterior)), c("Asset set: SP,FTSE,CAC,DAX ", "Views used to generate this posterior: ", "[1] \"1*DAX~unif:(min=-0.02,max=0)\"", "Number of simulations: 1000 " )) }
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/RUnit/runit.showmethods.R
### R code from vignette source 'BLCOP.Rnw' ################################################### ### code chunk number 1: BLCOP.Rnw:76-79 ################################################### require(fPortfolio) require(BLCOP) require(mnormt) ################################################### ### code chunk number 2: BLCOP.Rnw:81-85 ################################################### pickMatrix <- matrix(c(1/2, -1, 1/2, rep(0, 3)), nrow = 1, ncol = 6 ) views <- BLViews(P = pickMatrix, q = 0.06,confidences = 100, assetNames = colnames(monthlyReturns)) views ################################################### ### code chunk number 3: BLCOP.Rnw:90-92 ################################################### priorMeans <- rep(0, 6) priorVarcov <- cov.mve(monthlyReturns)$cov ################################################### ### code chunk number 4: BLCOP.Rnw:100-102 ################################################### marketPosterior <- posteriorEst(views = views, sigma = priorVarcov, mu = priorMeans, tau = 1/2) ################################################### ### code chunk number 5: BLCOP.Rnw:107-111 ################################################### finViews <- matrix(ncol = 4, nrow = 1, dimnames = list(NULL, c("C","JPM","BAC","MS"))) finViews[,1:4] <- rep(1/4,4) views <- addBLViews(finViews, 0.15, 90, views) views ################################################### ### code chunk number 6: BLCOP.Rnw:118-120 ################################################### marketPosterior <- BLPosterior(as.matrix(monthlyReturns), views, tau = 1/2, marketIndex = as.matrix(sp500Returns),riskFree = as.matrix(US13wTB)) ################################################### ### code chunk number 7: BLCOP.Rnw:136-140 ################################################### optPorts <- optimalPortfolios.fPort(marketPosterior, optimizer = "tangencyPortfolio") par(mfcol = c(2, 1)) weightsPie(optPorts$priorOptimPortfolio) weightsPie(optPorts$posteriorOptimPortfolio) ################################################### ### code chunk number 8: BLCOP.Rnw:146-149 ################################################### optPorts2 <- optimalPortfolios.fPort(marketPosterior, constraints = "minW[1:6]=0.1", optimizer = "minriskPortfolio") optPorts2 ################################################### ### code chunk number 9: BLCOP.Rnw:155-156 ################################################### densityPlots(marketPosterior, assetsSel = "JPM") ################################################### ### code chunk number 10: BLCOP.Rnw:207-214 ################################################### dispersion <- c(.376,.253,.360,.333,.360,.600,.397,.396,.578,.775) / 1000 sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4) caps <- rep(1/4, 4) mu <- 2.5 * sigma %*% caps dim(mu) <- NULL marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5 ) class(marketDistribution) ################################################### ### code chunk number 11: BLCOP.Rnw:222-228 ################################################### pick <- matrix(0, ncol = 4, nrow = 1, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX"))) pick[1,"DAX"] <- 1 viewDist <- list(distribution("unif", min = -0.02, max = 0)) views <- COPViews(pick, viewDist = viewDist, confidences = 0.2, assetNames = c("SP", "FTSE", "CAC", "DAX")) ################################################### ### code chunk number 12: BLCOP.Rnw:233-238 ################################################### newPick <- matrix(0, 1, 2) dimnames(newPick) <- list(NULL, c("SP", "FTSE")) newPick[1,] <- c(1, -1) # 
add a relative view views <- addCOPViews(newPick, list(distribution("norm", mean = 0.05, sd = 0.02)), 0.5, views) ################################################### ### code chunk number 13: BLCOP.Rnw:243-245 ################################################### marketPosterior <- COPPosterior(marketDistribution, views, numSimulations = 50000) densityPlots(marketPosterior, assetsSel = 4)
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/artifacts/BLCOP.R
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>", echo = TRUE, fig.align = "center", fig.width = 6, fig.height = 6) ## ---- message = FALSE, warning = FALSE, print = FALSE------------------------- library(fPortfolio) library(BLCOP) library(mnormt) ## ---- print = FALSE----------------------------------------------------------- pickMatrix <- matrix(c(1/2, -1, 1/2, rep(0, 3)), nrow = 1, ncol = 6 ) views <- BLViews(P = pickMatrix, q = 0.06,confidences = 100, assetNames = colnames(monthlyReturns)) views ## ---- print = FALSE----------------------------------------------------------- priorMeans <- rep(0, 6) priorVarcov <- MASS::cov.mve(monthlyReturns)$cov ## ---- print = TRUE------------------------------------------------------------ marketPosterior <- posteriorEst(views = views, sigma = priorVarcov, mu = priorMeans, tau = 1/2) marketPosterior ## ---- print = FALSE----------------------------------------------------------- finViews <- matrix(ncol = 4, nrow = 1, dimnames = list(NULL, c("C","JPM","BAC","MS"))) finViews[,1:4] <- rep(1/4,4) views <- addBLViews(finViews, 0.15, 90, views) views ## ---- print = TRUE------------------------------------------------------------ marketPosterior <- BLPosterior(as.matrix(monthlyReturns), views, tau = 1/2, marketIndex = as.matrix(sp500Returns),riskFree = as.matrix(US13wTB)) marketPosterior ## ---- print = TRUE, fig = TRUE, fig.height = 3-------------------------------- optPorts <- optimalPortfolios.fPort(marketPosterior, optimizer = "tangencyPortfolio") optPorts weightsPie(optPorts$priorOptimPortfolio) weightsPie(optPorts$posteriorOptimPortfolio) ## ---- echo=TRUE, print=FALSE, fig=FALSE--------------------------------------- optPorts2 <- optimalPortfolios.fPort(marketPosterior, constraints = "minW[1:6]=0.1", optimizer = "minriskPortfolio") optPorts2 ## ---- echo=TRUE, print=FALSE, fig=TRUE---------------------------------------- densityPlots(marketPosterior, assetsSel = "JPM") ## ---- echo=TRUE, print=FALSE-------------------------------------------------- dispersion <- c(.376,.253,.360,.333,.360,.600,.397,.396,.578,.775) / 1000 sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4) caps <- rep(1/4, 4) mu <- 2.5 * sigma %*% caps dim(mu) <- NULL marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5 ) class(marketDistribution) ## ---- echo=TRUE, print=FALSE-------------------------------------------------- pick <- matrix(0, ncol = 4, nrow = 1, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX"))) pick[1,"DAX"] <- 1 viewDist <- list(distribution("unif", min = -0.02, max = 0)) views <- COPViews(pick, viewDist = viewDist, confidences = 0.2, assetNames = c("SP", "FTSE", "CAC", "DAX")) ## ---- echo=TRUE, print=FALSE-------------------------------------------------- newPick <- matrix(0, 1, 2) dimnames(newPick) <- list(NULL, c("SP", "FTSE")) newPick[1,] <- c(1, -1) # add a relative view views <- addCOPViews(newPick, list(distribution("norm", mean = 0.05, sd = 0.02)), 0.5, views) ## ---- echo=TRUE, print=FALSE, fig=TRUE---------------------------------------- marketPosterior <- COPPosterior(marketDistribution, views, numSimulations = 50000) densityPlots(marketPosterior, assetsSel = 4)
/scratch/gouwar.j/cran-all/cranData/BLCOP/inst/doc/BLCOP.R
---
title: "Notes on the BLCOP Package"
author: "Francisco Gochez"
date: "2015-02-05"
output: rmarkdown::html_vignette
bibliography: references.bib
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Notes on the BLCOP Package}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  echo = TRUE,
  fig.align = "center",
  fig.width = 6,
  fig.height = 6)
```

## Introduction

The BLCOP package is an implementation of the Black-Litterman and copula opinion pooling frameworks. This vignette gives an overview of these two opinion-blending methods, briefly shows how they are implemented in this package, and closes with a short discussion of how the package may evolve in the future (any feedback would be greatly appreciated).

## Overview of the Black-Litterman model

The Black-Litterman model was devised in 1992 by Fischer Black and Robert Litterman. Their goal was to create a systematic method of specifying and then incorporating analyst/portfolio manager views into the estimation of market parameters. Let $A = \{a_1, a_2, ..., a_n \}$ be a set of random variables representing the returns of $n$ assets. In the BL approach, the joint distribution of $A$ is taken to be multivariate normal, i.e. $A \sim N(\mu, \Sigma)$. The problem they then addressed was that of incorporating an analyst's views into the estimation of the market mean $\mu$ ^[A. Meucci has reformulated the model in terms of forming views directly on market realizations rather than the mean, and in my opinion this formulation is considerably clearer. [See @MeucciBL].] Suppose that we take $\mu$ itself to be a random variable which is normally distributed, and moreover that its dispersion is proportional to that of the market. Then $$\mu \sim N(\pi, \tau \Sigma), $$ and $\pi$ is some underlying parameter which can be determined by the analyst using some established procedure. Black and Litterman argued from equilibrium considerations that this should be obtained from the intercepts of the capital-asset pricing model.

Next, the analyst forms subjective views on the actual mean of the returns for the holding period. This is the part of the model that allows the analyst/portfolio manager to include his or her views. BL proposed that views should be made on linear combinations (i.e. portfolios) of the asset return variable means $\mu$. Each view would take the form of a "mean plus error". Thus for example, a typical view would look as follows: $$ p_{i1} \mu_1 + p_{i2} \mu_2 + ... + p_{in} \mu_n = q_i + \epsilon_i, $$ where $\epsilon_i \sim N(0, \sigma_i^2)$. The variances $\sigma_i^2$ of each view could be taken as controlling the confidence in each view. Collecting these views into a matrix we will call the "pick" matrix, we obtain the "general" view specification $$P \mu \sim N(q, \Omega).$$ $\Omega$ is the diagonal matrix $diag(\sigma_1^2, \sigma_2^2, ..., \sigma_n^2)$. It can be shown (c.f. [@MeucciBL], p.5 and appendix), based on Bayes' Law, that the posterior distribution of the market mean conditional on these views is $$\mu |_{q; \Omega} \sim N(\mu_{BL}, \Sigma_{BL}^{\mu}) $$ where \begin{aligned} \mu_{BL} &= ((\tau \Sigma)^{-1} + P^{T} \Omega^{-1} P ) ^{-1} ((\tau \Sigma)^{-1} \pi + P^{T} \Omega^{-1} q) \\ \Sigma_{BL}^{\mu} &= ((\tau \Sigma)^{-1} + P^{T} \Omega^{-1} P)^{-1} \end{aligned} We can then obtain the posterior distribution of the *market* by taking $A|_{q, \Omega} = \mu|_{q, \Omega} + Z$, where $Z \sim N(0, \Sigma)$ is independent of $\mu$. One then obtains that $E[A] = \mu_{BL}$ and $\Sigma_{BL} = \Sigma + \Sigma_{BL}^{\mu}$ ([@MeucciBL], p. 5). Let us now see how these ideas are implemented in the **BLCOP** package.

## Using the Black-Litterman model in BLCOP

The implementation of the Black-Litterman model in BLCOP is based on objects that represent views on the market and objects that represent the posterior distribution of the market after blending the views. We will illustrate this with a simple example. Suppose that an analyst wishes to form views on 6 stocks, 2 of which are technology stocks and the other 4 of which are from the financial sector. Initially, she believes that the average of the 2 tech stocks will outperform one of the financial stocks, say $\frac{1}{2}( \textrm{DELL} + \textrm{IBM}) - \textrm{MS} \sim N(0.06, 0.01)$. We will create a **BLViews** class object with the `BLViews()` constructor function. Its arguments are the "pick" matrix, a vector of confidences, the vector "q", and the names of the assets in one's "universe". Please note that the following examples may require the suggested `{fPortfolio}` and `{mnormt}` packages.

```{r, message = FALSE, warning = FALSE, print = FALSE}
library(fPortfolio)
library(BLCOP)
library(mnormt)
```

```{r, print = FALSE}
pickMatrix <- matrix(c(1/2, -1, 1/2, rep(0, 3)), nrow = 1, ncol = 6 )
views <- BLViews(P = pickMatrix, q = 0.06,confidences = 100, assetNames = colnames(monthlyReturns))
views
```

Next, we need to determine the "prior" distribution of these assets. The analyst may for instance decide to set these means to 0, and then calculate the variance-covariance matrix of these through some standard estimation procedure (e.g. exponentially weighted moving average). Here we use `cov.mve()` from the `{MASS}` package.

```{r, print = FALSE}
priorMeans <- rep(0, 6)
priorVarcov <- MASS::cov.mve(monthlyReturns)$cov
```

We can now calculate the posterior market distribution using `posteriorEst()`. This takes as parameters the view object, the prior covariance and mean, and "tau" ^[An additional parameter called **kappa** will be discussed shortly]. The procedure for setting $\tau$ is the subject of some controversy in the literature, but here we shall set it to $1/2$.

```{r, print = TRUE}
marketPosterior <- posteriorEst(views = views, sigma = priorVarcov, mu = priorMeans, tau = 1/2)
marketPosterior
```

Now suppose that we wish to add another view, this time on the average of the four financial stocks. This can be done conveniently with `addBLViews()` as in the following example:

```{r, print = FALSE}
finViews <- matrix(ncol = 4, nrow = 1, dimnames = list(NULL, c("C","JPM","BAC","MS")))
finViews[,1:4] <- rep(1/4,4)
views <- addBLViews(finViews, 0.15, 90, views)
views
```

We will now recompute the posterior, but this time using the capital asset pricing model to compute the "prior" means. Rather than manually computing these, it is convenient to use the `BLPosterior()` wrapper function. It will compute these "alphas", as well as the variance-covariance matrix of a returns series, and will then call `posteriorEst()` automatically.

```{r, print = TRUE}
marketPosterior <- BLPosterior(as.matrix(monthlyReturns), views, tau = 1/2, marketIndex = as.matrix(sp500Returns),riskFree = as.matrix(US13wTB))
marketPosterior
```

Both `BLPosterior()` and `posteriorEst()` have a **kappa** parameter which may be used to replace the matrix $\Omega$ of confidences in the posterior calculation. If it is greater than 0, then $\Omega$ is set to $\kappa P^{T} \Sigma P$ rather than $diag(\sigma_1^2, \sigma_2^2, ..., \sigma_n^2)$. This choice of $\Omega$ is suggested by several authors, and it leads to the confidences being determined by the volatilities of the asset returns.

A user may also be interested in comparing allocations that are optimal under the prior and posterior distributions. The `{fPortfolio}` package of the Rmetrics project ([@fPort]), for example, has a rich set of functionality available for portfolio optimization. The helper function `optimalPortfolios.fPort()` was created to wrap these functions for exploratory purposes.

```{r, print = TRUE, fig = TRUE, fig.height = 3}
optPorts <- optimalPortfolios.fPort(marketPosterior, optimizer = "tangencyPortfolio")
optPorts
weightsPie(optPorts$priorOptimPortfolio)
weightsPie(optPorts$posteriorOptimPortfolio)
```

Additional parameters may be passed into this function to control the optimization process. Users are referred to the `{fPortfolio}` package documentation for details.

```{r, echo=TRUE, print=FALSE, fig=FALSE}
optPorts2 <- optimalPortfolios.fPort(marketPosterior, constraints = "minW[1:6]=0.1", optimizer = "minriskPortfolio")
optPorts2
```

Finally, density plots of marginal prior and posterior distributions can be generated with `densityPlots()`. As we will see in the next section, this gives more interesting results when used with copula opinion pooling.

```{r, echo=TRUE, print=FALSE, fig=TRUE}
densityPlots(marketPosterior, assetsSel = "JPM")
```

## Overview of Copula Opinion Pooling

Copula opinion pooling is an alternative way to blend analyst views on market distributions that was developed by Attilio Meucci towards the end of 2005. It is similar to the Black-Litterman model in that it also uses a "pick" matrix to formulate views. However it has several advantages including the following:

* Views are made on realizations of the market, not on market parameters as in the original formulation of BL
* The joint distribution of the market can be any multivariate distribution
* Views are not restricted to the normal distribution
* The parameters in the model have clearer meanings
* The model can easily be generalized to incorporate the views of multiple analysts

Nevertheless, all of this comes at a price. We can no longer use closed-form expressions for calculating the posterior distribution of the market and hence must rely on simulation instead. Before proceeding to the implementation however let us look at the theory. Readers are referred to [@MeucciCOP] for a more detailed discussion.

As before, suppose that we have a set of $n$ assets whose returns are represented by a set of random variables $A = \{a_1, a_2, ..., a_n \}$. As in Black-Litterman, we suppose that $A$ has some prior joint distribution whose c.d.f. we will denote by $\Phi_A$. Denote the marginals of this distribution by $\phi_i$. An analyst forms his views on linear combinations of future *realizations* of the values of $A$ by assigning subjective probability distributions to these linear combinations. That is, we form views of the form $p_{i,1} a_1 + p_{i,2} a_2 +... + p_{i,n} a_n \sim \theta_i$, where $\theta_i$ is some distribution. Denote the pick matrix formed by all of these views by $P$ once again. Now, since we have assigned some prior distribution $\Phi_A$ to these assets, it follows that actually the product $V = PA$ inherits a distribution as well, say $$v_i = p_{i,1} a_1 + p_{i,2} a_2 +... + p_{i,n}a_n \sim \theta_i'$$. In general $\theta_i \neq \theta_i'$ unless one's views are identical to the market prior. Thus we must somehow resolve this contradiction. A straightforward way of doing this is to take the weighted average of the two marginal c.d.f.s, i.e. $\hat{\theta_i} = \tau_i \theta_i + (1 - \tau_i) \theta_i'$, where $\tau_i \in [0,1]$ is a parameter representing our confidence in our subjective views. This is the actual marginal distribution that will be used to determine the market posterior.

The market posterior is actually determined by setting the marginal distributions of $V$ to $\hat{\theta_i}$, while using a copula to keep the dependence structure of $V$ intact. Let $V = (v_1, v_2, ..., v_k)$, where $k$ is the number of views that the analyst has formed. Then $v_i \sim \theta_i'$. Let $C$ be the copula of $V$ so that $C$ is the joint distribution of $$(\theta_1'(v_1), \theta_2'(v_2), ..., \theta_k'(v_k)) = (C_1, C_2, ..., C_k)$$ if we now take the $\theta_i'$ to be cumulative distribution functions. Next set $\hat{V}$ as the random variable with the joint distribution $(\hat{\theta_1}^{-1} (C_1), \hat{\theta_2}^{-1} (C_2), ..., \hat{\theta_k}^{-1} (C_k))$. The posterior market distribution is obtained by rotating $\hat{V}$ back into market coordinates using the orthogonal complement of $P$. See [@MeucciCOP], p.5 for details.

## COP in BLCOP

Let us now work through a brief example to see how these ideas are implemented in the BLCOP package. First, one again works with objects that hold the view specification, which in the COP case are of class `COPViews`. These can again be created with a constructor function of the same name. However a significant difference is the use of `mvdistribution` and `distribution` class objects to specify the prior distribution and view distributions respectively. We will show the use of these in the following example, which is based on the example used in [@MeucciCOP], p.9.

Suppose that we wish to invest in 4 market indices (S&P 500, FTSE, CAC and DAX). Meucci suggests a multivariate Student-t distribution with $\nu = 5$ degrees of freedom and dispersion matrix given by: $$ 10^{-3} \left( \begin{array}{cccc} .376 & .253 & .333 & .397 \\ . & .360 & .360 & .396 \\ . & . & .600 & .578 \\ . & . & . & .775 \end{array} \right).$$ He then sets $\mu = \delta \Sigma w_{eq}$ where $w_{eq}$ is the relative capitalization of the 4 indices and $\delta = 2.5$. For simplicity we will simply take $w_{eq} = (1/4, 1/4, 1/4, 1/4)$.

```{r, echo=TRUE, print=FALSE}
dispersion <- c(.376,.253,.360,.333,.360,.600,.397,.396,.578,.775) / 1000
sigma <- BLCOP:::.symmetricMatrix(dispersion, dim = 4)
caps <- rep(1/4, 4)
mu <- 2.5 * sigma %*% caps
dim(mu) <- NULL
marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5 )
class(marketDistribution)
```

The class `mvdistribution` works with R multivariate probability distribution "suffixes". `mt` is the R "name"/"suffix" of the multivariate Student-t as found in the package `{mnormt}`. That is, the sampling function is given by `rmt()`, the density by `dmt()`, and so on. The other parameters are those required by these functions to fully parameterize the multivariate Student-t. The `distribution` class works with univariate distributions in a similar way and is used to create the view distributions. We continue with the above example by creating a single view on the DAX.

```{r, echo=TRUE, print=FALSE}
pick <- matrix(0, ncol = 4, nrow = 1, dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX")))
pick[1,"DAX"] <- 1
viewDist <- list(distribution("unif", min = -0.02, max = 0))
views <- COPViews(pick, viewDist = viewDist, confidences = 0.2, assetNames = c("SP", "FTSE", "CAC", "DAX"))
```

As can be seen, the view distributions are given as a list of `distribution` class objects, and the confidences set the $\tau$'s described previously. Here we have assigned a $U(-0.02, 0)$ distribution to our view with confidence $0.2$. Additional views can be added with `addCOPViews()`.

```{r, echo=TRUE, print=FALSE}
newPick <- matrix(0, 1, 2)
dimnames(newPick) <- list(NULL, c("SP", "FTSE"))
newPick[1,] <- c(1, -1) # add a relative view
views <- addCOPViews(newPick, list(distribution("norm", mean = 0.05, sd = 0.02)), 0.5, views)
```

The posterior is calculated with `COPPosterior()`, and the updated marginal distributions can be visualized with `densityPlots()` once again. The calculation is performed by simulation, based on the ideas described in [@MeucciCOP2]. The simulations of the posterior distribution are stored in the `posteriorSims` slot of the class `COPResult` that is returned by `COPPosterior()`.

```{r, echo=TRUE, print=FALSE, fig=TRUE}
marketPosterior <- COPPosterior(marketDistribution, views, numSimulations = 50000)
densityPlots(marketPosterior, assetsSel = 4)
```

## Future developments

While mostly stable, the code is currently in need of some minor cleanup work and refactoring (e.g. pick matrices are referred to as `P` in some places and `pick` in others) as well as improvements in the documentation and examples. Attilio Meucci has also very recently proposed an even more general view-blending method which he calls *Entropy Pooling* and its inclusion would be another obvious extension of this package's functionality in the longer term.

## References
/scratch/gouwar.j/cran-all/cranData/BLCOP/vignettes/BLCOP.Rmd
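A small worked sketch of the closed-form posterior quoted in the vignette (not from the package sources; the two-asset numbers are invented). It evaluates $\mu_{BL} = ((\tau\Sigma)^{-1} + P^{T}\Omega^{-1}P)^{-1}((\tau\Sigma)^{-1}\pi + P^{T}\Omega^{-1}q)$ directly in base R, which is the computation that `posteriorEst()` performs.

tau   <- 1/2
Sigma <- diag(c(0.04, 0.09))            # toy prior covariance for two assets
piEq  <- c(0.02, 0.03)                  # equilibrium ("prior") means
P     <- matrix(c(1, -1), nrow = 1)     # one relative view: asset 1 minus asset 2
q     <- 0.01
Omega <- matrix(0.01, 1, 1)             # view variance, i.e. confidence 1/0.01 = 100
A     <- solve(tau * Sigma) + t(P) %*% solve(Omega) %*% P
muBL  <- solve(A, solve(tau * Sigma) %*% piEq + t(P) %*% solve(Omega) %*% q)
muBL                                    # posterior mean vector
SigmaBL <- Sigma + solve(A)             # posterior covariance of the market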
#'Computes the Black-Litterman posterior distribution. #' #'@description BL_post_distr computes the posterior distribution in the Black-Litterman model starting from an arbitrary prior distribution, #' given as a discrete time series \code{dat}, and using \code{views_distr} -- the distribution of views supplied by the user. #' #'@usage BL_post_distr (dat, returns_freq, prior_type = c("elliptic", NULL), market_portfolio, #' SR, P, q, tau, risk = c("CVAR", "DCVAR", "LSAD", "MAD"), alpha = NULL, #' views_distr, views_cov_matrix_type = c("diag", "full"), cov_matrix = NULL) #' #'@param dat Time series of returns data; dat = cbind(rr, pk), where \eqn{rr} is an array (time series) of market asset returns, #' for \eqn{n} returns and \eqn{k} assets it is an array with \eqn{\dim(rr) = (n, k)}, #' \eqn{pk} is a vector of length \eqn{n} containing probabilities of returns. #'@param returns_freq Frequency of data in time series \code{dat}; given as the number of data rows corresponding to a period of one year, #'i.e. 52 for weekly data or 12 for monthly data. #'@param prior_type Type of distribution in time series \code{dat}; can be "elliptic" -- \eqn{rr} is distributed according #'to (any) elliptical distribution, NULL -- \eqn{rr} is distributed according to any non-elliptical distribution. #'@param market_portfolio Market portfolio -- benchmark (equilibrium) portfolio (for details see Palczewski&Palczewski). #'@param SR Benchmark Sharpe ratio. #'@param P "Pick" matrix in the Black-Litterman model (see Palczewski&Palczewski). #'@param q Vector of investor's views on future returns in the Black-Litterman model (see Palczewski&Palczewski). #'@param tau Confidence parameter in the Black-Litterman model. #'@param risk Risk measure chosen for optimization; one of "CVAR", "DCVAR", "LSAD", "MAD", where #' "CVAR" – denotes Conditional Value-at-Risk (CVaR), #' "DCVAR" – denotes deviation CVaR, #' "LSAD" – denotes Lower Semi Absolute Deviation, #' "MAD" – denotes Mean Absolute Deviation. #'@param alpha Value of alpha quantile in the definition of risk measures CVAR and DCVAR. Can be any number when the risk measure is parameter free. #'@param views_distr Distribution of views. An external function submitted by the user which computes densities of the distribution of views at given data points. #' It is assumed implicitly that this distribution is an elliptical distribution (but any other distribution type can be used #'provided the call to this function preserves the structure described below). #' The call to that function has to be of the following form #' \code{FUN(x,q,covmat,COF = NULL)}, where \code{x} is a matrix of data points whose rows are the coordinates of the points at which the density is computed, #'\code{q} is a vector of investor's views, #' \code{covmat} is the covariance matrix of the distribution and \code{COF} is a vector of additional parameters characterizing the distribution (if needed). #'@param views_cov_matrix_type Type of the covariance matrix of the distribution of views; can be: #'"diag" -- diagonal part of the covariance matrix is used; #'"full" -- the complete covariance matrix is used; #'(for details see Palczewski&Palczewski). #'@param cov_matrix Covariance matrix used for computation of the market expected return (\code{RM}) from the formula #' \code{RM = SR * sqrt( t(w_m) * cov_matrix * w_m)} where \code{w_m} is the market portfolio #' and \code{SR} the benchmark Sharpe ratio. #'When \code{cov_matrix} = NULL, the covariance matrix is computed from matrix \eqn{rr} in data set \code{dat}.
#'@return #' \tabular{llll}{ #'\code{post_distr} \tab a time series of data for posterior distribution; for a time series of length \eqn{n} and \eqn{k} assets \cr #' #'\code{} \tab it is a matrix \eqn{(n, k+1)}, where columns (1:k) contain return vectors and the last column \cr #' #'\code{} \tab probabilities of returns. #'} #' #'@examples #'library(mvtnorm) #'k = 3 #'num =100 #'dat <- cbind(rmvnorm (n=num, mean = rep(0,k), sigma=diag(k)), matrix(1/num,num,1)) #'# a data sample with num rows and (k+1) columns for k assets; #'returns_freq = 52 # we assume that data frequency is 1 week #'w_m <- rep(1/k,k) # benchmark portfolio, a vector of length k, #'SR = 0.5 # Sharpe ratio #'Pe <- diag(k) # we assume that views are "absolute views" #'qe <- rep(0.05, k) # user's opinions on future returns (views) #'tau = 0.02 #'BL_post_distr(dat, returns_freq, NULL, w_m, SR, Pe, qe, tau, risk = "MAD", alpha = 0, #' views_distr = observ_normal, "diag", cov_matrix = NULL) #' #' #' #' #'@references Palczewski, J., Palczewski, A., Black-Litterman Model for Continuous Distributions (2016). Available at SSRN: https://ssrn.com/abstract=2744621. #' #'@export BL_post_distr <- function(dat, returns_freq, prior_type = c("elliptic", NULL), market_portfolio, SR, P, q, tau, risk = c("CVAR", "DCVAR", "LSAD", "MAD"), alpha = NULL, views_distr, views_cov_matrix_type = c("diag", "full"), cov_matrix = NULL) { FUN = match.fun(views_distr, descend = FALSE) PARAM = views_cov_matrix_type risk <- match.arg(risk) prior_type <- match.arg(prior_type) w_m = market_portfolio/sum(market_portfolio) if (!is.null(cov_matrix)){ sample_cov = cov_matrix } else { disc_data = .discrete_variance (returns_freq, dat) sample_cov = disc_data$variance } nVar <- length(w_m) if( !all.equal( dim( sample_cov ), c( nVar, nVar ) ) == TRUE ) { stop( paste( "Number of asset for which return data are provided ", "must be the same as length of portfolio weights.\n" ) ) } if (is.null(prior_type)) { RM = SR * sqrt( t(w_m) %*% sample_cov %*% w_m) eq_mu = equilibrium_mean (dat, w_m, RM/returns_freq, risk, alpha)} else { Agamma = SR / sqrt( t(w_m) %*% sample_cov %*% w_m) eq_mu = .equilibrium_mean_elliptic (sample_cov/returns_freq, w_m, Agamma[1,1]) } tmp = .post_distr_new (dat, t(eq_mu$market_returns), q/returns_freq, P, sample_cov/returns_freq, tau, FUN, PARAM) return(list(post_distr =tmp)) }
/scratch/gouwar.j/cran-all/cranData/BLModel/R/BL_post_distr.R
#' Extracts the diagonal from matrix x.
#'
#'@param x matrix whose diagonal is extracted
#'@return vector containing the diagonal elements of x
#'
#'
.diag_of <- function (x)
{
  if ((m <- min(dim(x))) == 0)
    return(numeric(0))
  y <- c(x)[1 + 0:(m - 1) * (dim(x)[1] + 1)]
  nms <- dimnames(x)
  if (is.list(nms) && !any(sapply(nms, is.null)) &&
      identical((nm <- nms[[1]][1:m]), nms[[2]][1:m]))
    names(y) <- nm
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/BLModel/R/diag_of.R
#'For a given k-dimensional discrete distribution the function computes the expected value vector and covariance matrix of this distribution
#'
#'@param returns_coef scaling coefficient used to annualize the moments (data frequency, e.g. 52 for weekly data)
#'@param returns matrix of returns with the corresponding probabilities in the last column
#'
#'
.discrete_variance <- function (returns_coef, returns )
{
  k = ncol(returns)-1
  n = nrow(returns)
  pk = returns[,k+1]
  ra = as.matrix(returns[,1:k])
  if( is.null(colnames(ra))) {
    clab <- as.character(1:k)
  } else {
    clab <- colnames(ra)
  }
  mu = matrix(0,1,k)
  for (i in 1:n ){
    mu = mu + ra[i, ] * pk[i]
  }
  rr = (ra - matrix(1,n,1)%*%mu)
  dimnames (rr) = NULL
  for (i in 1:n ){
    rr[i, ] = rr[i, ] * sqrt(pk[i])
  }
  mu = mu * returns_coef
  colnames (mu) = clab
  rownames (mu) = as.character("assets excess returns")
  cov = (t(rr) %*% rr) * returns_coef
  colnames (cov) = clab
  rownames (cov) = clab
  return (list (mu_disc = mu, variance = cov))
}
/scratch/gouwar.j/cran-all/cranData/BLModel/R/discrete_variance.R
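## Illustrative sketch (not part of the BLModel sources): with equal
## probabilities 1/n, the covariance returned by the internal helper
## .discrete_variance() is the maximum-likelihood (divide-by-n) covariance of
## the returns scaled by returns_coef. Assumes BLModel and mvtnorm are
## installed; the helper is reached via ':::' because it is not exported.
rr <- mvtnorm::rmvnorm(100, mean = rep(0, 3), sigma = diag(3))
dat <- cbind(rr, rep(1/100, 100))
out <- BLModel:::.discrete_variance(52, dat)
all.equal(out$variance, 52 * cov(rr) * (100 - 1) / 100,
          check.attributes = FALSE)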
#'Solves the inverse optimization to mean-risk standard optimization problem to find equilibrium returns. #' The function is invoked by BL_post_distr and arguments are supplemented by BL_post_distr. #' #'@description The function computes the vector of equilibrium returns implied by a market portfolio. #'The vector of means for the mean-risk optimization problem is found by inverse optimization. \cr #'The optimization problem is:\cr #'\eqn{\min F(w_m^{T} r)}\cr #'subject to\cr #'\eqn{w_m^{T} E(r) \ge RM},\cr #'where \cr #'\eqn{F} is a risk measure -- one from the list c("CVAR", "DCVAR", "LSAD", "MAD"),\cr #'\eqn{r} is a time series of market returns,\cr #'\eqn{w_m} is market portfolio,\cr #'\eqn{RM} is market expected return. #' #'@param dat Time series of returns data; dat = cbind(rr, pk), where \eqn{rr} is an array (time series) of market asset returns, #' for \eqn{n} returns and \eqn{k} assets it is an array with \eqn{\dim(rr) = (n, k)}, #' \eqn{pk} is a vector of length \eqn{n} containing probabilities of returns. #'@param w_m Market portfolio. #'@param RM Market_expected_return. #'@param risk A risk measure, one from the list c("CVAR", "DCVAR", "LSAD", "MAD"). #'@param alpha Value of alpha quantile in the definition of risk measures CVAR and DCVAR. Can be any number when risk measure is parameter free. #'@return #' \tabular{llll}{ #'\code{market_returns} \tab a vector of market returns obtain by inverse optimization; this is vector \eqn{E(r)}\cr #' #'\code{ } \tab from the description of this function. #'} #' #'@examples #' #'# In normal usage all data are supplemented by function BL_post_distr. #'library(mvtnorm) #'k = 3 #'num =100 #'dat <- cbind(rmvnorm (n=num, mean = rep(0,k), sigma=diag(k)), matrix(1/num,num,1)) #'# a data sample with num rows and (k+1) columns for k assets; #'w_m <- rep(1/k,k) # market portfolio. #'RM = 0.05 # market expected return. #'equilibrium_mean (dat, w_m, RM, risk = "CVAR", alpha = 0.95) #' #'@references Palczewski, J., Palczewski, A., Black-Litterman Model for Continuous Distributions (2016). Available at SSRN: https://ssrn.com/abstract=2744621. 
#'@export equilibrium_mean <- function (dat, w_m, RM, risk = c("CVAR", "DCVAR", "LSAD", "MAD"), alpha=0.95) { k = ncol(dat)-1 n = nrow(dat) x_m = w_m RM = RM if (length(x_m) != k){ stop( paste( "Length of a vector of assets weights must be the same", "as a number of assets for which returns data are provided.\n" ) ) } if (RM <= 0){ stop( paste("Market portfolio return must be positive.\n" ) ) } risk = toupper(risk) risk <- match.arg(risk) cvarind = switch(risk, CVAR = TRUE, DCVAR = TRUE, LSAD = FALSE, MAD = FALSE) # center the returns # changing sign we pass from returns to losses ra = -as.matrix(dat[,1:k]) pk = dat[,k+1] mu = matrix(0,1,k) for (i in 1:n ){ mu = mu + ra[i, ] * pk[i] } dimnames (mu) = NULL rr = ra - matrix(1,n,1)%*%mu dimnames (rr) = NULL ## Labels if( is.null(colnames(ra))) { ralab <- as.character(1:k) } else { ralab <- colnames(ra) } returns_m = rr %*% x_m o = order (returns_m) sorted_returns = as.matrix(returns_m [o]) weight = as.matrix( pk[o]) if (cvarind) { index = sum(cumsum(weight) < alpha) +1 mu_m = t( t(rr [o [(index + 1):length(o)],])%*% (weight[(index + 1):length(o)])) + (sum(weight[1:index]) - alpha) * rr [o[index],] } else { index = sum(sorted_returns <= 0 ) mu_m = t( t(rr [o [1:(index)],])%*% (weight[1:(index)])) } u0 = (mu_m %*% x_m)/RM mu_m = mu_m / u0[1,1] mu_m = t(mu_m) rownames (mu_m) = ralab colnames (mu_m) = as.character("assets excess returns") return (list (market_returns = mu_m)) }
/scratch/gouwar.j/cran-all/cranData/BLModel/R/equilibrium_mean.R
#'Solves the inverse portfolio optimization problem with risk measured by variance to find equilibrium returns
#'
#'@param MCov covariance matrix of asset returns
#'@param MarketPortfolio market portfolio (vector of asset weights)
#'@param MarketPriceOfRisk market price of risk
#'@return res list containing market_returns and portfolio
#'
#'
.equilibrium_mean_elliptic <- function (MCov, MarketPortfolio, MarketPriceOfRisk)
{
  Portfolio = cbind(MarketPortfolio)
  if( is.null(rownames(MCov))) {
    clab <- as.character(1:nrow(MCov))
  } else {
    clab <- rownames(MCov)
  }
  rownames(Portfolio) = clab
  colnames(Portfolio)= as.character("assets weights")
  mu = MarketPriceOfRisk * MCov %*% Portfolio
  rownames(mu) = clab
  colnames(mu)= as.character("assets excess returns")
  res = list(market_returns = mu, portfolio = Portfolio)
  return (res)
}
/scratch/gouwar.j/cran-all/cranData/BLModel/R/equilibrium_mean_elliptic.R
#'Makes a diagonal matrix with values from x
#'
#'
#'Function make_diag() takes a vector x and returns a matrix whose diagonal consists of the values of x.
#'
#'@param x numeric vector
#'@return diagonal matrix y
.make_diag <- function (x){
  if(!is.numeric(x)){
    stop(paste("Wrong input: x must be a numeric vector"))
  }
  n <- length(x)
  y <- array(0, c(n, n))
  if (n > 0)
    y[1 + 0:(n - 1) * (n + 1)] <- x
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/BLModel/R/make_diag.R
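## Illustrative sketch (not part of the BLModel sources): for a numeric vector
## the internal helper .make_diag() builds the same matrix as base::diag();
## assumes BLModel is installed, with the helper reached via ':::'.
x <- c(1.5, 2, 0.5)
all.equal(BLModel:::.make_diag(x), diag(x))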
#'Example of distribution of views -- normal distribution #' #'@description Function observ_normal computes density of normal distribution of views using the formula \cr #'\eqn{f(x) = c_k*\exp(-((x-q)^{T}*covmat^{-1}*(x-q))/2)},\cr #'where \eqn{c_k} is a normalization constant (depends on the dimension of \eqn{x} and \eqn{q}). #' #'@param x Data points matrix which collects in rows coordinates of points in which distribution density is computed. #'@param q Vector of investor's views. #'@param covmat Covariance matrix of the distribution. #' #'@return function returns a vector of distribution densities in data points x. #' #'@examples #' k =3 #' observ_normal (x = matrix(c(rep(0.5,k),rep(0.2,k)),k,2), q = matrix(0,k,1), #' covmat = diag(k)) #' #'@references Palczewski, J., Palczewski, A., Black-Litterman Model for Continuous Distributions (2016). Available at SSRN: https://ssrn.com/abstract=2744621. #' #'@export observ_normal <- function (x, q, covmat ) { # for normal distributions of observations k = ncol(covmat) Omega = covmat # dispersion matrix for normal distribution n = ncol(x) q = matrix(q,k,1) aux1 = t(q %*% matrix(1,1,n)- x) Omega_inv = solve(Omega) ck = 1/sqrt((2*pi)^k*det(Omega)) tpm = rowSums(((aux1) %*% Omega_inv) * (aux1)) odf = ck * exp(-tpm/2) odf = (as.vector(odf, mode="numeric")) odf = cbind((odf)) dimnames ( odf) = NULL return (odf) }
/scratch/gouwar.j/cran-all/cranData/BLModel/R/observ_normal.R
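## Illustrative sketch (not part of the BLModel sources): observ_normal()
## reproduces the multivariate normal density, with data points passed in the
## columns of x as in the @examples above. Assumes BLModel is loaded and
## mvtnorm is installed.
k <- 3
x <- matrix(c(rep(0.5, k), rep(0.2, k)), k, 2)
all.equal(as.vector(observ_normal(x, q = matrix(0, k, 1), covmat = diag(k))),
          mvtnorm::dmvnorm(t(x), mean = rep(0, k), sigma = diag(k)))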
#'Example of distribution of views -- power exponential distribution #' #'@description Function observ_powerexp computes density of power exponential distribution of views using the formula\cr #'\eqn{f(x) = c_k*\exp(- ((x-q)^{T}*\Sigma^{-1}*(x-q))^{\beta}/2)},\cr #' where \eqn{c_k} is a normalization constant (depends on the dimension of \eqn{x} and \eqn{q}) and \eqn{\Sigma} is the dispersion matrix. #' #'@param x Data points matrix which collects in rows coordinates of points in which distribution density is computed. #'@param q Vector of investor's views. #'@param covmat Covariance matrix of the distribution; dispersion matrix \eqn{\Sigma} is computed from \code{covmat}. #'@param beta Shape parameter of the power exponential distribution. #' #'@return function returns a vector of distribution densities in data points x. #' #'@examples #' k =3 #'observ_powerexp (x = matrix(c(rep(0.5,k),rep(0.2,k)),k,2), q = matrix(0,k,1), #' covmat = diag(k), beta = 0.6) #' #'@references Gomez, E., Gomez-Villegas, M., Marin, J., A multivariate generalization of the power exponential family of distributions. Commun. Statist. Theory Methods, 27 (1998), 589--600. #'DOI: 10.1080/03610929808832115 #'@export observ_powerexp <- function (x, q, covmat, beta = 0.6) { # for power-exponential distribution of observations betal = beta k = ncol(covmat) Omega = k*gamma(k/(2*betal))/(2^(1/betal)*gamma((k+2)/(2*betal)))*covmat # dispersion matrix n = ncol(x) q = matrix(q,k,1) aux1 = t(q %*% matrix(1,1,n)- x) Omega_inv = solve(Omega) tpm = rowSums(((aux1) %*% Omega_inv) * (aux1)) ck = k*gamma(k/2)/(pi^(k/2)* gamma(1 + k/(2*betal)) * 2^(1 + k/(2*betal)) *sqrt(det(Omega))) odf = ck * exp(-tpm^betal/2 ) odf = (as.vector(odf, mode="numeric")) odf = cbind((odf)) dimnames ( odf) = NULL return (odf) }
/scratch/gouwar.j/cran-all/cranData/BLModel/R/observ_powerexp.R
#'Example of distribution of views -- Student t-distribution #' #'@description Function observ_ts computes density of Student t-distribution of views using the formula \cr #'\eqn{f(x) = c_k*(1 +(x-q)^{T}*\Sigma^{-1}*(x-q)/df)^{(-(df+k)/2)}}, \cr #'where \eqn{c_k} is a normalization constant (depends on the dimension of \eqn{x} and \eqn{q}) and \eqn{\Sigma} is the dispersion matrix. #' #'@param x Data points matrix which collects in rows coordinates of points in which distribution density is computed. #'@param q Vector of investor's views. #'@param covmat Covariance matrix of the distribution; dispersion matrix \eqn{\Sigma} is computed from \code{covmat}. #'@param df Number of degrees of freedom of Students t-distribution. #' #'@return function returns a vector of observation distribution densities in data points x. #' #'@examples #' k =3 #'observ_ts (x = matrix(c(rep(0.5,k),rep(0.2,k)),k,2), q = matrix(0,k,1), covmat = diag(k), #' df=5) #' #'@references Kotz, S., Nadarajah, S., Multivariate t Distributions and Their Applications. Cambridge University Press, 2004. #' #'@export observ_ts <- function (x, q, covmat, df = 5) { # for Student t-distribution of observations dfp = df k = ncol(covmat) Omega = (dfp-2)/dfp * covmat # dispersion matrix n = ncol(x) q = matrix(q,k,1) aux1 = t(q %*% matrix(1,1,n)- x) Omega_inv = solve(Omega) tpm = rowSums(((aux1) %*% Omega_inv) * (aux1)) ck = gamma((dfp+k)/2)/(gamma(dfp/2)*sqrt(dfp^k*pi^k*det(Omega))) odf = ck * (1 + tpm/dfp)^(-(dfp+k)/2) odf = (as.vector(odf, mode="numeric")) odf = cbind((odf)) dimnames ( odf) = NULL return (odf) }
/scratch/gouwar.j/cran-all/cranData/BLModel/R/observ_ts.R
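## Illustrative sketch (not part of the BLModel sources): observ_ts() agrees
## with mvtnorm::dmvt() once covmat is rescaled to the dispersion matrix
## (df - 2)/df * covmat. Assumes BLModel is loaded and mvtnorm is installed.
k <- 3
x <- matrix(c(rep(0.5, k), rep(0.2, k)), k, 2)
all.equal(as.vector(observ_ts(x, q = matrix(0, k, 1), covmat = diag(k), df = 5)),
          mvtnorm::dmvt(t(x), delta = rep(0, k), sigma = (5 - 2)/5 * diag(k),
                        df = 5, log = FALSE))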
#'Computes posterior distribution for discrete prior
#'
#'@param dat time series of returns data with probabilities in the last column
#'@param mu_m equilibrium market returns
#'@param q vector of investor's views
#'@param P "pick" matrix
#'@param covmat covariance matrix of returns
#'@param tau confidence parameter
#'@param FUN density function of the distribution of views
#'@param PARAM type of the covariance matrix of the distribution of views ("diag" or "full")
#'
#'
.post_distr_new <- function (dat, mu_m, q, P, covmat, tau, FUN, PARAM)
{
  # center the returns and shift to new mean mu_m
  k = ncol(dat)-1
  n = nrow(dat)
  pk = dat[,k+1]
  ra = as.matrix(dat[,1:k])
  mu = matrix(0,1,k)
  for (i in 1:n ){
    mu = mu + ra[i, ] * pk[i]
  }
  dimnames (mu) = NULL
  rr = ra - matrix(1,n,1) %*% (mu - mu_m)
  colnames (rr) = colnames(ra)
  view_cov_type = switch(PARAM, diag = TRUE, full = FALSE,
                         stop( "wrong 'views_cov_matrix_type' value"))
  aux = P %*% (covmat %*% t(P))
  if (view_cov_type) {
    Omega = .make_diag (.diag_of(aux / tau))
  } else {
    Omega = aux/tau
  }
  data_points = P %*% t(rr)
  new_prob = pk * FUN(data_points, q, Omega)
  new_prob = cbind( new_prob/sum(new_prob))
  postdf = cbind(rr, new_prob)
  return (postdf)
}
/scratch/gouwar.j/cran-all/cranData/BLModel/R/post_distr_new.R
#################################################################
### description: ###
### Compute the log-censored likelihood of multivariate normal
### (additive error) or log-normal (multiplicative error)
### for a given row of data
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' function to compute the censored log-likelihood function
#' for a given row of the data
#' @param inputRow vector a given row of the data
#' @param meanVec vector mean
#' @param covMat matrix covariance matrix
#' @param LOQ LOQ
#' @param isMultiplicative logical variable indicating whether
#' an additive error model (FALSE - default) or a multiplicative
#' model should be used
#' @return the value of log-likelihood function
#'
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @noRd
computeLogLikforEachRow <- function(inputRow, meanVec, covMat, LOQ,
    isMultiplicative = FALSE){
  isCensored <- inputRow < LOQ
  # require("mvtnorm")
  if(any(isCensored)){
    idxCensored <- which(isCensored %in% T)
    idxObserved <- which(isCensored %in% F)
    censoredCovMat <- covMat[idxCensored,idxCensored]
    observedCovMat <- covMat[idxObserved,idxObserved]
    censoredObservedCovMat <- matrix(covMat[idxCensored,idxObserved],
        nrow=length(idxCensored))
    censoredMean <- meanVec[idxCensored]
    observedMean <- meanVec[idxObserved]
    conditionalMean <- as.vector(
        censoredMean + censoredObservedCovMat%*%solve(observedCovMat)%*%
            (inputRow[idxObserved]-observedMean))
    conditionalCovMat <- censoredCovMat - censoredObservedCovMat%*%
        solve(observedCovMat)%*%t(censoredObservedCovMat)
    # This defines the lower bound for normal (additive) or
    # log-normal (multiplicative) errors
    invisible(ifelse(isMultiplicative,
        lowerCensoredPart <- rep(-Inf,length(idxCensored)),
        lowerCensoredPart <- rep(0,length(idxCensored))))
    computedLogLik <- (log(as.numeric(
        pmvnorm(lower=lowerCensoredPart,upper=rep((LOQ) ,length(idxCensored)),
            mean=conditionalMean,sigma=as.matrix(conditionalCovMat))))+
        log(dmvnorm(inputRow[idxObserved],
            mean=observedMean,sigma=as.matrix(observedCovMat))))
  } else{
    computedLogLik<-log(dmvnorm(
        inputRow, mean=meanVec,sigma=as.matrix(covMat)))
  }
  return(computedLogLik)
}
### This function replaces the following functions in the
### original codes:
### sumlog_function
### sumlog_function_g
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/computeLogLikforEachRow.R
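## Illustrative sketch (not part of the BLOQ sources): when no component of a
## row is below LOQ, the internal row log-likelihood reduces to the multivariate
## normal log-density. Assumes BLOQ and mvtnorm are installed; the helper is
## reached via ':::' because it is not exported.
obsRow <- c(0.8, 0.6, 0.4)
mu  <- rep(0.5, 3)
Sig <- 0.1 * diag(3)
all.equal(BLOQ:::computeLogLikforEachRow(obsRow, mu, Sig, LOQ = 0.1),
          mvtnorm::dmvnorm(obsRow, mean = mu, sigma = Sig, log = TRUE))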
#################################################################
### description: ###
### Compute the log-censored likelihood of multivariate normal
### (additive error) or log-normal (multiplicative error)
### for a given dataset
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' function to compute the censored log-likelihood function
#' for a given dataset
#' @param parametersMVN vector parameters of the multivariate
#' normal distribution
#' @param inputData matrix or data frame the input dataset
#' @param LOQ LOQ
#'@param isMultiplicative logical variable indicating whether
#' an additive error model (FALSE - default) or a multiplicative
#' model should be used
#' @return value of the censored multivariate normal
#' log-likelihood
#'
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @noRd
computeMVNLogLik <- function(parametersMVN, inputData, LOQ,
    isMultiplicative = FALSE){
  parametersLength <- length(parametersMVN)
  muLength <- (parametersLength+1)/3
  meanVec <- parametersMVN[1:(muLength)]
  covVec <- parametersMVN[(muLength+1):parametersLength]
  covMat <- convertCovandVec(covVec)
  invisible(ifelse(isMultiplicative,
      computedLogLikMVN <- sum(apply(inputData,1,computeLogLikforEachRow,
          meanVec, covMat, LOQ, isMultiplicative = TRUE)),
      computedLogLikMVN <- sum(apply(inputData,1,computeLogLikforEachRow,
          meanVec, covMat, LOQ, isMultiplicative = FALSE))))
  return(computedLogLikMVN)
}
### This function replaces the following functions in the
### original codes:
### mvnormlik_cens_srs_off
### mvnormlik_cens_srs_off_g
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/computeMVNLogLik.R
#################################################################
### description: ###
### Transforming a covariance matrix to a vector or vice versa
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' function to convert a covariance matrix (vector) to a vector
#' (covariance matrix) with a special structure allowing
#' non-zero correlation only for two successive variables.
#' @param input matrix or vector which should be converted
#' @return converted matrix or vector
#'
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @noRd
convertCovandVec <- function (input){
  input <- as.matrix(input)
  ## Check whether input is a matrix or a vector, so then we
  ## proceed to the appropriate command.
  if (1 %in% dim(input)){
    vecLength <- length(input)
    # This function solely converts a covariance matrix, i.e.,
    # a positive definite symmetric matrix, into a vector within
    # the assumptions of this method, therefore, it is expected
    # that the length of the given vector be an odd number
    if (vecLength /2 == floor(vecLength/2)){
      stop("The given vector should have an odd length!")
    }
    numCol <- 0.5*(vecLength+1)
    tmpOutput <- matrix(0,numCol,numCol)
    selectingMat <- row(tmpOutput) - col(tmpOutput)
    tmpOutput[(selectingMat <= 0) & (selectingMat >= -1)] <- input
    # correcting negative diagonal elements
    tmpOutput[,which(diag(tmpOutput)<0)] <-
        -1 * tmpOutput[,which(diag(tmpOutput)<0)]
    # Get the original matrix from Cholesky decomposition
    output <- t(tmpOutput)%*%tmpOutput
  }else{
    # As only two successive time points are allowed to be
    # correlated, this is used to select such elements.
    selectingMat <- row(input) - col(input)
    output <- chol(input)[(selectingMat <= 0) & (selectingMat >=-1)]
  }
  return(output)
}
### This function replaces the following functions in the
### original codes:
### cova2vec_off
### vec2cova_off
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/convertCovandVec.R
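## Illustrative sketch (not part of the BLOQ sources): round trip through the
## internal converter for a covariance matrix in which only successive time
## points are correlated. Assumes BLOQ is installed; the helper is reached via
## ':::' because it is not exported.
Sigma <- diag(3)
Sigma[1, 2] <- Sigma[2, 1] <- 0.3
Sigma[2, 3] <- Sigma[3, 2] <- 0.2
v <- BLOQ:::convertCovandVec(Sigma)   # vector of length 2 * 3 - 1 = 5
all.equal(BLOQ:::convertCovandVec(v), Sigma)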
################################################################# ### description: ### ### Computing an estimate of area under the concentrations versus ### time curve (AUC) and its variance in an PK-study with ### non-compartmental (NCA) for additive amd multiplicative ### error models. ### approach. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' Estimate AUC and its standard error #' #' function to estimate AUC and compute standard error of this #' estimate #' @encoding UTF-8 #' @param imputedData numeric matrix or data frame of size #' n by J (n the sample size and J the number of time points) #' @param timePoints vector of time points #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param na.rm logical variable indicating whether the rows with #' missing values should be ignored or not. #' @return vector of length 2 with estimated AUC and its #' standard error #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' + 1, 1, seq(0.5,3,0.5)) #' # Impute the data with BLOQ's with one of the provided methods, #' # for example, here we use ROS #' imputedDataROS <- imputeROS(genDataFixedEffects, 0.1) #' # estimate AUC and its standard error #' estimateAUCandStdErr(imputedDataROS,seq(0.5,3,0.5)) #' #' @author Vahid Nassiri, Helen Yvette Barnett #' @export estimateAUCandStdErr <- function (imputedData,timePoints, isMultiplicative = FALSE, na.rm = FALSE){ ## The estimate of AUC and its variance are computed for ## additive and multiplicative error models separately ## With multiplicative error, dur to takign logarithm, ## zero and negative values are not allowed, so first we check this if (isMultiplicative & any(imputedData <= 0)){ stop("Due to taking logarithm, negative or zero elements in the imputedData are not allowed with multiplicative error model.") } ## The length of timePionts and number of columns of ## input Data should be the same. if (length(timePoints)!= ncol(imputedData)){ stop("The length of timePionts and number of columns of input Data should be the same.") } if (na.rm == FALSE & sum(is.na(imputedData)) > 0){ stop("There are missing values in the data! You may sue na.rm = TRUE to remove rows with NA.") } if (na.rm == TRUE){ imputedData <- imputedData[which(complete.cases(imputedData)),] } ## First step is to compute the necessary weights numTimePoints <- length(timePoints) invisible(ifelse (numTimePoints >2, computedWeights <- c(timePoints[2]/2, (timePoints[3:numTimePoints] -timePoints[1:(numTimePoints-2)])/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2), computedWeights <- c(timePoints[2]/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2))) ## Having the weights, AUC estimate and its variance ## can be computed. if (!isMultiplicative){ meanCols <- colMeans(imputedData) estimatedAUC <- sum(computedWeights * meanCols) varEstimatedAUC <- var(imputedData%*%computedWeights) } if (isMultiplicative){ meanCols <- colMeans(log(imputedData)) estimatedAUC <- sum(computedWeights * exp(meanCols)) varEstimatedAUC <- (computedWeights * exp(meanCols))%*% var(log(imputedData))%*%(computedWeights * exp(meanCols)) } ## Following general convention, standard error will be ## reported instead of the variance. 
estimatedAUCandStd <- c(estimatedAUC, sqrt(varEstimatedAUC)) names(estimatedAUCandStd)=c('Estimated AUC','Std Err.') return(estimatedAUCandStd) } ### estimateAUCandStdErr replaces the following functrions ### in the original codes: ### weig ### AUC_calc_single ### AUC_var_data ### AUC_val_var_g
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/estimateAUCandStdErr.R
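## Illustrative sketch (not part of the BLOQ sources): the weights built inside
## estimateAUCandStdErr() implement the linear trapezoidal rule with an implicit
## zero concentration at time 0, so sum(weights * y) equals the trapezoid AUC on
## the grid c(0, timePoints). The vector y below is hypothetical.
timePoints <- seq(0.5, 3, 0.5)
y <- c(1.8, 1.4, 1.1, 0.9, 0.7, 0.5)
J <- length(timePoints)
weights <- c(timePoints[2] / 2,
             (timePoints[3:J] - timePoints[1:(J - 2)]) / 2,
             (timePoints[J] - timePoints[J - 1]) / 2)
tt <- c(0, timePoints)
yy <- c(0, y)
all.equal(sum(weights * y),
          sum(diff(tt) * (head(yy, -1) + tail(yy, -1)) / 2))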
################################################################# ### description: ### ### Implementing the approach which uses censored ### maximum likelihood per time point to estimate time point ### specific mean and standard deviation which are needed ### to estimate AUC and its standard error. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' estimate AUC with censored maximum likelihood per time point #' #' function to estimate mean and standard error of each column #' of data with BLOQ's using a censored maximum likelihood (CML) approach, #' then use these estimates for estimating AUC and its standard #' error #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param timePoints vector of time points #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param onlyFitCML logical variable with FALSE as default, if TRUE only the #' censored maximum likelihood estimates will be calculated #' @param printCMLmessage logical variable with TRUE as default, if TRUE then #' messages regarding the convergence status of censored #' log-likelihood maximization will be printed. #' @param optimizationMethod single string specifying the method to be used for optimizing the log-likelihood, #' the default is NULL that allows the function to decide the about the best method. Otherwise, one can select among choices #' available via R package maxLik: "NR" (for Newton-Raphson), "BFGS" (for Broyden-Fletcher-Goldfarb-Shanno), #' "BFGSR" (for the BFGS algorithm implemented in R), #' "BHHH" (for Berndt-Hall-Hall-Hausman), "SANN" (for Simulated ANNealing), #' "CG" (for Conjugate Gradients), or "NM" (for Nelder-Mead). #' Lower-case letters (such as "nr" for Newton-Raphson) are allowed. #' @param CMLcontrol list of arguments to control #' convergence of maximization algorithm. It is the same argument #' as control in the function maxLik in the R package maxLik #' @return a list with three components: output of maxLik function, #' estimated parameters for #' each column using censored maximum likelihood, and estimated #' AUC and its standard error. #' @seealso \href{https://www.rdocumentation.org/packages/maxLik/versions/1.3-4/topics/maxLik}{maxLik} #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' 1, 1, seq(0.5,3,0.5)) #' # Multiplicative error model #' estimateAUCwithCMLperTimePoint(genDataFixedEffects, 0.1, seq(0.5,3,0.5), TRUE) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export estimateAUCwithCMLperTimePoint <- function(inputData, LOQ, timePoints, isMultiplicative = FALSE, onlyFitCML = FALSE, printCMLmessage = TRUE, optimizationMethod = NULL, CMLcontrol = NULL){ ## With multiplicative error, dur to takign logarithm, ## zero values are not allowed, so first we check this if (isMultiplicative & any(inputData <= 0)){ stop("Due to taking logarithm, negative or zero elements in the inputData are not allowed with multiplicative error model.") } ## The length of timePionts and number of columns of ## input Data should be the same. 
if (length(timePoints)!= ncol(inputData)){ stop("The length of timePionts and number of columns of input Data should be the same.") } ## First based on the value of isMultiplicative the log of the ## data as well as the LOQ will be taken. if (isMultiplicative){ inputData <- log10(inputData) LOQ <- log10(LOQ) } ## Cheking if there are any BLOQs, if not just jump to the ## AUC calculations CML = NULL if (sum(inputData<LOQ, na.rm = TRUE) == 0){ invisible(ifelse(isMultiplicative, estCMLmean <- exp(apply(inputData, 2, mean)), estCMLmean <- apply(inputData, 2, mean))) estCMLsd <- apply(inputData, 2, sd) }else{ #require ("maxLik") estCMLmean <- rep(0, length(timePoints)) estCMLsd <- rep(0, length(timePoints)) ## Estimate the paramerers using censored maximum ## likelihood (CML) for each time point separately for (iCol in 1:ncol(inputData)){ dataCol <- inputData[,iCol] # cheking missing values, as the censored maximum # likelihood is estimated per time point, we ignore # all missing value, but let the user know about it if (sum(is.na(dataCol))>0){ if (sum(is.na(dataCol))==1){ warning(paste("There is one missing value in column", iCol, " that is ignored!")) } if (sum(is.na(dataCol))>1){ warning(paste("There are", sum(is.na(dataCol)), " missing values in column", iCol, " that are ignored!")) } idxObserved <- which(is.na(dataCol) == FALSE) dataCol <- dataCol[idxObserved] } censoredCol <- dataCol isCensored <- dataCol < LOQ if (any(isCensored)){ censoredCol [dataCol < LOQ] <- LOQ # Defining the censored log-likelihood function logLikCensoredFun <- function(inputParameters){ mean <- inputParameters[1] sd <- inputParameters[2] if(sd<0){ return(NA) } sum(ifelse(isCensored, pnorm(LOQ, mean = mean, sd = sd, log.p = TRUE), dnorm(censoredCol, mean = mean, sd = sd, log = TRUE))) } # estimate the parameters using CML if (is.null(optimizationMethod)){ fitCML <- try(maxLik(logLik = logLikCensoredFun, start = c(mean = 0, sd = 1), control = CMLcontrol)) }else{ fitCML <- try(maxLik(logLik = logLikCensoredFun, start = c(mean = 0, sd = 1), method = optimizationMethod,control = CMLcontrol)) } # controlling non-conbvergence issues: if the parameters # cannot be estimated using censored maximum likelihood # then the function stops CML[[iCol]] <- fitCML if (is.character(fitCML)){ stop(paste("The optimization function stopped with the following message:",fitCML)) } if (is.null(fitCML$estimate)){ stop(paste("The function stopped because censored maximum likelihood could not estimate the parameters for column", iCol)) } if (printCMLmessage){ print(paste("The message from maximizing censored log-likelihood", "for column", iCol,":", fitCML$message)) flush.console() } invisible(ifelse (isMultiplicative, estCMLmean [iCol] <- exp(as.numeric(fitCML$estimate[1])), estCMLmean [iCol] <- as.numeric(fitCML$estimate[1]))) estCMLsd [iCol] <- as.numeric(fitCML$estimate[2]) }else{ estCMLmean[iCol] <- mean(dataCol) invisible(ifelse (isMultiplicative, estCMLmean [iCol] <- exp(mean(dataCol)), estCMLmean [iCol] <- mean(dataCol))) estCMLsd[iCol] <- sd(dataCol) CML[[iCol]] <- NULL } } } estCML <- cbind(estCMLmean,estCMLsd) colnames(estCML) <- c('mean','sd') ## Computing AUC and its variance if (!onlyFitCML){ ## Computing the necessary weights for calculating AUC ## and its variance numTimePoints <- length(timePoints) invisible(ifelse (numTimePoints >2, computedWeights <- c(timePoints[2]/2, (timePoints[3:numTimePoints] -timePoints[1:(numTimePoints-2)])/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2), computedWeights <- 
c(timePoints[2]/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2))) estAUC <- sum(computedWeights * estCMLmean) invisible(ifelse(isMultiplicative, varAUC <- sum(((computedWeights*estCMLmean)^2)* (estCMLsd^2)), varAUC <- sum(((computedWeights*estCMLsd)^2)))) estimatedAUCandVariance <- c(estAUC,sqrt(varAUC)) names(estimatedAUCandVariance)=c('Estimated AUC','Std Err.') }else{ estimatedAUCandVariance <- NULL } return(list(CML = CML, estCML=estCML,AUC=estimatedAUCandVariance)) } ### This function replaces the following functions in the ### original codes: ### lik_data_means_gv ### lik_data_means_av
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/estimateAUCwithCMLperTimePoint.R
################################################################# ### description: ### ### Implementing the approach which uses full censored ### maximum likelihood with multivariatre normal distribution ### (with a special strucutre for the covariance matrix) ### to estimate time point specific mean and standard deviation ### which are needed to estimate AUC and its standard error. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' estimate AUC with Full censored maximum likelihood #' #' function to estimate mean and and covariance matrix of censored data using a #' full censored maximum likelihood approach (with a #' special structure for the covariance matrix which only allows correlations between #' successive time points), then use these #' estimates for estimating AUC and its standard error #' #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param timePoints vector of time points #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param onlyFitCML logical variable with FALSE as default, if TRUE only the #' censored maximum likelihood estimates will be calculated #' @param printCMLmessage logical variable with TRUE as default, if TRUE then #' messages regarding the convergence status of censored #' log-likelihood maximization will be printed. #' @param optimizationMethod single string specifying the method to be used for optimizing the log-likelihood, #' the default is NULL that allows the function to decide the about the best method. Otherwise, one can select among choices #' available via R package maxLik: "NR" (for Newton-Raphson), "BFGS" (for Broyden-Fletcher-Goldfarb-Shanno), #' "BFGSR" (for the BFGS algorithm implemented in R), #' "BHHH" (for Berndt-Hall-Hall-Hausman), "SANN" (for Simulated ANNealing), #' "CG" (for Conjugate Gradients), or "NM" (for Nelder-Mead). #' Lower-case letters (such as "nr" for Newton-Raphson) are allowed. #' @param CMLcontrol list of arguments to control #' convergence of maximization algorithm. It is the same argument #' as control in the function maxLik in the R package maxLik #' @param na.rm logical variable indicating whether the lines with missing values #' should be ignored (TRUE, default) or not (FALSE). #' @return a list with three components: output of maxLik function, #' estimated parameters (mean vector and the covariance matrix) #' using censored maximum likelihood, and estimated #' AUC and its standard error. 
#' @seealso \href{https://www.rdocumentation.org/packages/maxLik/versions/1.3-4/topics/maxLik}{maxLik} #' @examples #' #' # generate data from Beal model with only fixed effects #' set.seed(123) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' 1, 1, seq(0.5,3,1.5)) #' estimateAUCwithFullCML(genDataFixedEffects, 0.15, seq(0.5,3,1.5)) #' #' @author Vahid Nassiri, Helen Yvette Barnett #' @export estimateAUCwithFullCML <- function(inputData, LOQ, timePoints, isMultiplicative = FALSE, onlyFitCML = FALSE, printCMLmessage = TRUE, optimizationMethod = NULL, CMLcontrol = NULL, na.rm = TRUE){ ## With multiplicative error, dur to takign logarithm, ## zero values are not allowed, so first we check this if (isMultiplicative & any(inputData <= 0)){ stop("Due to taking logarithm, negative or zero elements in the inputData are not allowed with multiplicative error model.") } ## The length of timePionts and number of columns of ## input Data should be the same. if (length(timePoints)!= ncol(inputData)){ stop("The length of timePionts and number of columns of input Data should be the same.") } ## For the multivariate normal missings are not allowed if (na.rm){ inputData <- inputData[complete.cases(inputData),] }else{ if (sum(apply(inputData, 2, is.na)) > 0){ stop("When using censored maximum lielihood with multivariate normal, missing value are not allowed.") } } ## First based on the value of isMultiplicative the log of the ## data as well as the LOQ will be taken. if (isMultiplicative){ inputData <- log10(inputData) LOQ <- log10(LOQ) } ## computing starting values by first replacing all BLOQ's with ## LOQ and then fit the likelihood to the NOW non-censored data startValuesData <- imputeConstant(inputData, LOQ, LOQ) #require("mvnmle") # as mvnmle becomes orphan now, we may remove this dependency as follows. 
#startValuesEst <- mlest(startValuesData) startValuesEst <- list() startValuesEst$muhat <- apply(startValuesData, 2, mean) startValuesEst$sigmahat <- cov(startValuesData)* (nrow(startValuesData)-1) / nrow(startValuesData) startMeanEst <- startValuesEst$muhat startCovMatEst <- startValuesEst$sigmahat startValues <- as.numeric(c(startMeanEst, convertCovandVec(startCovMatEst))) ## Computing the CMLE, the maximization method is fixed by ## Conjugate Gradient #require("maxLik") if (is.null(optimizationMethod)){ invisible(ifelse(isMultiplicative, fitCML <- try(maxLik(logLik=computeMVNLogLik, inputData = inputData, LOQ = LOQ, isMultiplicative = TRUE, start = startValues, control = CMLcontrol)), fitCML <- try(maxLik(logLik=computeMVNLogLik, inputData = inputData, LOQ = LOQ, isMultiplicative = FALSE, start = startValues, control = CMLcontrol)))) }else{ invisible(ifelse(isMultiplicative, fitCML <- try(maxLik(logLik=computeMVNLogLik, inputData = inputData, LOQ = LOQ, isMultiplicative = TRUE, start = startValues, method = optimizationMethod, control = CMLcontrol)), fitCML <- try(maxLik(logLik=computeMVNLogLik, inputData = inputData, LOQ = LOQ, isMultiplicative = FALSE, start = startValues, , method = optimizationMethod, control = CMLcontrol)))) } if (is.character(fitCML)){ stop(paste("The optimization function stopped with the following message:",fitCML)) } if (is.null(fitCML$estimate)){ stop("The function stopped because censored maximum likelihood could not estimate the parameters!") } if (printCMLmessage){ print(paste("The message from maximizing censored log-likelihood:", fitCML$message)) flush.console() } ## extracting and converting estimated parameters muLength = length(timePoints) invisible(ifelse(isMultiplicative, estCMLmean <- exp(fitCML$estimate[1:muLength]), estCMLmean <- fitCML$estimate[1:muLength])) estCMLcovMat <- convertCovandVec( fitCML$estimate[(muLength+1):(3*muLength-1)]) estCML <- list(mu = estCMLmean, Sigma = estCMLcovMat) ## Computing AUC and its variance if (!onlyFitCML){ ## Computing the necessary weights for calculating AUC ## and its variance numTimePoints <- length(timePoints) invisible(ifelse (numTimePoints >2, computedWeights <- c(timePoints[2]/2, (timePoints[3:numTimePoints] -timePoints[1:(numTimePoints-2)])/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2), computedWeights <- c(timePoints[2]/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2))) estAUC <- sum(computedWeights * estCML$mu) invisible(ifelse(isMultiplicative, varAUC <- (computedWeights * estCML$mu) %*% estCML$Sigma %*% (computedWeights * estCML$mu) , varAUC <- computedWeights %*% estCML$Sigma %*% computedWeights)) estimatedAUCandVariance <- c(estAUC,sqrt(varAUC)) names(estimatedAUCandVariance)=c('Estimated AUC','Std Err.') }else{ estimatedAUCandVariance <- NULL } return(list(CML = fitCML, estCML=estCML, AUC=estimatedAUCandVariance)) } ### This function replaces the following functions in the ### original codes: ### start_fr_off ### multi_norm_cens_srs_off ### multi_norm_cens_srs_off_g ### full_like_norm_off ### full_like_norm_mv_off ### full_like_log_off ### full_like_lognorm_mv_off
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/estimateAUCwithFullCML.R
################################################################# ### description: ### ### Thsi function combines two methods of estimating AUC using ### Multivatiate normal censored maximum likelihood: using the ### full likelihood with special structure for the covariance matruix ### or estimating it unstructured using a pairwise approach. ### ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' estimate AUC with multivariate normal censored maximum likelihood #' #' function to estimate mean and and covariance matrix of censored data using a #' full censored maximum likelihood approach (with a #' special structure for the covariance matrix which only allows correlations between #' successive time points), then use these #' estimates for estimating AUC and its standard error #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param timePoints vector of time points #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param onlyFitCML logical variable with FALSE as default, if TRUE only the #' censored maximum likelihood estimates will be calculated #' @param printCMLmessage logical variable with TRUE as default, if TRUE then #' messages regarding the convergence status of censored #' log-likelihood maximization will be printed. #' @param optimizationMethod single string specifying the method to be used for optimizing the log-likelihood, #' the default is NULL that allows the function to decide the about the best method. Otherwise, one can select among choices #' available via R package maxLik: "NR" (for Newton-Raphson), "BFGS" (for Broyden-Fletcher-Goldfarb-Shanno), #' "BFGSR" (for the BFGS algorithm implemented in R), #' "BHHH" (for Berndt-Hall-Hall-Hausman), "SANN" (for Simulated ANNealing), #' "CG" (for Conjugate Gradients), or "NM" (for Nelder-Mead). #' Lower-case letters (such as "nr" for Newton-Raphson) are allowed. #' @param CMLcontrol list of arguments to control #' convergence of maximization algorithm. It is the same argument #' as control in the function maxLik in the R package maxLik #' @param na.rm logical variable indicating whether the lines with missing values #' should be ignored (TRUE, default) or not (FALSE). #' @param isPairwise logical variable, if TRUE the unstructured covariance #' matrix will be estimated using pairwise approach, otherwise (FALSE, default) #' the full maximum likelihood will be used with a special structure imposed on the covariance matrix. #' @return a list with three components: output of maxLik function, #' estimated parameters (mean vector and the covariance matrix) #' using censored maximum likelihood, and estimated #' AUC and its standard error. 
#' @seealso \href{https://www.rdocumentation.org/packages/maxLik/versions/1.3-4/topics/maxLik}{maxLik} #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' 1, 1, seq(0.5,3,1.5)) #' estimateAUCwithMVNCML(genDataFixedEffects, 0.1, seq(0.5,3,1.5)) #' estimateAUCwithMVNCML(genDataFixedEffects, 0.1, seq(0.5,3,1.5), #' isPairwise = TRUE) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export estimateAUCwithMVNCML <- function(inputData, LOQ, timePoints, isMultiplicative = FALSE, onlyFitCML = FALSE, printCMLmessage = TRUE, optimizationMethod = NULL, CMLcontrol = NULL, na.rm = TRUE, isPairwise = FALSE){ ## Use one of two options based on the value of isPairwise if (isPairwise){ outputResults <- estimateAUCwithPairwiseCML (inputData, LOQ, timePoints, isMultiplicative, onlyFitCML, optimizationMethod = optimizationMethod, CMLcontrol, na.rm) }else{ outputResults <- estimateAUCwithFullCML (inputData, LOQ, timePoints, isMultiplicative, onlyFitCML, printCMLmessage, optimizationMethod = optimizationMethod, CMLcontrol, na.rm) } return(outputResults) }
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/estimateAUCwithMVNCML.R
################################################################# ### description: ### ### Implementing the approach which uses full censored ### maximum likelihood with multivariatre normal distribution ### fitted in a pairwise fashion, to estimate time point specific ### mean and standard deviation which are needed to estimate AUC ### and its standard error. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' estimate AUCwith pairwise censored maximum likelihood #' #' function to estimate mean and and covariance matrix of censored data using a #' full censored maximum likelihood approach via fitting all possible pairs, then use these #' estimates for estimating AUC and its standard error #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param timePoints vector of time points #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param onlyFitCML logical variable with FALSE as default, if TRUE only the #' censored maximum likelihood estimates will be calculated. #' @param optimizationMethod single string specifying the method to be used for optimizing the log-likelihood, #' the default is NULL that allows the function to decide the about the best method. Otherwise, one can select among choices #' available via R package maxLik: "NR" (for Newton-Raphson), "BFGS" (for Broyden-Fletcher-Goldfarb-Shanno), #' "BFGSR" (for the BFGS algorithm implemented in R), #' "BHHH" (for Berndt-Hall-Hall-Hausman), "SANN" (for Simulated ANNealing), #' "CG" (for Conjugate Gradients), or "NM" (for Nelder-Mead). #' Lower-case letters (such as "nr" for Newton-Raphson) are allowed. #' @param CMLcontrol list of arguments to control #' convergence of maximization algorithm. It is the same argument #' as control in the function maxLik in the R package maxLik #' @param na.rm logical variable indicating whether the lines with missing values #' should be ignored (TRUE, default) or not (FALSE). Note that, it will be applied #' for the sub-datasets regarding each pair. #' @return a list with three components: output of maxLik function, #' estimated parameters (mean vector and the covariance matrix) #' using censored maximum likelihood, and estimated #' AUC and its standard error. #' @seealso \href{https://www.rdocumentation.org/packages/maxLik/versions/1.3-4/topics/maxLik}{maxLik} #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' 1, 1, seq(0.5,3,1.5)) #' estimateAUCwithPairwiseCML(genDataFixedEffects, 0.1, seq(0.5,3,1.5)) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export estimateAUCwithPairwiseCML <- function(inputData, LOQ, timePoints, isMultiplicative = FALSE, onlyFitCML = FALSE, optimizationMethod = NULL, CMLcontrol = NULL, na.rm = TRUE){ if (isMultiplicative & any(inputData <= 0)){ stop("Due to taking logarithm, negative or zero elements in the inputData are not allowed with multiplicative error model.") } ## The length of timePionts and number of columns of ## input Data should be the same. 
if (length(timePoints)!= ncol(inputData)){ stop("The length of timePionts and number of columns of input Data should be the same.") } ## missing data if (!na.rm & sum(apply(inputData, 2, is.na)) > 0){ stop("When using censored maximum lielihood with multivariate normal, missing value are not allowed.") } # first define all possible pairs numTimePoints <- length(timePoints) allPairsIdx <- combn(numTimePoints, 2) numPairs <- ncol(allPairsIdx) estMu <- matrix(NA, numPairs, numTimePoints) estSigma <- matrix(NA, numPairs, numTimePoints^2) fitCML <- NULL for (iPairs in 1:numPairs){ pairInputData <- inputData[,allPairsIdx[,iPairs]] # cheking the missing values and removing them if na.rm = TRUE if (na.rm){ pairInputData <- pairInputData[complete.cases(pairInputData),] } # Here we check if for every subject, we have at least one # non-censoted measurement. If both of the measurements are # censored, that particular subject is removed from computations # of the current pair, it will be announced by issuing a warnin message. numCensoredSubjectwise <- apply(pairInputData< LOQ, 1, sum) if (any(numCensoredSubjectwise >1)){ pairInputData <- pairInputData[-which(numCensoredSubjectwise>1),] warning(paste("For pair ", paste(allPairsIdx[,iPairs],collapse="-")," the following subjects are removed due to being completely censored for this particular pair: ", which(numCensoredSubjectwise>1))) } fitPair <- estimateAUCwithFullCML(pairInputData, LOQ, timePoints[allPairsIdx[,iPairs]], isMultiplicative, onlyFitCML = TRUE, printCMLmessage = FALSE, CMLcontrol, optimizationMethod = optimizationMethod, na.rm = FALSE) estMu[iPairs, allPairsIdx[,iPairs]] <- fitPair$estCML$mu tmpSigma <- matrix(NA, numTimePoints, numTimePoints) tmpSigma[allPairsIdx[,iPairs], allPairsIdx[,iPairs]] <- fitPair$estCML$Sigma estSigma [iPairs,] <- c(tmpSigma) fitCML[[iPairs]] <- fitPair$CML } pairwiseEstMu <- apply(estMu, 2, mean, na.rm = TRUE) pairwiseEstSigma <- matrix(apply(estSigma, 2, mean, na.rm = TRUE), numTimePoints, numTimePoints) estCML = list(mu = pairwiseEstMu, Sigma = pairwiseEstSigma) ## Computing AUC and its variance if (!onlyFitCML){ ## Computing the necessary weights for calculating AUC ## and its variance invisible(ifelse (numTimePoints >2, computedWeights <- c(timePoints[2]/2, (timePoints[3:numTimePoints] -timePoints[1:(numTimePoints-2)])/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2), computedWeights <- c(timePoints[2]/2, (timePoints[numTimePoints]- timePoints[(numTimePoints-1)])/2))) estAUC <- sum(computedWeights * pairwiseEstMu) invisible(ifelse(isMultiplicative, varAUC <- (computedWeights * pairwiseEstMu) %*% pairwiseEstSigma %*% (computedWeights * pairwiseEstMu) , varAUC <- computedWeights %*% pairwiseEstSigma %*% computedWeights)) estimatedAUCandVariance <- c(estAUC,sqrt(varAUC)) names(estimatedAUCandVariance)=c('Estimated AUC','Std Err.') }else{ estimatedAUCandVariance <- NULL } return(list(pairwiseCML = fitCML, PairwiseEstCML=estCML, AUC=estimatedAUCandVariance)) }
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/estimateAUCwithPairwiseCML.R
################################################################# ### description: ### ### function to impute BLOQ's. The user can define column-specific ## methods to impute the BLOQ's. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' impute BLOQ's with various methods #' #' function to impute BLOQ's. The user can define column-specific methods to impute the BLOQ's. #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param imputationMethod could be a single string or a vector of strings with the same length as #' the number of time points (ncol(inputData)). If it is left blank, then the imputation is done using #' kernel density estimation method for the columns with at least one non-BLOQ component. For all the #' rest (only BLOQ) the constant imputation is used. The allowed values are #' "constant", "ros", "kernel", "cml" corresponding to constant imputation, #' imputing using regression on order statistics, imputing using kernel density estimator, and #' imputing using censored maximum likelihood, respectively. #' @param progressPrint logical variable indicating whether the imputation progress should be printed or not. #' @param ... any other argument which should be changed according to the input arguments regarding #' the functions corresponding to different imputation methods. #' @return a list with two components: imputed dataset, and the methods used to impute each column. #' @examples #' set.seed(111) #' inputData <- simulateBealModelFixedEffects(10, 0.693,1, 1, seq(0.5,3,0.5)) #' LOQ = 0.125 #' imputeBLOQ(inputData, LOQ, #' imputationMethod = c("cml", "ros", "kernel","constant", "constant", "constant"), #' maxIter = 500, isMultiplicative = TRUE, constantValue = LOQ) #' imputeBLOQ(inputData, LOQ, maxIter = 500, isMultiplicative = TRUE, #' constantValue = LOQ/5, epsilon = 1e-04) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export imputeBLOQ <- function(inputData, LOQ, imputationMethod , progressPrint = FALSE, ...){ ## defining the method of imputation. If it's not specified, then the default ## will be used. The default uses Kernel density estimator for columns ## who have at least one non-BLOQ, and for only BLOQ columns imputeConstant will be used. ## Otherwise, it can be either a single string or a vector of strings with the length ## as the number of time points (number of columns in inputData). If former, then the same ## imputation method is used for all columns, otherwise, each column will use the specified method. 
methodNames <- c("constant", "ros", "kernel", "cml") if (missing(imputationMethod)){ imputationMethod <- rep("kernel", ncol(inputData)) numBLOQ <- apply(inputData < LOQ, 2, sum, na.rm = TRUE) numObserved <- apply(apply(inputData, 2, complete.cases), 2, sum) imputationMethod[numBLOQ > numObserved-1] <- "constant" }else{ if (sum(imputationMethod %in% methodNames) != length(imputationMethod)){ stop("Please either set no imputationMethod or select it from 'contant', 'ros', 'kernel', 'cml'") } if (length(imputationMethod) == 1){ imputationMethod <- rep(imputationMethod, ncol(inputData)) }else{ if (length(imputationMethod)!= ncol(inputData)){ stop("If specified, the imputationMethod should be a single string, or a vector of strings with length the same as number of columns in the inputData.") } } } ## making the input parameters for all functions inputParams <- list(...) inputParams[["LOQ"]] <- LOQ ## defining set of input arguments for different methods paramsConstant <- c("inputData", "LOQ", "constantValue") paramsROS <- c("inputData", "LOQ", "isMultiplicative", "useSeed") paramsKernel <- c("inputData", "LOQ", "epsilon", "maxIter", "useSeed") paramsCML <- c("inputData", "LOQ", "isMultiplicative", "useSeed", "printCMLmessage", "CMLcontrol") ## Define an itnernal function to do the imputation based on selected method imputeMethod <- function(impMet){ if (impMet == "constant"){ inputParamsIDX <- which(names(inputParams) %in% paramsConstant) imputedData <- do.call(imputeConstant, inputParams[inputParamsIDX]) } if (impMet == "ros"){ inputParamsIDX <- which(names(inputParams) %in% paramsROS) imputedData <- do.call(imputeROS, inputParams[inputParamsIDX]) } if (impMet == "kernel"){ inputParamsIDX <- which(names(inputParams) %in% paramsKernel) imputedData <- do.call(imputeKernelDensityEstimation, inputParams[inputParamsIDX]) } if (impMet == "cml"){ inputParamsIDX <- which(names(inputParams) %in% paramsCML) imputedData <- do.call(imputeCML, inputParams[inputParamsIDX]) } return(imputedData) } ## If the same imputation method should be applied for all time points if (length(imputationMethod) == 1){ inputParams[["inputData"]] <- inputData imputedData <- imputeMethod(imputationMethod) }else{ imputedData <- matrix(0, nrow(inputData), ncol(inputData)) for (iCol in 1:ncol(inputData)){ if (progressPrint){ print(paste("imputing column ", iCol, " with method ", imputationMethod[iCol])) } inputParams[["inputData"]] <- as.matrix(inputData[, iCol]) imputedData[,iCol] <- imputeMethod(imputationMethod[iCol]) } } return(list(imputedData = imputedData, imputationMethod = imputationMethod)) }
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeBLOQ.R
################################################################# ### description: ### ### Implementing the imputation approach which uses censored ### maximum likelihood per time point to estimate time point ### specific mean and standard deviation then use them to ### impute BLOQ's. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' imputing BLOQ's using censored maximum likelihood #' #' function to impute BLOQ's using quantiles of a normal #' distribution with mean and standard error estimates using #' censored maximum likelihood #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative error #' model (TRUE) should be used #' @param useSeed scalar, set a seed to make the results #' reproducible, default is runif(1), it is used to randomly #' order the first imputed column (if the first column has any BLOQ's) #' @param printCMLmessage logical variable with TRUE as default, if TRUE then #' messages regarding the convergence status of censored #' log-likelihood maximization will be printed. #' @param CMLcontrol list of arguments to control #' convergence of maximization algorithm. It is the same argument #' as control in the function maxLik in the R package maxLik #' @return the imputed dataset: a numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' @seealso \href{https://www.rdocumentation.org/packages/maxLik/versions/1.3-4/topics/maxLik}{maxLik} #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' + 1, 1, seq(0.5,3,0.5)) #' imputeCML(genDataFixedEffects, 0.1, FALSE, 1) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export imputeCML <- function(inputData, LOQ, isMultiplicative = FALSE, useSeed = runif(1), printCMLmessage = TRUE, CMLcontrol = NULL){ ## Check whether the inputData includes any BLOQ ## if not, the same dataset as inputData will be returned. 
imputedData <- inputData if (sum(imputedData<LOQ, na.rm = TRUE) == 0){ print("There are not any BLOQ's in the dataset, so imputation is not needed") }else{ ## First we estimate the mean and stadard error of each ## column using censored maximum likelihood estCML <- estimateAUCwithCMLperTimePoint(inputData, LOQ, timePoints = rep(0,ncol(inputData)), isMultiplicative, onlyFitCML = TRUE, printCMLmessage, CMLcontrol)$estCML nCensored <- apply (inputData < LOQ, 2, sum) isCensored <- which(nCensored > 0) invisible(ifelse(isMultiplicative, probBLOQ <- pnorm(log(LOQ),estCML[isCensored,1], estCML[isCensored,2]), probBLOQ <- pnorm(LOQ,estCML[isCensored,1], estCML[isCensored,2]))) for (iCensoredCol in 1:length(isCensored)){ colCensored <- isCensored[iCensoredCol] ## Distributing probability of being BLOQ among different ## BLOQ positions and compute corresponding quantiles ## using estimated mean and sd for each colums with ## a BLOQ # find the quantiles as the imputed values, note that # as the estimate are found for multiplicative or additive # models via "estimateAUCwithCMLperTimePoint", the mean # and sd of the qnorm does not need to be transformed, # only at the end for multiplicative error one needs to # exponentiate the quantile. imputedValues <- qnorm(((1:nCensored[colCensored]) / (nCensored[colCensored]+1)) * probBLOQ[iCensoredCol], mean = estCML[colCensored,1], sd = estCML[colCensored,2]) if (isMultiplicative){ imputedValues = exp(imputedValues) } ## Order the imputed values if(colCensored == 1){ # set seed to make the results reproducible set.seed(useSeed) imputingOrder <- sample(nCensored[colCensored]) }else{ imputingOrder <- rank(imputedData[,(colCensored-1)] [which(inputData[,colCensored]< LOQ)]) } imputedData[which(inputData[,colCensored]< LOQ), colCensored]<- sort(imputedValues)[imputingOrder] } } return(imputedData) } ### this function replaces the following function is the ### original codes" ### impute_data3
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeCML.R
################################################################# ### description: ### ### Implementing the imputation approach which simply replaces ### every BLOQ with a pre-defined constant. The two common ### options are using LOQ/2 or 0. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' imputing BLOQ's with a constant value #' #' function to impute BLOQ observations by replacing them #' with a constant value. #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param constantValue scalar, the constant value which replaces #' all BLOQ's, default is LOQ/2 #' @return the imputed dataset: a numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' + 1, 1, seq(0.5,3,0.5)) #' # replacing BLOQ's with LOQ/2 #' imputeConstant(genDataFixedEffects, 0.1, 0.1/2) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export imputeConstant <- function(inputData, LOQ, constantValue){ if (missing(constantValue)){ constantValue <- LOQ/2 } imputedData <- inputData imputedData[inputData<LOQ]=constantValue return(imputedData) } ### This function replaces the following functions in the ### original codes: ### replace_0 ### replace_LOQ2
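### Illustrative sketch (not part of the package): the two common choices for
### the constant, LOQ/2 and 0, applied to a small made-up matrix of
### concentrations with LOQ = 0.1. Wrapped in if (FALSE) so it is not executed
### when the package is built; run the body interactively instead.
if (FALSE) {
	exampleData <- matrix(c(0.05, 0.2, 0.08, 0.4), nrow = 2)
	imputeConstant(exampleData, LOQ = 0.1, constantValue = 0.1/2)  # replace by LOQ/2
	imputeConstant(exampleData, LOQ = 0.1, constantValue = 0)      # replace by 0
}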
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeConstant.R
################################################################# ### description: ### ### imputes the BLOQ observation using kernel density estimation. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' imputing BLOQ's using kernel density estimation #' #' function to impute BLOQ observations using kernel density #' estimation. #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar, limit of quantification value #' @param epsilon scalar with 1e-07 as default, the difference between two iterations #' which achieving it would stop the procedure (convergence). #' @param maxIter scalar, the maximum number of iterations with 1000 as default. #' @param useSeed scalar, set a seed to make the results #' reproducible, default is runif(1), it is used to randomly #' order the first imputed column (if the first column has any BLOQ's) #' @return the imputed dataset: a numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' + 1, 1, seq(0.5,3,0.5)) #' imputeKernelDensityEstimation(genDataFixedEffects, 0.1, epsilon = 1e-05) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export imputeKernelDensityEstimation <- function (inputData, LOQ, epsilon = 1e-07, maxIter = 1000, useSeed = runif(1)){ ## Check whether the inputData includes any BLOQ ## if not, the same dataset as inputData will be returned. imputedData <- inputData if (sum(imputedData<LOQ, na.rm = TRUE) == 0){ print("There are not any BLOQ's in the dataset, so imputation is not needed") }else{ for(iCol in 1:ncol(inputData)){ observedCol <- inputData[,iCol] imputedCol <- imputedData[,iCol] idxObserved <- which(is.na(observedCol) == FALSE) dataCol <- observedCol[idxObserved] imputedCol <- imputedCol[idxObserved] isCensored <- dataCol < LOQ if(any(isCensored)){ numCensored <- sum(isCensored, na.rm = TRUE) imputedValues <- rep(0,numCensored) ## ordering the imputed values # if the very first column has any BLOQ then the order # is random if(iCol==1){ set.seed(useSeed) # the following has been used as seed in the original codes #set.seed(numCensored) imputingOrder <- sample(1:numCensored) # for columns other than the first one the order is based # on the previous column. }else{ imputingOrder <- rank(imputedData[, (iCol-1)][which(isCensored)]) } ## imputing the first BLOQ imputedValues[1] <- imputeKernelDensityEstimationInnerIteration(dataCol[!isCensored], LOQ, epsilon, maxIter) ## if there are more than one BLOQ, here they are imputed. if (numCensored > 1){ for(iCensored in 2:numCensored){ imputedValues[iCensored] <- imputeKernelDensityEstimationInnerIteration( c(imputedValues[1:(iCensored-1)], dataCol[!isCensored]), LOQ, epsilon, maxIter) } } imputedCol[which(isCensored)] <- sort(imputedValues)[imputingOrder] imputedData[idxObserved,iCol] <- imputedCol } } } return(imputedData) } ### This function replaces the following function in the ### original codes: ### kdens_iterations_multi_imp
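### Illustrative sketch (not part of the package): the rank-based ordering rule
### used above. Sorted imputed values are assigned so that subjects keep the
### same relative order they had in the previous (already imputed) column.
### All numbers are made up. Wrapped in if (FALSE) so it is not executed when
### the package is built; run the body interactively instead.
if (FALSE) {
	previousColumnAtCensoredRows <- c(0.30, 0.10, 0.20)
	imputedValues <- c(0.041, 0.055, 0.049)              # unordered imputations
	imputingOrder <- rank(previousColumnAtCensoredRows)  # 3 1 2
	sort(imputedValues)[imputingOrder]                   # 0.055 0.041 0.049
}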
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeKernelDensityEstimation.R
#################################################################
### description:                                              ###
### computes the inner iteration to produce one imputed value
### for the BLOQ observations.
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' function to produce a single imputation using kernel density
#' estimation, this will be used as an inner iteration for the
#' function imputeKernelDensityEstimation
#' @encoding UTF-8
#' @param inputCol numeric vector of the size
#' n (the sample size)
#' @param LOQ scalar limit of quantification value
#' @param epsilon scalar, the difference between two successive iterations
#' below which the procedure stops (convergence)
#' @param maxIter scalar, the maximum number of iterations with 1000 as default.
#' @return single imputed value
#'
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @noRd
imputeKernelDensityEstimationInnerIteration <- function(
		inputCol, LOQ, epsilon, maxIter){
	## checking the input
	if (length(inputCol) < 1){
		stop("At least one non-BLOQ observation is needed to impute using kernel density estimator")
	}
	## initializing
	imputedObs <- 0
	## estimating the density
	estimatedDensity <- density(c(imputedObs,inputCol), from=0)
	## extract x and y from the estimated density for the censored part of the sample
	estimatedDensityCensoredX <- estimatedDensity$x[estimatedDensity$x < LOQ]
	estimatedDensityCensoredY <- estimatedDensity$y[estimatedDensity$x < LOQ]
	## update the imputed observation by taking the
	## conditional expectation
	updatedImputedObs <- sum(
			estimatedDensityCensoredX*estimatedDensityCensoredY)/
			sum(estimatedDensityCensoredY)
	## Iterating the updated imputed observation until it converges,
	## i.e., the difference between two
	## computed conditional expectations becomes smaller
	## than epsilon
	# define number of iterations
	numIter <- 1
	while(abs(imputedObs-updatedImputedObs) > epsilon & numIter <= maxIter){
		imputedObs <- updatedImputedObs
		estimatedDensity <- density(c(imputedObs,inputCol), from=0)
		estimatedDensityCensoredX <- estimatedDensity$x[estimatedDensity$x < LOQ]
		estimatedDensityCensoredY <- estimatedDensity$y[estimatedDensity$x < LOQ]
		updatedImputedObs <- sum(
				estimatedDensityCensoredX*estimatedDensityCensoredY)/
				sum(estimatedDensityCensoredY)
		numIter <- numIter + 1
	}
	if (abs(imputedObs-updatedImputedObs) > epsilon){
		warning("Convergence is not achieved, increase maxIter or use a larger epsilon.")
	}
	return(updatedImputedObs)
}
### This function replaces the following function in the
### original codes:
### kdens_iterations
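### Illustrative sketch (not part of the package): one step of the iteration
### above, i.e. the conditional expectation of the estimated density below the
### LOQ, computed for a small made-up sample. Wrapped in if (FALSE) so it is
### not executed when the package is built; run the body interactively instead.
if (FALSE) {
	observedAboveLOQ <- c(0.15, 0.22, 0.31, 0.45)
	exampleLOQ <- 0.1
	currentGuess <- 0
	estDens <- density(c(currentGuess, observedAboveLOQ), from = 0)
	belowLOQ <- estDens$x < exampleLOQ
	## discrete approximation of E[X | X < LOQ] under the estimated density
	sum(estDens$x[belowLOQ] * estDens$y[belowLOQ]) / sum(estDens$y[belowLOQ])
}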
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeKernelDensityEstimationInnerIteration.R
################################################################# ### description: ### ### Implementing the imputation approach which uses Regression ### on order statistics (ROS) to impute the BLOQ's. ### Author: Vahid Nassiri <[email protected]> ### maintainer: Vahid Nassiri <[email protected]> ################################################################# #' imputing BLOQ's using regression on order statistics #' #' function to impute BLOQ's with regression on order statistics #' (ROS) approach. #' @encoding UTF-8 #' @param inputData numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' the input dataset #' @param LOQ scalar limit of quantification value #' @param isMultiplicative logical variable indicating whether #' an additive error model (FALSE) or a multiplicative #' model (TRUE) should be used #' @param useSeed scalar, set a seed to make the results #' reproducible, default is runif(1), it is used to randomly #' order the first imputed column (if the first column has any BLOQ's) #' @return the imputed dataset: a numeric matrix or data frame of the size #' n by J (n the sample size and J the number of time points) #' @examples #' # generate data from Beal model with only fixed effects #' set.seed(111) #' genDataFixedEffects <- simulateBealModelFixedEffects(10, 0.693, #' + 1, 1, seq(0.5,3,0.5)) #' imputeROS(genDataFixedEffects, 0.1) #' @author Vahid Nassiri, Helen Yvette Barnett #' @export imputeROS <- function(inputData, LOQ, isMultiplicative = FALSE, useSeed = runif(1)){ ## Check whether the inputData includes any BLOQ ## if not, the same dataset as inputData will be returned. imputedData <- inputData if (sum(imputedData<LOQ, na.rm = TRUE) == 0){ print("There are not any BLOQ's in the dataset, so imputation is not needed") }else{ for (iCol in 1:ncol(imputedData)){ dataCol <- imputedData[,iCol] # Note that, as it could be possible to have # different number of measurement per time point # i.e., missing value, we ignore the NA's in the # computation, and then replace them with NA's in the # final imputed dataset idxObserved <- which(is.na(dataCol) == FALSE) dataCol <- dataCol[idxObserved] isCensored <- dataCol < LOQ if(any(isCensored)){ invisible(ifelse(isMultiplicative, measuredObs <- sort(log(dataCol[!isCensored])), measuredObs <- sort(dataCol[!isCensored]))) nObs <- length(dataCol) nCensored <- sum((isCensored)) nMeasured <- nObs-nCensored if (nMeasured>1){ ## Calculations regarding regression on ## order statistics (ROS) empiricalExceedanceProbability <- nMeasured/(nMeasured+nCensored) idxMeasured <- 1:nMeasured idxCensored <- 1:nCensored plottingPosiotionsForMeasuredObs <- 1- empiricalExceedanceProbability+ empiricalExceedanceProbability* (idxMeasured/(nMeasured+1)) plottingPosiotionsForCensoredObs <- (idxCensored/(1+nCensored))* (1-empiricalExceedanceProbability) # Fitting the regression model of measured observations # on their corresponding plotting positions regressObsonQuantilesForMeasured <- lm (measuredObs~ qnorm(plottingPosiotionsForMeasuredObs)) imputingModelIntercept <- as.numeric( regressObsonQuantilesForMeasured$coefficients[1]) imputingModelSlope <- as.numeric( regressObsonQuantilesForMeasured$coefficients[2]) # Using the fitted model to compute imputed values # for censored observations (BLOQ's). 
tmpImputedCensoredObs <- imputingModelIntercept+ imputingModelSlope* qnorm(plottingPosiotionsForCensoredObs) invisible(ifelse(isMultiplicative, imputedCensoredObs <- exp(tmpImputedCensoredObs), imputedCensoredObs <- tmpImputedCensoredObs)) # Ordering the imputed values according to the rules. if(iCol==1){ # set seed to make the results reproducible set.seed(useSeed) imputingOrder <- sample(sum(isCensored)) }else{ imputingOrder <- rank( imputedData[, (iCol-1)][which(isCensored)]) } dataCol[which(isCensored)] <- sort(imputedCensoredObs)[imputingOrder] }else{ print(paste('Due to only one measured observation, the imputation cannot be done for column ', iCol,' and the original values are kept')) } imputedData[idxObserved,iCol] <- dataCol } } } return(imputedData) } ### This function replaces the following functions in the ### original codes: ### ROS_impute_data_order ### ROS_impute_data_a_o
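### Illustrative sketch (not part of the package): regression on order
### statistics for a single made-up column with 2 BLOQ's (LOQ = 0.1) and an
### additive error model, following the same plotting-position formulas as
### above. Wrapped in if (FALSE) so it is not executed when the package is
### built; run the body interactively instead.
if (FALSE) {
	measured <- sort(c(0.15, 0.22, 0.31, 0.45))   # observations above the LOQ
	nMeasured <- length(measured)
	nCensored <- 2
	exceedProb <- nMeasured / (nMeasured + nCensored)
	ppMeasured <- 1 - exceedProb + exceedProb * ((1:nMeasured) / (nMeasured + 1))
	ppCensored <- ((1:nCensored) / (1 + nCensored)) * (1 - exceedProb)
	fit <- lm(measured ~ qnorm(ppMeasured))
	## imputed values for the two BLOQ positions
	coef(fit)[1] + coef(fit)[2] * qnorm(ppCensored)
}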
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/imputeROS.R
#################################################################
### description:                                              ###
### Simulating data from a Beal model with fixed effects only.
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' simulate data from Beal model with fixed effects
#'
#' function to generate data from a Beal model with fixed effects
#' @encoding UTF-8
#' @param numSubjects scalar, number of subjects which should be generated
#' @param clearance scalar, clearance
#' @param volumeOfDistribution scalar, volume of distribution
#' @param dose scalar, dose
#' @param timePoints vector of time points
#' @return generated sample with numSubjects as the number of rows
#' and length of timePoints as the number of columns
#' @details The model used to generate data at time t is as follows
#' \deqn{y(t)=C(t)\exp(e(t)),}
#' where \eqn{C(t)}, the PK-model, is defined as follows:
#' \deqn{C(t) = \frac{\mathrm{dose}}{V_d} \exp(-CL \cdot t),}
#' with \eqn{V_d} the volume of distribution and \eqn{CL} as clearance.
#' The error model is considered as \eqn{e(t) \sim N(0, h(t))}, with:
#' \deqn{h(t) = 0.03 + 0.165 \frac{C(t)^{-1}}{C(1.5)^{-1} + C(t)^{-1}}}
#' @seealso Beal S. L., Ways to fit a PK model with some data below
#' the quantification limit, Journal of Pharmacokinetics
#' and Pharmacodynamics, 2001;28(\strong{5}):481–504.
#' @examples
#' set.seed(111)
#' simulateBealModelFixedEffects(10, 0.693,
#' 		+ 1, 1, seq(0.5,3,0.5))
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @export
simulateBealModelFixedEffects <- function(numSubjects, clearance,
		volumeOfDistribution,dose, timePoints){
	simulateBealModelFixedEffects1Subject <- function(x, clearance,
			volumeOfDistribution,dose, timePoints){
		PKmodel<-function(timePoints){
			(dose/volumeOfDistribution)*exp(-clearance*timePoints)
		}
		errorVariance <- function(timePoints){
			0.03+0.165*((1/PKmodel(timePoints))/(1/(PKmodel(1.5))+(1/PKmodel(timePoints))))
		}
		errorModel <- function(timePoints){
			rnorm(1,0,(errorVariance(timePoints))^0.5)
		}
		computingBealModel <- function(timePoints){
			PKmodel(timePoints)*exp(errorModel(timePoints))
		}
		allTimePointsBealModel <- sapply(timePoints,computingBealModel)
		return(allTimePointsBealModel)
	}
	t(sapply(c(1:numSubjects), simulateBealModelFixedEffects1Subject,
					clearance, volumeOfDistribution, dose, timePoints))
}
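### Illustrative sketch (not part of the package): the pieces of the Beal model
### documented above, evaluated at the time points of the @examples call
### (dose = 1, V_d = 1, CL = 0.693). Wrapped in if (FALSE) so it is not
### executed when the package is built; run the body interactively instead.
if (FALSE) {
	dose <- 1; vd <- 1; cl <- 0.693
	tp <- seq(0.5, 3, 0.5)
	concentration <- (dose / vd) * exp(-cl * tp)    # C(t)
	h <- 0.03 + 0.165 * ((1 / concentration) /
				((1 / ((dose / vd) * exp(-cl * 1.5))) + (1 / concentration)))  # h(t)
	## one simulated subject: y(t) = C(t) * exp(e(t)), e(t) ~ N(0, h(t))
	concentration * exp(rnorm(length(tp), 0, sqrt(h)))
}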
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/simulateBealModelFixedEffects.R
#################################################################
### description:                                              ###
### Simulating data from a Beal model with fixed and random effects.
### Author: Vahid Nassiri <[email protected]>
### maintainer: Vahid Nassiri <[email protected]>
#################################################################
#' simulate data from Beal model with fixed and random effects
#'
#' function to generate data from a Beal model with fixed and random effects
#' @encoding UTF-8
#' @param numSubjects scalar, number of subjects which should be generated
#' @param clearance scalar, clearance
#' @param volumeOfDistribution scalar, volume of distribution
#' @param dose scalar, dose
#' @param varCompClearance scalar, standard deviation of
#' the normal distribution generating clearance
#' @param varCompVolumeOfDistribution scalar, standard deviation of
#' the normal distribution generating volume of distribution
#' @param timePoints vector of time points
#' @return generated sample with numSubjects as the number of rows
#' and length of timePoints as the number of columns
#' @details The model used to generate data at time t is as follows
#' \deqn{y(t)=C(t)\exp(e(t)),}
#' where \eqn{C(t)}, the PK-model, is defined as follows:
#' \deqn{C(t) = \frac{\mathrm{dose}}{V_d} \exp(-CL \cdot t),}
#' with \eqn{V_d} the volume of distribution and \eqn{CL} as clearance.
#' The error model is considered as \eqn{e(t) \sim N(0, h(t))}, with:
#' \deqn{h(t) = 0.03 + 0.165 \frac{C(t)^{-1}}{C(1.5)^{-1} + C(t)^{-1}}.}
#' For the mixed effects model, \eqn{CL=\widetilde{CL} \exp{(\eta_1)}}, and
#' \eqn{V_d=\widetilde{V_d} \exp{(\eta_2)}}, where \eqn{\eta_1 \sim N(0, w_1^2)} and
#' \eqn{\eta_2 \sim N(0, w_2^2)}. Note that \eqn{w_1} and \eqn{w_2} are specified by \emph{varCompClearance},
#' and \emph{varCompVolumeOfDistribution} in the arguments, respectively.
#' @seealso Beal S. L., Ways to fit a PK model with some data below the quantification limit, Journal of Pharmacokinetics
#' and Pharmacodynamics, 2001;28(\strong{5}):481–504.
#' @examples
#' set.seed(111)
#' simulateBealModelMixedEffects(10, 0.693,
#' 		+ 1, 1, 0.2,0.2, seq(0.5,3,0.5))
#' @author Vahid Nassiri, Helen Yvette Barnett
#' @export
simulateBealModelMixedEffects <- function(numSubjects, clearance,
		volumeOfDistribution, dose, varCompClearance,
		varCompVolumeOfDistribution, timePoints){
	simulateBealModelMixedEffects1Subject <- function(x,clearance,
			volumeOfDistribution,dose,varCompClearance,
			varCompVolumeOfDistribution, timePoints){
		clearance <- clearance*exp(rnorm(1,0,varCompClearance))
		volumeOfDistribution<-volumeOfDistribution*exp(rnorm(1,0,
						varCompVolumeOfDistribution))
		PKmodel <- function(timePoints){
			(dose/volumeOfDistribution)*exp(-clearance*timePoints)
		}
		errorVariance <- function(timePoints){
			0.03+0.165*(
					(1/PKmodel(timePoints))/(1/(PKmodel(1.5))+
								(1/PKmodel(timePoints))))
		}
		errorModel <- function(timePoints){
			rnorm(1,0,(errorVariance(timePoints))^0.5)
		}
		computingBealModel <- function(timePoints){
			PKmodel(timePoints)*exp(errorModel(timePoints))
		}
		#browser()
		allTimePointsBealModel<-sapply(timePoints, computingBealModel)
		return(allTimePointsBealModel)
	}
	t(sapply(c(1:numSubjects), simulateBealModelMixedEffects1Subject,
					clearance, volumeOfDistribution, dose, varCompClearance,
					varCompVolumeOfDistribution, timePoints))
}
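### Illustrative sketch (not part of the package): how the mixed-effects version
### documented above perturbs clearance and volume of distribution per subject
### before the same fixed-effects machinery is applied. Values are made up.
### Wrapped in if (FALSE) so it is not executed when the package is built;
### run the body interactively instead.
if (FALSE) {
	cl <- 0.693; vd <- 1
	w1 <- 0.2; w2 <- 0.2          # varCompClearance, varCompVolumeOfDistribution
	clSubject <- cl * exp(rnorm(1, 0, w1))   # CL_i = CL * exp(eta_1)
	vdSubject <- vd * exp(rnorm(1, 0, w2))   # V_d,i = V_d * exp(eta_2)
	c(clSubject, vdSubject)
}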
/scratch/gouwar.j/cran-all/cranData/BLOQ/R/simulateBealModelMixedEffects.R
#' @useDynLib BLPestimatoR
#' @importFrom Rcpp sourceCpp
NULL

#' Prepares data and parameters related to the BLP algorithm for estimation.
#'
#' @param model the model to be estimated in R's formula syntax,
#' @param market_identifier character specifying the market identifier (variable name must be included in \code{productData}),
#' @param product_identifier character specifying the product identifier (variable name must be included in \code{productData}),
#' @param par_delta optional: character specifying the mean utility variable (variable name must be included in \code{productData}),
#' @param group_structure optional: character specifying a group structure for clustered standard errors (variable name must be included in \code{productData}),
#' @param additional_variables optional: character vector specifying variables you want to keep for later analysis (variable names must be included in \code{productData})
#' @param productData data.frame with product characteristics,
#' @param demographic_draws optional: list with demographic draws for each market to consider observed heterogeneity (see details),
#' @param integration_accuracy integer specifying integration accuracy,
#' @param integration_method character specifying integration method,
#' @param integration_draws list of manually provided integration draws, one named entry per random coefficient (see details),
#' @param integration_weights numeric vector of manually provided integration weights,
#' @param integration_seed seed for the draws of Monte Carlo based integration,
#' @param blp_inner_tol tolerance for the contraction mapping (default: 1e-9),
#' @param blp_inner_maxit maximum iterations for the contraction mapping (default: 10000)
#'
#' @details For any form of user provided integration draws, i.e. \code{integration_draws} (unobserved heterogeneity)
#' or \code{demographic_draws} (observed heterogeneity), list entries must be named and contain the variable \code{market_identifier} to allow market matching.
#' Each row in these list entries contains the draws for one market.
#' In case of unobserved heterogeneity, list names must match the random coefficients from the model formula.
#' The \code{par_delta} argument provides the variable name for mean utilities. For example, in the estimation algorithm these values are used as starting guesses in the contraction mapping.
#' Another example is the evaluation of the GMM, which is also based on the provided mean utilities.
#' If you need to update \code{par_delta} or any other variable in the data object, use \code{update_BLP_data}.
#'
#' @return Returns an object of class \code{blp_data}.
#' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' #' @export BLP_data <- function(model, market_identifier, product_identifier, par_delta, group_structure = NULL, additional_variables=NULL, productData , demographic_draws , integration_accuracy, integration_method, integration_draws, integration_weights, integration_seed, blp_inner_tol = 1e-9, blp_inner_maxit = 10000){ #### Formula extraction---- call_arguments <- match.call(expand.dots = TRUE) # capture the call used to create the model formula <- Formula::as.Formula(model) # length reports the number of parts on the LHS and RHS: model_rhs_length <- length(formula)[2] stopifnot(length(formula)[1] == 1L, model_rhs_length %in% 1:4) # NA Check tmp <- model.frame(formula, productData, na.action = na.fail) tmp <- NULL # shares f1 <- formula(formula, lhs = 1, rhs = 0) shares <- model.response( model.frame(f1, productData), type = "numeric" ) if( any( shares < 0 ) || any( shares > 1 ) ) stop( "Shares contain values out of [0,1]." ) nobs <- length(shares) # market and product identifyer if( (!is.character(market_identifier)) || (length(market_identifier)!=1) ) stop( "market_identifier is not valid." ) if( !market_identifier %in% names(productData) ) stop( "market_identifier is not available in the provided data." ) market_id_char_in <- vapply(market_identifier, function(x) as.character( get(x, productData) ) , character(nobs)) nmkt <- length( unique(market_id_char_in) ) market_id_numeric <- .indexing.markets(market_id_char_in) # order of numeric values depends on order of input market identifyer market_id_numeric_o <- order(market_id_numeric) #numbers in market_id_numeric correspond to order of markets in market_id_char_in: all(unique(market_id_char_in[market_id_numeric_o]) == unique(market_id_char_in)) if( !product_identifier %in% names(productData) ) stop( "market_identifier is not available in the provided data." ) if( (!is.character(product_identifier)) || (length(product_identifier)!=1) ) stop( "product_identifier is not valid." 
) product_id_char_in <- vapply(product_identifier, function(x) as.character( get(x, productData) ) , character(nobs)) # uniqueness check tmp <- table(paste0(market_id_char_in,"_",product_id_char_in)) if( any(tmp>1) ) warning("Combination of market_identifier and product_identifier is not unique.") # BLP parameter check if( !missing(blp_inner_tol)){ if( (!is.finite(blp_inner_tol)) || (length(blp_inner_tol)!=1) ){ cat("Invalid blp_inner_tol. Set to default (1e-9).\n") blp_inner_tol<- 1e-9 } } if( !missing(blp_inner_maxit)){ if( (!is.finite(blp_inner_maxit)) || (length(blp_inner_maxit)!=1) ){ cat("Invalid blp_inner_maxit. Set to default (10000).\n") blp_inner_maxit<- 10000 } } # mean utility missing_delta <- missing(par_delta) if( !missing_delta){ if( (!is.character(par_delta)) || (length(par_delta)!=1) ) stop( "par_delta is not valid." ) par_delta_var_name <- par_delta par_delta <- get(par_delta, productData) delta_error <- !all(is.finite(exp(par_delta))) # checks NA's and NaN's and infinite values } else delta_error <- FALSE if( missing_delta || delta_error ){ par_delta_var_name <- "delta" par_delta <- rep(0, nobs) cat( "Mean utility (variable name: `delta`) is initialized with 0 because of missing or invalid par_delta argument.\n") } # linear data f2 <- formula(formula, lhs = 0, rhs = 1) X_lin <- model.matrix( f2 , productData) tmp <- apply(X_lin, 2, function(x) round(sum(abs(diff(x))), 3) == 0) if (sum(tmp) > 1) stop("Do not include a column of constants. Constants are used by default and can be omitted in the formula.") # exogenous data if( model_rhs_length >= 2){ f3 <- formula(formula, lhs = 0, rhs = 2) X_exg <- model.matrix( f3 , productData) tmp <- apply(X_exg, 2, function(x) round(sum(abs(diff(x))), 3) == 0) if (sum(tmp) > 1) stop("Do not include a column of constants. Constants are used by default and can be omitted in the formula.") } else X_exg <- NULL # random coef. data if( model_rhs_length >= 3){ f4 <- formula(formula, lhs = 0, rhs = 3) X_rand <- model.matrix( f4 , productData) K <- dim(X_rand)[2] tmp <- apply(X_rand, 2, function(x) round(sum(abs(diff(x))), 3) == 0) if (sum(tmp) > 1) stop("Do not include a column of constants. Constants are used by default and can be omitted in the formula.") } else X_rand <- NULL # IV's if( model_rhs_length >= 4 ){ f5 <- formula(formula, lhs = 0, rhs = 4) IV <- model.matrix( f5, productData ) tmp <- apply(IV, 2, function(x) round(sum(abs(diff(x))), 3) == 0) if (sum(tmp) > 1) stop("Do not include a column of constants. Constants are used by default and can be omitted in the formula.") } else IV <- NULL #### Data preparation---- ### integration I: normaly distributed RC has_own_int <- !missing(integration_weights) && !missing(integration_draws) has_int_method <- !missing(integration_accuracy) && !missing(integration_method) if( has_own_int == has_int_method ) stop("Provide either the name and accuracy of a valid integration method or your own weights and draws.") final_order_draws <- colnames(X_rand) if(has_own_int){ integration_method <- "provided_by_user" weights <- na.fail( as.matrix(as.numeric( integration_weights ), ncol =1) ) integration_list_names <- names(integration_draws) if(!( length(integration_draws) == K )) stop("Provided list of integration draws has not enough entries. Number of random coefficients determines length.") if( !setequal( integration_list_names , final_order_draws )) stop("Names of list entries for draws (unobs. heterogeneity) do not match with names of random coefficients. 
Remember to name any constant \"(Intercept)\" .\n") final_order <- match( final_order_draws, integration_list_names ) integration_draws <- integration_draws[ final_order ] # list is now in the order of X_rand draws_mktShape <- .draws_listToMatrix( drawList = integration_draws, amountDraws = length(weights), market_identifier_pd =get( market_identifier , productData), market_identifier_list_name = market_identifier, use = "rc") # the order of rows in draws_mktShape in determined by the order of markets in productData$market_id } if(has_int_method){ ## c) Generate nodes & weights by a specified accuracy and method: tmp<- get_integration_input(dim = K, method = integration_method, accuracy = integration_accuracy, nmkt = nmkt, seed = integration_seed) draws_mktShape <- tmp$nodesMktShape weights <- tmp$weights } stopifnot( all( dim(draws_mktShape) == c(nmkt, length(weights) * K))) # final check # integration II: demographic data (optional) if( !missing(demographic_draws) ){ stopifnot(is.list(demographic_draws)) demographic_names <- names(demographic_draws) M <- length( demographic_names ) dD <- .draws_listToMatrix( drawList = demographic_draws, amountDraws = length(weights), market_identifier_pd =get( market_identifier , productData), market_identifier_list_name = market_identifier, use = "demographics") stopifnot( all( dim(dD) == c(nmkt, length(weights) * M))) # final check } else { demographic_names <- NULL dD <- NULL M <- 0 } # reordering all data according to market identifier shares <- shares[ market_id_numeric_o ] X_rand <- X_rand[ market_id_numeric_o, ,drop=FALSE] X_lin <- X_lin[ market_id_numeric_o, ,drop=FALSE] X_exg <- X_exg[ market_id_numeric_o, ,drop=FALSE] IV <- IV[ market_id_numeric_o, ,drop=FALSE] if( is.null(dD) ) dD <- matrix(NA) # necessary for Rcpp input type if( is.null(group_structure)){ group_structure <- NULL } else{ if( (!is.character(group_structure)) || (length(group_structure)!=1) ) stop( "group_structure is not valid." ) group_structure <- as.character( get(group_structure, productData) ) group_structure <- group_structure[market_id_numeric_o] } market_id_numeric <- market_id_numeric[market_id_numeric_o] market_id_char_in <- market_id_char_in[market_id_numeric_o] product_id_char_in <- product_id_char_in[market_id_numeric_o] par_delta <- par_delta[ market_id_numeric_o ] #cdindex cdindex <- as.numeric( c(0, cumsum( table(market_id_numeric) )) ) ## exogenous (included and excluded) Z <- cbind(X_exg, IV) Z <- Z[, unique(colnames(Z))] #duplicate variables are suppressed ## additional_variables if( !is.null(additional_variables)){ additional_data <- data.frame("identifier" = paste0(market_id_char_in, product_id_char_in)) for( i in 1:length(additional_variables)){ vn_i <- additional_variables[i] if( !vn_i %in% names(productData) ) stop( paste0(vn_i ," is not available in the provided data." 
)) additional_data[[vn_i]] <- productData[[vn_i]][ market_id_numeric_o ] } }else{ additional_data <- NULL } ## Output integration<- list('drawsRcMktShape' = draws_mktShape, 'drawsDemMktShape' = dD, 'weights' = weights, 'method' = integration_method , 'amountDraws' = length(weights) ) parameters <- list( 'inner_tol' = blp_inner_tol, 'inner_maxit'= blp_inner_maxit, 'nobs' = nobs, 'cdindex' = cdindex, 'market_id' = market_id_numeric, 'nmkt' = nmkt, 'K' = K, 'total_demogr'= M, 'market_id_numeric_o' = market_id_numeric_o, 'demographic_names' = demographic_names, 'market_id_char_in' =market_id_char_in, 'market_id_varname' = market_identifier, 'product_id' = product_id_char_in, 'product_id_varname' = product_identifier, 'par_delta_varname' = par_delta_var_name, 'share_varname' = as.character(f1)[2]) data <- list('X_lin' = X_lin, 'X_exg' = X_exg, 'X_rand' = X_rand, 'shares' = shares, 'Z' = Z, 'group_structure'= group_structure, 'delta' = par_delta, 'additional_data' = additional_data) out <- list( call_arguments=call_arguments, integration = integration, parameters = parameters, data = data) class(out) <- "blp_data" return(out) } #' Updates the set of linear, exogenous, random coefficient, share or mean utility variable in the data object. #' #' @param data_update data.frame with variables to update (must contain the market_identifier and product_identifier variables as in \code{blp_data}), #' @param blp_data data object created by the function \code{BLP_data} #' #' @return Returns an object of class \code{blp_data}. #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' new_data <- data.frame(price = seq(1,10,length.out=500), #' x1 = seq(2,10,length.out=500), #' cdid = sort(rep(1:25,20)), #' prod_id = rep(1:20,25) ) #' blp_data_example_updated <-update_BLP_data(blp_data = blp_data, #' data_update = new_data) #' #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' #' @export update_BLP_data <- function(data_update, blp_data){ ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. 
Call BLP_data() first.") if( !is(data_update,"data.frame")) stop("data_update must be a data.frame.") product_id_varname_old <- blp_data$parameters$product_id_varname market_id_varname_old <- blp_data$parameters$market_id_varname if( is.null(product_id_varname_old) ) stop("Matching of new data not possible, because product_identifier in blp_data is not available.") if( !product_id_varname_old %in% names(data_update) ) stop(paste0(product_id_varname_old, " is not available in data_update.")) if( !market_id_varname_old %in% names(data_update) ) stop(paste0(market_id_varname_old, " is not available in data_update.")) ## reorder new data according to "old" market_identifier and product_identifier in blp_data unique_obs_id_old <- paste0(blp_data$parameters$market_id_char_in,"_", blp_data$parameters$product_id) tmp <- table( unique_obs_id_old ) if( any(tmp>1) ) stop("Matching not possible. Combination of market_identifier and product_identifier in blp_data is not unique.") unique_obs_id_new <- paste0(as.character( data_update[[market_id_varname_old]] ),"_", as.character( data_update[[product_id_varname_old]] )) tmp <- table( unique_obs_id_new ) if( any(tmp>1) ) stop("Matching not possible. Combination of market_identifier and product_identifier in data_update is not unique.") neworder <- match( unique_obs_id_old, unique_obs_id_new ) if( any( is.na( neworder ) ) ) stop("Market/product combinations in new and old data are not matching.") data_update <- data_update[neworder, ] data_update[product_id_varname_old] <- NULL data_update[market_id_varname_old] <- NULL ## update all related data objects new_variables <- names(data_update) for(i in new_variables){ if(i %in% colnames(blp_data$data$X_lin)){ blp_data$data$X_lin[,i] <- data_update[[i]] cat( paste0("Linear variable ", i ," has been updated.\n")) } else if(i %in% colnames(blp_data$data$X_exg)){ blp_data$data$X_exg[,i] <- blp_data$data$Z[,i] <- data_update[[i]] cat( paste0("Exogenous variable ", i ," has been updated.\n")) } else if((i %in% colnames(blp_data$data$Z)) && !(i %in% colnames(blp_data$data$X_exg)) ){ blp_data$data$Z[,i] <- data_update[[i]] cat( paste0("Instrument variable ", i ," has been updated.\n")) } else if(i %in% colnames(blp_data$data$X_rand)){ blp_data$data$X_rand[,i] <- data_update[[i]] cat( paste0("Random coefficient variable ", i ," has been updated.\n")) }else if(i == blp_data$parameters$share_varname){ blp_data$data$shares <- data_update[[i]] cat( paste0("Share variable ", i ," has been updated.\n")) }else if(i == blp_data$parameters$par_delta_varname) { blp_data$data$delta<- data_update[[i]] cat( paste0("Mean utility variable ", i ," has been updated.\n")) }else{ cat( paste0("No updates performed!\n")) } } return( blp_data ) }
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/BLP_data.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 getSij <- function(expmu, expdelta, cdindex) { .Call('_BLPestimatoR_getSij', PACKAGE = 'BLPestimatoR', expmu, expdelta, cdindex) } getSjtMod <- function(expmu, expdelta, nprodt, startpos, weights) { .Call('_BLPestimatoR_getSjtMod', PACKAGE = 'BLPestimatoR', expmu, expdelta, nprodt, startpos, weights) } getExpMu <- function(theta2Matrix, qv, Xrandom, cdid, demographics) { .Call('_BLPestimatoR_getExpMu', PACKAGE = 'BLPestimatoR', theta2Matrix, qv, Xrandom, cdid, demographics) } getDelta <- function(theta2, deltaOld, cdid, cdindex, Xrandom, obsshare, innerCrit, innerMaxit, printLevel, indices, nodesRcMktShape, nodesDemMktShape, weights) { .Call('_BLPestimatoR_getDelta', PACKAGE = 'BLPestimatoR', theta2, deltaOld, cdid, cdindex, Xrandom, obsshare, innerCrit, innerMaxit, printLevel, indices, nodesRcMktShape, nodesDemMktShape, weights) } dstddelta_c <- function(sijt, weights) { .Call('_BLPestimatoR_dstddelta_c', PACKAGE = 'BLPestimatoR', sijt, weights) } dstdtheta_c <- function(sijt_arma, indices, xt_arma, qvt_arma, dt_arma, weights_arma) { .Call('_BLPestimatoR_dstdtheta_c', PACKAGE = 'BLPestimatoR', sijt_arma, indices, xt_arma, qvt_arma, dt_arma, weights_arma) } jacob_c <- function(sij, indices, blp_data, blp_parameters, blp_integration, printLevel) { .Call('_BLPestimatoR_jacob_c', PACKAGE = 'BLPestimatoR', sij, indices, blp_data, blp_parameters, blp_integration, printLevel) }
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/RcppExports.R
#' Ownership matrix in BLP's car example. #' #' @format Dummy variables. #' \describe{ #' \item{column i}{1, if product in row j is produced by firm i, 0 otherwise} #' } #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "dummies_cars" #' Product data of BLP's car example. #' #' @format A data frame with product data of 2217 cars in 20 markets. #' \describe{ #' \item{share}{car market share,} #' \item{price}{car price,} #' \item{hpwt}{horsepower-weight ratio,} #' \item{air}{1, if car has air conditioning, 0 otherwise,} #' \item{mpg}{market identifier,} #' \item{space}{length times width of the car,} #' \item{const}{constant,} #' \item{id}{uniquely identifies a car,} #' \item{cdid}{uniquely identifies the market of a product,} #' \item{firmid}{uniquely identifies the firm of a product (corresponds to column number in the ownership matrix).} #' } #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "productData_cars"
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/car.R
#' Draws for observed heterogeneity in Nevo's cereal example. #' #' @format Draws for observed heterogeneity for each demographic. #' \describe{ #' \item{cdid}{market identifier,} #' \item{draws_}{20 draws differing across markets.} #' } #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "demographicData_cereal" #' Draws for unobserved heterogeneity in Nevo's cereal example. #' #' @format Each list entry contains draws (unobserved heterogeneity) for a random coefficient. #' \describe{ #' \item{cdid}{market identifier,} #' \item{draws_}{20 draws differing across markets.} #' } #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "originalDraws_cereal" #' Product data of Nevo's cereal example. #' #' @format A data frame with product data of 24 cereals in each of 94 markets. #' \describe{ #' \item{share}{cereals market share,} #' \item{price}{cereals price,} #' \item{const}{constant,} #' \item{sugar}{cereals sugar,} #' \item{mushy}{cereals mushy,} #' \item{cdid}{market identifier,} #' \item{product_id}{uniquely identifies a product in a market,} #' \item{productdummy}{uniquely identifies a product in a market,} #' \item{IV1}{1. instrument,} #' \item{IV2}{2. instrument,} #' \item{IV3}{3. instrument,} #' \item{IV4}{4. instrument,} #' \item{IV5}{5. instrument,} #' \item{IV6}{6. instrument,} #' \item{IV7}{7. instrument,} #' \item{IV8}{8. instrument,} #' \item{IV9}{9. instrument,} #' \item{IV10}{10. instrument,} #' \item{IV11}{11. instrument,} #' \item{IV12}{12. instrument,} #' \item{IV13}{13. instrument,} #' \item{IV14}{14. instrument,} #' \item{IV15}{15. instrument,} #' \item{IV16}{16. instrument,} #' \item{IV17}{17. instrument,} #' \item{IV18}{18. instrument,} #' \item{IV19}{19. instrument,} #' \item{IV20}{20. instrument} #' } #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "productData_cereal" #' Parameter starting guesses for Nevo's cereal example. #' #' @format A matrix with 4 random coefficients (rows) and columns for 4 demographics and one unobserved heterogeneity column (5 cols in total). #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "theta_guesses_cereal" #' Mean utility starting guesses for Nevo's cereal example. #' #' @format A numeric vector of 2256 values. #' @source \url{https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/26803/SOF9FW&version=1.0} "w_guesses_cereal"
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/cereal.R
#' @useDynLib BLPestimatoR #' @importFrom Rcpp sourceCpp NULL #' Performs a BLP demand estimation. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param par_theta2 matrix with column and rownames providing a starting value for the optimization routine (see details), #' @param solver_method character specifying the solver method in \code{optim} (further arguments can be passed to \code{optim} by ...) #' @param solver_maxit integer specifying maximum iterations for the optimization routine (default=10000), #' @param solver_reltol integer specifying tolerance for the optimization routine (default= 1e-6), #' @param standardError character specifying assumptions about the GMM residual (homoskedastic , heteroskedastic (default), or cluster) #' @param extremumCheck if \code{TRUE}, second derivatives are checked for the existence of minimum at the point estimate (default = FALSE), #' @param printLevel level of output information ranges from 0 (no GMM results) to 4 (every norm in the contraction mapping) #' @param ... additional arguments for \code{optim} #' #' @return Returns an object of class "blp_est". This object contains, among others, all estimates for preference parameters and standard errors. #' #' @details NA's in \code{par_theta2} entries indicate the exclusion from estimation, i.e. the coefficient is assumed to be zero. #' If only unobserved heterogeneity is used (no demographics), the column name of \code{par_theta2} must be "unobs_sd". #' With demographics the colnames must match the names of provided demographics (as in \code{demographic_draws}) and "unobs_sd". #' Row names of \code{par_theta2} must match random coefficients as specified in \code{model}. Constants must be named "(Intercept)". #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' blp_est <- estimateBLP(blp_data =blp_data, #' par_theta2 = theta_guesses, #' extremumCheck = FALSE , #' printLevel = 1 ) #' summary(blp_est) #' #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad 
getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' @importFrom Matrix Diagonal #' @importFrom methods is #' #' @export estimateBLP <- function( blp_data, par_theta2, solver_method = "BFGS", solver_maxit = 10000, solver_reltol = 1e-6, standardError = "heteroskedastic", extremumCheck = FALSE, printLevel = 2, ... ) { call_arguments <- match.call(expand.dots = TRUE) # capture the call used to create the model nobs <- blp_data$parameters$nobs K <- blp_data$parameters$K ## BLP_data class if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") ## calc matrices Z <- blp_data$data$Z W <- try( solve((t(Z) %*% Z)) ) if (any(class(W) == "try-error")) stop("Problems with singular matrizes. This might be caused by (nearly) linear dependent regressors or weak instruments.") xzwz <- t(blp_data$data$X_lin) %*% Z %*% W %*% t(Z) xzwzx <- xzwz %*% blp_data$data$X_lin invxzwzx <- try( solve(xzwzx) ) if (any(class(invxzwzx) == "try-error")) stop("Problems with singular matrices. This might be caused by (nearly) linear dependent regressors or weak instruments.") blp_data$data$W <- W blp_data$data$xzwz <- xzwz blp_data$data$invxzwzx <- invxzwzx ## check and prepare par_theta2 start_theta2 <- .prepare_theta2(par_theta2, final_col_names_par = c( "unobs_sd" , blp_data$parameters$demographic_names), final_row_names_par = colnames(blp_data$data$X_rand), K = blp_data$parameters$K, M = blp_data$parameters$total_demogr) cat("blp_data were prepared with the following arguments:\n") print(blp_data$call_arguments) ## Initialising and optimisation if (printLevel > 0) { cat("Starting a BLP demand estimation with ", blp_data$parameters$nobs, " observations in ", blp_data$parameters$nmkt, " markets...\n") cat("[integration::method", blp_data$integration$integration_method, " integration::amountDraws", blp_data$integration$amountDraws, "]\n") cat("[blp::inner_tol", blp_data$parameters$inner_tol, " blp::inner_maxit", blp_data$parameters$inner_maxit, "]\n") cat("[solver::method", solver_method, " solver::maxit", solver_maxit," solver::reltol", solver_reltol, "]\n") } # making use of global variables (environments), because optim allows just a scalar as output of gmm_obj: blp_results <- new.env( parent = emptyenv()) blp_results$deltaOld <- blp_data$data$delta blp_results$innerItAll <- c() blp_results$negShares<- FALSE blp_results$gradient <- rep(NA_real_, start_theta2$total_par ) ## Estimation ---- start_time <- Sys.time() # optimization res <- optim(par = start_theta2$par_theta2, fn = gmm_obj, gr = gmm_gr, method = solver_method, control = list( reltol = solver_reltol, maxit = solver_maxit ), indices=start_theta2$indices, blp_results=blp_results, blp_data=blp_data, printLevel=printLevel, ... ) solverMessage<- if( res$convergence==0 ) "Successful convergence" else paste("See error code (optim package)", res$convergence ) outer_it_out <- res$counts[1] end_time <- Sys.time() time <- end_time - start_time cat("------------------------------------------ \n") cat(paste("Solver message:", solverMessage ,"\n") ) if( !( solverMessage=="Successful convergence" ) ) stop( "Cannot compute post estimation results due to failed minimization routine." 
) cat("------------------------------------------ \n") cat("Final GMM evaluation at optimal parameters: \n") # the next call ensures that values that are written to environments # and are used for post estimation analysis are based on the optimal # set of parameters and not just the last step of the solver: innerItAll_out <- blp_results$innerItAll blp_results$deltaOld <- rep(0,nobs) #reset environment for final evaluation finalTmp <- gmm_obj(par_theta2 = res$par,#### indices=start_theta2$indices, blp_results=blp_results, blp_data=blp_data, printLevel=3) delta_out<- blp_results$deltaOld theta_rc_out <- res$par theta_lin_out <- blp_results$bet sij_out <- blp_results$sij local_min_out <- finalTmp gradient_out <- blp_results$gradient jacob_out <- blp_results$jacobian xi_out <- blp_results$xi # naming of rc names_rc <- kronecker( start_theta2$final_col_names_par , start_theta2$final_row_names_par, paste, sep="*") relevantRcDem_index <- start_theta2$indices[,"row"] + max( start_theta2$indices[,"row"] ) * ( start_theta2$indices[,"col"] - 1 ) names(theta_rc_out) <- names_rc[relevantRcDem_index] ### Post estimation---- ## standard errors X_lin <- blp_data$data$X_lin Z <- blp_data$data$Z W <- blp_data$data$W a <- t(cbind(X_lin, jacob_out)) %*% Z tmpSE <- try( solve(a %*% W %*% t(a)) ) lin_len <- dim(X_lin)[2] valid_SE <- (standardError %in% c("heteroskedastic","homoskedastic","cluster")) && (length(standardError)==1) if(!valid_SE){ message("Invalid standard error option is provided. Switching to heteroskedastic standard errors...") standardError <- "heteroskedastic" } if (any(class(tmpSE) == "try-error")) stop("Standard errors cannot be computed due to singular matrices.") if( standardError == "heteroskedastic") { # default cat("Using the heteroskedastic asymptotic variance-covariance matrix... \n") #omega <- diag( diag( xi_out %*% t(xi_out) ) ) #b<- t(Z) %*% omega %*% Z omega <- xi_out^2 b<- as.matrix(t(Z) %*% Diagonal(length(omega),omega) %*% Z) COV <- tmpSE %*% a %*% W %*% b %*% W %*% t(a) %*% tmpSE } if( standardError == "homoskedastic") { cat("Using the homoskedastic asymptotic variance-covariance matrix... \n") COV <- c( (t(xi_out) %*% xi_out)/nobs ) * tmpSE } if( standardError == "cluster") { group_structure <- blp_data$data$group_structure if( any(is.na(group_structure)) || is.null(group_structure) ) stop("Valid group_structure is not availalbe in blp_data. Clustered standard errors require a variable that describes the cluster affiliation.") group_structure <- data.frame(group_structure=group_structure) group_matrix <- model.matrix( as.Formula("~0+group_structure"), group_structure ) tmp <- c(xi_out) * group_matrix omega <- tmp %*% t(tmp) b<- t(Z) %*% omega %*% Z COV <- tmpSE %*% a %*% W %*% b %*% W %*% t(a) %*% tmpSE } seLinear_out <- sqrt(diag(COV))[1: lin_len ] seRc_out <- sqrt(diag(COV))[-(1:lin_len )] ## Waldstatistic WaldStatistic<- t(matrix( theta_rc_out )) %*% solve(COV[-(1:lin_len), -(1:lin_len) ]) %*% matrix( theta_rc_out ) ## extremum Check if( extremumCheck ) { hessian <- invisible(hessian(func = gmm_obj, x = res$par, indices=start_theta2$indices, blp_results=blp_results, blp_data=blp_data, printLevel = 0)) hessianEig <- eigen(hessian)$values isMin_out <- sum(hessianEig > 0) == start_theta2$total_par isMin_out <- if(isMin_out) 'positive' else 'negative' cat( paste( "Extremum Check:" , isMin_out)) } else { isMin_out <- NA } output<- list("theta_rc" = theta_rc_out, # solver results... 
"theta_lin" = theta_lin_out, "se_rc" = seRc_out, "se_linear" = seLinear_out, "local_min" = local_min_out, "gradient" = gradient_out, "time" = time, "outer_it" = outer_it_out, "inner_it" = innerItAll_out, "delta" = delta_out, "xi" = xi_out, "#demogrCoef"= blp_data$parameters$total_demogr, "#nmkt" = blp_data$parameters$nmkt, "#nobs" = nobs, "#exoCoef" = length(colnames(blp_data$data$X_exg)), "indices" = start_theta2$indices, "rand_coef_rownames" = start_theta2$final_row_names_par, "rand_coef_colnames" = start_theta2$final_col_names_par, "drawsRcMktShape" = blp_data$integration$draws_mktShape, "drawsDemMktShape" = blp_data$integration$dD, "weights" = blp_data$integration$weights, "sij" = sij_out, "WaldStatistic" = WaldStatistic, # Postestimation... "IslocalMin" = isMin_out, "outerCrit" = solver_reltol, "innerCrit" = blp_data$parameters$inner_tol, "intMethod" = blp_data$integration$method, "intdraws" = length(blp_data$integration$weights), "standardErrorMethod" = standardError, "call" = call_arguments ) class(output) <- 'blp_est' return( output ) }
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/estimateBLP.R
#' @importFrom stats dnorm
#' @importFrom stats pnorm
#' @importFrom stats qnorm
#' @importFrom stats rnorm
#' @importFrom stats runif
#' @importFrom stats pchisq
#' @importFrom stats na.omit
#' @importFrom methods is
#'
#' @export
summary.blp_est <- function( object , ... ){

  if( !is(object,"blp_est"))
    stop("Argument is not of class blp_est.")

  ans <- object

  est.linear <- ans$theta_lin
  se.linear <- ans$se_linear
  tval.linear <- est.linear/se.linear

  est.rc <- ans$theta_rc
  se.rc <- ans$se_rc
  tval.rc <- est.rc/se.rc

  ans$LinCoefficients <- as.data.frame( cbind( est.linear ,
                                               se.linear ,
                                               tval.linear ,
                                               2 * pnorm(abs(tval.linear ),
                                                         lower.tail = FALSE)) )
  ans$AmountLinCoef <- length(est.linear)

  ans$RcCoefficients <- as.data.frame( cbind(est.rc,
                                             se.rc,
                                             tval.rc,
                                             2 * pnorm(abs(tval.rc),
                                                       lower.tail = FALSE)) )
  ans$amount_par <- length(est.rc)

  # Row names including the demographic / random coefficient labels
  names_par <- kronecker( ans$rand_coef_colnames ,
                          ans$rand_coef_rownames, paste, sep="*")
  relevantRcDem_index <- ans$indices[,"row"] +
    max( ans$indices[,"row"] ) * ( ans$indices[,"col"] - 1 )

  names( ans$LinCoefficients ) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
  names( ans$RcCoefficients ) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
  rownames( ans$RcCoefficients ) <- names_par[ relevantRcDem_index ]

  # Wald statistic
  ans$wald_pvalue <- pchisq( ans$WaldStatistic ,
                             df = ans$amount_par,
                             lower.tail = FALSE )

  class(ans) <- "blpSummary"

  return(ans) # printing ans
}

#' @export
print.blpSummary <- function( x, ... ){

  if( !is(x,"blpSummary"))
    stop("Argument is not of class *blpSummary* .")

  ##
  x$call_arguments
  ##
  cat("\nData information:\n")
  cat( "\n\t", paste0( x[['#nmkt']] , " market(s) with " , x[['#nobs']] , " products") , "\n" )
  cat( "\t", paste0( x[['AmountLinCoef']] , " linear coefficient(s) (" , x[['#exoCoef']] , " exogenous coefficients)") , "\n" )
  cat( "\t", paste0( x[['amount_par']] ,
                     " non-linear parameters related to random coefficients" ) , "\n" )
  cat( "\t", paste0( x[['#demogrCoef']] , " demographic variable(s)" ) , "\n" )
  ##
  cat("\nEstimation results:\n")
  cat("\n Linear Coefficients\n")
  if( x$AmountLinCoef <= 20){
    print( x$LinCoefficients )
  } else {
    print( x$LinCoefficients[1:20, ] )
    cat( "\n...\n" )
    cat( paste("\n", x$AmountLinCoef - 20, "estimates are omitted. They are available in the LinCoefficients generated by summary.\n" ) )
  }

  cat("\n Random Coefficients\n")
  if( x$amount_par <= 20){
    print( x$RcCoefficients )
  } else {
    print( x$RcCoefficients[1:20, ] )
    cat( "\n...\n" )
    cat( paste("\n", x$amount_par - 20, "estimates are omitted. They are available in the RcCoefficients generated by summary.\n" ) )
  }

  cat("\n Wald Test\n")
  cat( paste( round( x$WaldStatistic , 4 ),
              "on ", x$amount_par , "DF, p-value:" , x$wald_pvalue, "\n" ))
  ##
  cat("\nComputational Details: \n")
  cat( "\t", paste0("Solver converged with ", x$outer_it," iterations to a minimum at ", round( x$local_min, 4 )) ,".\n" )
  cat( "\t", paste0("Local minima check: ", x$IslocalMin , "\n" ))
  cat( "\t", paste0("stopping criterion outer loop: " , x$outerCrit, "\n" ))
  cat( "\t", paste0("stopping criterion inner loop: " , x$innerCrit , "\n" ))
  cat( "\t", paste0("Market shares are integrated with " , x$intMethod , " and ", x$intdraws ," draws. \n" ))
  cat( "\t", paste0("Method for standard errors: " , x$standardErrorMethod , "\n" ))
}
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/generics.R
#' @useDynLib BLPestimatoR #' @importFrom Rcpp sourceCpp NULL #' Calculates elasticities for a given variable and market. #' #' @param blp_data data object created by the function \code{BLP_data}, #' @param share_info object with individual and aggregated choice probabilities created by the function \code{getShareInfo}, #' @param theta_lin linear parameter of the variable for which elasticities are calculated for, #' @param variable character specifying a variable for which elasticities are calculated for, #' @param products optional: character vector of specific products, #' @param market character specifying the market in which elasticities are calculated #' @param printLevel level of output information (default = 1) #' #' @return Returns a matrix with elasticities. Value in row j and col i for a variable x, #' gives the effect of a change in product i's characteristic x on the share of product j. #' #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' #' #' @examples #' K<-2 #number of random coefficients #' data <- simulate_BLP_dataset(nmkt = 25, nbrn = 20, #' Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), #' Xexo = c("x1", "x2", "x3", "x4", "x5"), #' Xrandom = paste0("x",1:K),instruments = paste0("iv",1:10), #' true.parameters = list(Xlin.true.except.price = rep(0.2,5), #' Xlin.true.price = -0.2, #' Xrandom.true = rep(2,K), #' instrument.effects = rep(2,10), #' instrument.Xexo.effects = rep(1,5)), #' price.endogeneity = list( mean.xi = -2, #' mean.eita = 0, #' cov = cbind( c(1,0.7), c(0.7,1))), #' printlevel = 0, seed = 234234 ) #' #' #' model <- as.formula("shares ~ price + x1 + x2 + x3 + x4 + x5 | #' x1 + x2 + x3 + x4 + x5 | #' 0+ x1 + x2 | #' iv1 + iv2 + iv3 + iv4 + iv5 + iv6 + iv7 + iv8 +iv9 +iv10" ) #' #' blp_data <- BLP_data(model = model, market_identifier="cdid", #' product_id = "prod_id", #' productData = data, #' integration_method = "MLHS" , #' integration_accuracy = 40, #' integration_seed = 1) #' #' theta_guesses <- matrix(c(0.5,2), nrow=2) #' rownames(theta_guesses) <- c("x1","x2") #' colnames(theta_guesses) <- "unobs_sd" #' #' shareObj <- getShareInfo( blp_data=blp_data, #' par_theta2 = theta_guesses, #' printLevel = 1) #' #' #' get_elasticities(blp_data=blp_data, #' share_info = shareObj , #' theta_lin = 1, #' variable = "price", #' products = c("4","20"), #' market = 1) #' #' @importFrom methods is #' #' @export get_elasticities <- function( blp_data, share_info, theta_lin, variable, products , market, printLevel = 1){ if( !is(blp_data,"blp_data")) stop("blp_data has wrong class. Call BLP_data() first.") if( !is(share_info,"shareInfo")) stop("share_info has wrong class. 
Call getShareInfo() first.") if( missing(variable )) stop("Specify variable for which elasticities should be calculated for.") if( length(variable) != 1) stop("Only one variable can be specified.") if( missing(market )) stop("Specify mkt in which elasticities should be calculated.") if( length(market) != 1) stop("Only one market can be specified.") X_rand_colNames <- colnames(blp_data$data$X_rand) X_lin_colNames <- colnames(blp_data$data$X_lin) if ( !( variable %in% c(X_lin_colNames, X_rand_colNames ) ) ){ stop( paste( "Provided variable", variable , "is not in the set of variables. Constants must be named (Intercept).") ) } ## elasticity calculation---- original_market_id <- blp_data$parameters$market_id_char_in # market_id_char_in is in order market <- as.character(market) if( !any(market %in% original_market_id) ) stop("Market is not available in provided dataset.") relevant_Obs_Mkt <- which( market == original_market_id) relevant_Mkt <- which( market == unique(original_market_id)) nobs <- nrow( blp_data$data$X_lin ) amountDraws <- blp_data$integration$amountDraws K <- blp_data$parameters$K totalDemographics <- blp_data$parameters$total_demogr nprod_Mkt <- length( relevant_Obs_Mkt ) implShare_Mkt <- share_info$shares[ relevant_Obs_Mkt ] product_id_Mkt <- blp_data$parameters$product_id[ relevant_Obs_Mkt ] if( missing(products) ){ product_selector <- 1:nprod_Mkt }else{ product_selector <- products } if( any(!(product_selector %in% product_id_Mkt ))){ if(printLevel >0) cat("At least one specified product is not available in the market... selecting all.\n") product_selector <- 1:nprod_Mkt } if( length(theta_lin)!=1 || !is.numeric(theta_lin) || is.na( theta_lin ) ) stop( "Provide a valid linear parameter theta_lin. If the variable does not enter linerarly, provide zero.") # if changingVariable is not modelled as a random coefficient, i.e. use logit: if ( !( variable %in% colnames(blp_data$data$X_rand)) ) { changingVariable_Mkt <- blp_data$data$X_lin[ relevant_Obs_Mkt , variable, drop = F ] # Cols contain the variables that are changed by 1%, and rows contain effects on other products in the choice set. EtaMkt <- - matrix( changingVariable_Mkt * theta_lin * implShare_Mkt , nrow = nprod_Mkt, ncol = nprod_Mkt, byrow = TRUE) diag(EtaMkt) <- theta_lin * changingVariable_Mkt * (1 - implShare_Mkt) } else { # if changingVariable is modelled as a random coefficient: changingVariable_Mkt <- blp_data$data$X_rand[ relevant_Obs_Mkt , variable, drop = F ] drawsRCMktShape_Mkt <- blp_data$integration$drawsRcMktShape[ relevant_Mkt , , drop = FALSE] # only one line, bec. nodes are used for every product in one market drawsRC_tableform_Mkt <- matrix(drawsRCMktShape_Mkt, nrow = amountDraws, ncol = K ) colnames(drawsRC_tableform_Mkt) <- X_rand_colNames #X_lin_colNames, X_rand_colNames if ( totalDemographics > 0 ) { drawsDemMktShape_Mkt <- blp_data$integration$drawsDemMktShape[ relevant_Mkt, , drop = FALSE] demographicReshape_Mkt <- matrix( drawsDemMktShape_Mkt , ncol = totalDemographics , nrow = amountDraws ) } else { drawsDemMktShape_Mkt <- matrix( NA ) demographicReshape_Mkt <- matrix( NA ) } sij_Mkt <- share_info$sij[ relevant_Obs_Mkt , ,drop=FALSE] weights <- blp_data$integration$weights theta2Mat <- share_info$theta2 sigma <- theta2Mat[ variable , "unobs_sd" ] # unobsevered_part contains all individual specific effect parts due to unobs. herterog. 
unobserved_part <- theta_lin + sigma * drawsRC_tableform_Mkt[ , variable ] # get individual price effects if ( totalDemographics > 0 ) { # extract relevant demographic effects ( ie pick a line of theta2Mat) demographic_effects <- matrix( theta2Mat[ variable, -1], # first col is unobs_sd ncol = totalDemographics, nrow = amountDraws, byrow = TRUE ) # multiply every demographic coefficient with all demogr. draws: observed_part <- rowSums( demographic_effects * demographicReshape_Mkt ) } else { observed_part <- 0 } beta_i <- observed_part + unobserved_part Omega <- matrix( NA_real_ , nrow = nprod_Mkt, ncol = nprod_Mkt) scalar <- - matrix( 1/implShare_Mkt ) %*% matrix( changingVariable_Mkt , nrow = 1) diag( scalar ) <- - diag( scalar ) Omega[, ] <- ( t( beta_i )[ rep(1, nprod_Mkt) , ] * t( weights )[ rep( 1 , nprod_Mkt), ] * sij_Mkt ) %*% t( sij_Mkt ) diag( Omega ) <- c( ( t( beta_i )[rep( 1, nprod_Mkt ), ] * sij_Mkt * (1 - sij_Mkt) ) %*% weights ) EtaMkt <- Omega * scalar } # after if/esle: rownames( EtaMkt ) <- colnames( EtaMkt ) <- product_id_Mkt return( EtaMkt[product_selector,product_selector] ) } # get_elasticities <- function( blp_data, blp_estimation, variable, products , market){ # # if(class(blp_data) != "blp_data") # stop("blp_data has wrong class. Call BLP_data() first.") # # if(class(blp_estimation) != "blp_est") # stop("blp_estimation has wrong class. Call estimate_BLP() first.") # # if( missing(variable )) # stop("Specify variable for which elasticities should be calculated for.") # # if( length(variable) != 1) # stop("Only one variable can be specified.") # # if( missing(market )) # stop("Specify mkt in which elasticities should be calculated.") # # if( length(market) != 1) # stop("Only one market can be specified.") # # X_rand_colNames <- colnames(blp_data$data$X_rand) # X_lin_colNames <- colnames(blp_data$data$X_lin) # # if ( !( variable %in% c(X_lin_colNames, X_rand_colNames ) ) ){ # stop( paste( "Provided variable", variable , "is not in the set of variables. Constants must be named (Intercept).") ) # } # # ## elasticity calculation---- # # original_market_id <- blp_data$parameters$market_id_char_in # market_id_char_in is in order # market <- as.character(market) # # if( !any(market %in% original_market_id) ) # stop("Market is not available in provided dataset.") # # relevant_Obs_Mkt <- which( market == original_market_id) # relevant_Mkt <- which( market == unique(original_market_id)) # # nobs <- nrow( blp_data$data$X_lin ) # amountDraws <- blp_data$integration$amountDraws # K <- blp_data$parameters$K # totalDemographics <- blp_data$parameters$total_demogr # nprod_Mkt <- length( relevant_Obs_Mkt ) # obsShare_Mkt <- blp_data$data$shares[ relevant_Obs_Mkt ] # product_id_Mkt <- blp_data$parameters$product_id[ relevant_Obs_Mkt ] # delta_mkt <- blp_estimation$delta[ relevant_Obs_Mkt ] # # if( missing(products) ){ # product_selector <- 1:nprod_Mkt # }else{ # product_selector <- products # } # # if( any(!(product_selector %in% product_id_Mkt ))){ # cat("At least one specified product is not available in the market... 
selecting all.\n") # product_selector <- 1:nprod_Mkt # } # # # betabar <- blp_estimation$theta_lin[ variable, ] # if( is.na( betabar ) ){ # # in case that the variable does not enter linerarly # betabar <- 0 # } # # # if changingVariable is not modelled as a random coefficient, ie use logit: # if ( !( variable %in% colnames(blp_data$data$X_rand)) ) { # # changingVariable_Mkt <- blp_data$data$X_lin[ relevant_Obs_Mkt , variable, drop = F ] # # # Cols contain the variables that are changed by 1%, and rows contain effects on other products in the choice set. # EtaMkt <- - matrix( changingVariable_Mkt * betabar * obsShare_Mkt , # nrow = nprod_Mkt, # ncol = nprod_Mkt, byrow = TRUE) # # diag(EtaMkt) <- betabar * changingVariable_Mkt * (1 - obsShare_Mkt) # # } else { # # if changingVariable is modelled as a random coefficient: # changingVariable_Mkt <- blp_data$data$X_rand[ relevant_Obs_Mkt , variable, drop = F ] # # drawsRCMktShape_Mkt <- blp_data$integration$drawsRcMktShape[ relevant_Mkt , , drop = FALSE] # only one line, bec. nodes are used for every product in one market # drawsRC_tableform_Mkt <- matrix(drawsRCMktShape_Mkt, # nrow = amountDraws, # ncol = K ) # colnames(drawsRC_tableform_Mkt) <- X_rand_colNames #X_lin_colNames, X_rand_colNames # # if ( totalDemographics > 0 ) { # drawsDemMktShape_Mkt <- blp_data$integration$drawsDemMktShape[ relevant_Mkt, , drop = FALSE] # demographicReshape_Mkt <- matrix( drawsDemMktShape_Mkt , # ncol = totalDemographics , # nrow = amountDraws ) # } else { # drawsDemMktShape_Mkt <- matrix( NA ) # demographicReshape_Mkt <- matrix( NA ) # } # # # sij_Mkt <- blp_estimation$sij[ relevant_Obs_Mkt , ] # weights <- blp_data$integration$weights # # theta2Mat <- .get.theta2.reshape(theta2.in = blp_estimation$theta_rc, # totalRC = K, # total.demogr.in = totalDemographics, # indices.in = blp_estimation$indices, # fill = 0) # NA are replaced by zeros to simplify x * par in getExpMu # rownames(theta2Mat) <- blp_estimation$rand_coef_rownames # colnames(theta2Mat) <- blp_estimation$rand_coef_colnames # # sigma <- theta2Mat[ variable , "unobs_sd" ] # # # unobsevered_part contains all individual specific effect parts due to unobs. herterog. # unobseved_part <- betabar + # sigma * drawsRC_tableform_Mkt[ , variable ] # get individual price effects # # if ( totalDemographics > 0 ) { # # extract relevant demographic effects ( ie pick a line of theta2Mat) # demographic_effects <- matrix( theta2Mat[ variable, -1], # first col is unobs_sd # ncol = totalDemographics, # nrow = amountDraws, # byrow = TRUE ) # # # multiply every demographic coefficient with all demogr. draws: # observed_part <- rowSums( demographic_effects * # demographicReshape_Mkt ) # # } else { # observed_part <- 0 # } # # beta_i <- observed_part + unobseved_part # Omega <- matrix( NA_real_ , # nrow = nprod_Mkt, # ncol = nprod_Mkt) # scalar <- - matrix( 1/obsShare_Mkt ) %*% matrix( changingVariable_Mkt , nrow = 1) # diag( scalar ) <- - diag( scalar ) # Omega[, ] <- ( t( beta_i )[ rep(1, nprod_Mkt) , ] * # t( weights )[ rep( 1 , nprod_Mkt), ] * sij_Mkt ) %*% t( sij_Mkt ) # diag( Omega ) <- c( # ( t( beta_i )[rep( 1, nprod_Mkt ), ] * sij_Mkt * (1 - sij_Mkt) ) # %*% weights # ) # EtaMkt <- Omega * scalar # # } # # # after if/esle: # rownames( EtaMkt ) <- colnames( EtaMkt ) <- product_id_Mkt # # return( EtaMkt[product_selector,product_selector] ) # # } # # # #
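# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package source): the logit-style
# elasticity formulas used in get_elasticities() above when the variable
# carries no random coefficient. With linear coefficient alpha (theta_lin),
# characteristic value p_j and market share s_j, the own elasticity is
# alpha * p_j * (1 - s_j) and the elasticity of product j's share with respect
# to product k's characteristic is -alpha * p_k * s_k. Numbers below are
# made up.
alpha_example <- -0.2
price_example <- c(4.0, 5.5)    # two products in one market
share_example <- c(0.30, 0.25)

own_elasticity   <- alpha_example * price_example * (1 - share_example)
cross_elasticity <- -alpha_example * price_example * share_example
# e.g. own_elasticity[1] = -0.2 * 4.0 * 0.7 = -0.56
# ---------------------------------------------------------------------------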
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/get_elasticities.R
#' @useDynLib BLPestimatoR #' @importFrom Rcpp sourceCpp NULL #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' @importFrom methods is #' gmm_obj <- function(par_theta2 , indices, blp_results, blp_data, printLevel){ if( !is(blp_data,"blp_data")) stop("Input has wrong class. Call BLP_data() first.") deltaOld <- blp_results$deltaOld # delta vector from previous run (without the blp.results environment) theta2Mat<- .get.theta2.reshape(theta2.in = par_theta2, totalRC = blp_data$parameters$K, total.demogr.in = blp_data$parameters$total_demogr, indices.in = indices, fill = 0 ) # NA are replaced by zeros to simplify x * par in getExpMu #Call C++ function: tmp <- getDelta( theta2 = theta2Mat, cdid = blp_data$parameters$market_id, cdindex = blp_data$parameters$cdindex, innerCrit = blp_data$parameters$inner_tol, indices = indices, innerMaxit= blp_data$parameters$inner_maxit, Xrandom = blp_data$data$X_rand, obsshare = blp_data$data$shares, deltaOld = blp_results$deltaOld, nodesDemMktShape = blp_data$integration$drawsDemMktShape , nodesRcMktShape = blp_data$integration$drawsRcMktShape, weights = blp_data$integration$weights, printLevel = printLevel) delta <- tmp$delta counter <- tmp$counter bet <- NA xi <- NA sij<- NA jacobian <- NA gradient <- NA delta_has_na <- any(is.na(delta)) if (delta_has_na) { gradient <- rep(Inf, length(par_theta2)) f_out <- Inf } else { #save delta as start solution for the next run, #only if contraction mapping converged in the previous step: blp_results$deltaOld <- delta ## Objective bet <- blp_data$data$invxzwzx %*% (blp_data$data$xzwz %*% delta) xi <- delta - blp_data$data$X_lin %*% bet tmp2 <- t(xi) %*% blp_data$data$Z f_out <- c(tmp2 %*% blp_data$data$W %*% t(tmp2)) ## Gradient sij <- matrix(NA_real_, nrow = blp_data$parameters$nobs , ncol = blp_data$integration$amountDraws ) sij[ , ] <- tmp$sij jacobian <- jacob_c(sij = sij, indices = indices, blp_data = blp_data$data, blp_parameters = blp_data$parameters, blp_integration = blp_data$integration, printLevel = printLevel) if (any(is.na(jacobian))) { if (printLevel > 0) { cat("\t gradient contains Na's --> objective value and gradient replaced by Inf \n")} gradient <- rep(Inf, length(par_theta2)) f_out <- Inf }else{ gradient <- 2 * t(jacobian) %*% blp_data$data$Z %*% blp_data$data$W %*% t(blp_data$data$Z) %*% xi } #is.na(jacobian) } #end is.na(delta) if (printLevel >= 1) { cat("gmm objective:", round(f_out, 4)) if ( delta_has_na ) cat(" [delta contains NaN's] ") cat("\n") } if (printLevel >= 2) { cat("\t theta (RC): ") cat( round(theta2Mat[ ,1] , 2), "\n") if( length(par_theta2) >0 ){ cat("\t theta (demogr.): ") cat( round(c(theta2Mat[ ,-1]) , 2), "\n") } cat("\t inner iterations: ") cat( counter, "\n") cat("\t gradient: " ) cat( round(gradient,4), "\n") } # save to environment (not provided as return object, because optim only accepts a single number as output) blp_results$bet <- bet blp_results$gradient <- gradient blp_results$jacobian <- jacobian blp_results$xi <- xi 
  blp_results$innerItAll <- c(blp_results$innerItAll, tmp$counter)
  blp_results$sij <- sij

  if(tmp$negShares==TRUE) blp_results$negShares <- TRUE

  return(f_out)
}

gmm_gr <- function(par_theta2 , indices, blp_results, blp_data, printLevel) {
  return(blp_results$gradient)
}
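# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package source): the GMM objective
# evaluated in gmm_obj() above is q(theta2) = (xi' Z) W (Z' xi), where xi is
# the structural error implied by delta, Z the instrument matrix and W the
# weighting matrix; its gradient combines the Jacobian of delta with the same
# moments, 2 * J' Z W Z' xi. Tiny made-up matrices, with a (Z'Z)^{-1}
# weighting matrix chosen purely for illustration:
set.seed(1)
Z_example  <- matrix(rnorm(20), nrow = 10, ncol = 2)   # 10 obs, 2 instruments
xi_example <- rnorm(10)
W_example  <- solve(crossprod(Z_example))

moments    <- t(xi_example) %*% Z_example              # 1 x 2 moment vector
q_example  <- c(moments %*% W_example %*% t(moments))  # scalar objective value
# ---------------------------------------------------------------------------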
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/gmm.R
#' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton .get.theta2.reshape <- function(theta2.in, totalRC, total.demogr.in, indices.in, fill, printLevel) { theta2.matrix.out <- matrix(fill, nrow = totalRC, ncol = total.demogr.in + 1) for (i in 1:length(theta2.in)) { theta2.matrix.out[indices.in[i, 1], indices.in[i, 2]] <- theta2.in[i] } return(theta2.matrix.out) } .MLHS <- function(D, N) { draws <- numeric(N) shuffleddraws <- matrix(NA, nrow=N, ncol=D) for (i in 1:D) { draws <- ((1:N)-1)/N + runif(1)/N; shuffle = sample(N) shuffleddraws[ , i] <- draws[shuffle] } return( shuffleddraws ) } .Halton<-function(D, N , randomized){ if(randomized == F){ out<- halton( N , D )} else { out<- (halton(N, D ) + matrix( rep(runif(D,0,1), N ),nrow=N,byrow = T))%%1 } return(out) } get_integration_input <- function(dim, method, accuracy, nmkt, seed) { if (!missing(seed) && !is.na(seed) ) set.seed(seed) if (!(method %in% c("MLHS","Halton","randHalton","MC","sgGH","sgNH"))){ stop("Integration method not available! Choose *MLHS*, *Halton*, *randHalton*, *MC*, *sgGH*, *sgNH*.") } ## Calc. Nodes and Weights ---- output <- list() attributes(output) <- list(method = method, accuracy = accuracy) accuracy <- na.fail(accuracy) # MLHS if (method == "MLHS") { nodes <- replicate(nmkt, .MLHS(dim, accuracy)) nodes_allMkt <- qnorm(t(sapply(1:nmkt,function(x){ c(nodes[,,x]) } ))) weights <- matrix(1/accuracy, ncol = 1, nrow= accuracy) } # Halton if (method == "Halton") { nodes <- replicate(nmkt, .Halton(dim, accuracy, randomized = FALSE)) nodes_allMkt <- qnorm(t(sapply(1:nmkt,function(x){ c(nodes[,,x]) } ))) weights <- matrix(1/accuracy, ncol = 1, nrow= accuracy) } # randomized Halton if (method == "randHalton") { nodes <- replicate(nmkt, .Halton(dim, accuracy, randomized = TRUE)) nodes_allMkt <- qnorm(t(sapply(1:nmkt,function(x){ c(nodes[,,x]) } ))) weights <- matrix(1/accuracy, ncol = 1, nrow= accuracy) } # MC if (method == "MC") { nodes_allMkt <- matrix(rnorm(nmkt * accuracy * dim), nrow=nmkt) weights <- matrix(1/accuracy, ncol = 1, nrow= accuracy) } # Gauss Hermite if (method == "sgGH") { grid <- createNIGrid(dim, type = "GHN", level = accuracy, ndConstruction = "sparse") nodes <- c(getNodes(grid)) nodes_allMkt <- t(replicate(nmkt, nodes)) weights<-as.matrix(getWeights(grid), ncol = 1) } # Gauss Hermite nested if (method == "sgNH") { grid <- createNIGrid(dim, "nHN", accuracy, ndConstruction = "sparse") nodes <- c(getNodes(grid)) nodes_allMkt <- t(replicate(nmkt, nodes)) weights<-as.matrix(getWeights(grid), ncol = 1) } # Sparse Grids Trapezoidal if (method == "sgTr1") { grid <- createNIGrid(dim, 'cNC1', level = accuracy, ndConstruction = "sparse", level.trans = TRUE) rescale(grid,cbind(rep(-3.5,dim),rep(+3.5,dim))) nodes <- getNodes(grid) nodes_allMkt <- t(replicate(nmkt, c(nodes))) weights<-as.matrix(getWeights(grid), ncol = 1) tmp <- apply(nodes, MARGIN = 1, FUN = function(x){ prod(dnorm(x)) }) weights <- weights * tmp } if (method == "sgTr") { grid <- createNIGrid(dim, 
'Trapez', level = accuracy, ndConstruction = "sparse", level.trans = TRUE) rescale(grid,cbind(rep(-3.5,dim),rep(+3.5,dim))) nodes <- getNodes(grid) nodes_allMkt <- t(replicate(nmkt, c(nodes))) weights<-as.matrix(getWeights(grid), ncol = 1) tmp <- apply(nodes, MARGIN = 1, FUN = function(x){ prod(dnorm(x)) }) weights <- weights * tmp } output$nodesMktShape <- nodes_allMkt output$weights <- weights return(output) } .draws_listToMatrix <- function( drawList, amountDraws, market_identifier_pd , market_identifier_list_name, use ){ list_names <- names( drawList ) # checks drawsInList<- unlist( lapply( drawList, function(i){ draws <- ncol(i) -1 # minus 1 because of market id col. return(draws) })) if( !all( drawsInList == amountDraws ) ){ cat( paste(use ,":") ) stop("Number of draws for at least one list entry is smaller as the provided integration accuracy. Include draws accordingly.") } # extract data tmp<- lapply( list_names, function(i){ drawset_i <- na.fail( as.data.frame( get( i , drawList ) ) ) marketid_list_i <- get( market_identifier_list_name, drawset_i) if( any( table(marketid_list_i) >1) ){ cat( paste(use ,":") ) stop("Draws in one list entry are not unique for at least one market.") } # reorder according to ordered markets pD_order <- match( unique(market_identifier_pd), marketid_list_i) if( any( is.na( pD_order ) ) ){ cat( paste(use ,":") ) stop("Draws in one list entry are not available for at least one market.") } drawset_i[[market_identifier_list_name]] <- NULL tmpList <- as.matrix( drawset_i[ pD_order , 1:amountDraws ]) # now, tmpDemogr.data has roworder as pD colnames(tmpList) <- paste0(use ,"_draw",1:amountDraws,"_", i ) if( ! all( is.finite( tmpList ) ) ){ cat( paste(use ,":") ) stop("Draws contain non-numeric values.") } return(tmpList) } ) # Demographic data is arranged in a matrix, with the draws of # different variables next to each other : D <- do.call(cbind,tmp) return(D) } .indexing.markets <- function( cdidOld ) { cdidOld <- as.character(cdidOld) unique.ids <- unique(cdidOld) nmkt <- length(unique.ids) cdid <- numeric(length(cdidOld)) for(i in 1:nmkt ){ relevantMarkets <- cdidOld == unique.ids[i] cdid[ relevantMarkets ] <- i } return( cdid ) } .prepare_theta2 <- function(par_theta2, final_col_names_par, final_row_names_par , K , M){ if( missing(par_theta2) ){ par_theta2 <- matrix( 0, nrow = K, ncol = 1 + M ) } else{ if( !setequal( colnames(par_theta2) , final_col_names_par )) stop("Colnames of par_theta2 do not match with names of obs. and unobs. heterogeneity. Remember to name column of unobs. heterogeneity \"unobs_sd\" .\n") if( !setequal( rownames(par_theta2) , final_row_names_par )) stop("Rownames of par_theta2 do not match with random coefficients. 
Remember to name any constant \"(Intercept)\" .\n") #reorder accoring to demographic_names and random coefficients from formula col_order <- match( final_col_names_par, colnames(par_theta2) ) row_order <- match( final_row_names_par, rownames(par_theta2) ) par_theta2 <- par_theta2[ row_order , col_order ,drop=FALSE] } rownames(par_theta2) <- final_row_names_par colnames(par_theta2) <- final_col_names_par # indices for saving col and row structure indices <- which( !is.na( par_theta2 ), arr.ind = TRUE ) # NA's are excluded from the estimation algorithm par_theta2 <- na.omit( c( par_theta2) ) total_par <- length(par_theta2) if( any (!is.finite( par_theta2 ) )) stop( "Provide finite starting guesses for par_theta2.") out <- list(par_theta2=par_theta2, indices=indices, total_par =total_par, final_row_names_par= final_row_names_par, final_col_names_par=final_col_names_par ) return( out ) }
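# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package source): the modified Latin
# hypercube idea behind .MLHS() above. For N draws in one dimension, the unit
# interval is split into N equally sized strata, one uniform draw is placed in
# each stratum, the draws are shuffled, and (as in get_integration_input) the
# result is mapped to the normal scale with qnorm(); equal weights 1/N are
# attached to the nodes.
N_example <- 5
u_example <- ((1:N_example) - 1) / N_example + runif(1) / N_example
u_example <- u_example[sample(N_example)]   # shuffle the stratified draws

nodes_example   <- qnorm(u_example)         # normal-scale integration nodes
weights_example <- rep(1 / N_example, N_example)
# ---------------------------------------------------------------------------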
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/helperFunctions.R
#' @useDynLib BLPestimatoR #' @importFrom Rcpp sourceCpp NULL #' This function creates a simulated BLP dataset. #' @useDynLib BLPestimatoR #' @param nmkt number of markets #' @param nbrn number of products #' @param Xlin character vector specifying the set of linear variables #' @param Xexo character vector specifying the set of exogenous variables (subset of \code{Xlin}) #' @param Xrandom character vector specifying the set of random coefficients (subset of \code{Xlin}) #' @param instruments character vector specifying the set of instrumental variables #' #' @param true.parameters list with parameters of the DGP #' \describe{ #' \item{\code{Xlin.true.except.price}}{"true" linear coefficients in utility function except price} #' \item{\code{Xlin.true.price}}{"true" linear price coefficient in utility function} #' \item{\code{Xrandom.true}}{"true" set of random coefficients} #' \item{\code{instrument.effects}}{"true" coefficients of instrumental variables to explain endogenous price} #' \item{\code{instrument.Xexo.effects}}{"true" coefficients of exogenous variables to explain endogenous price} } #' #' @param price.endogeneity list with arguments of the multivariate normal #' distribution #' \describe{ #' \item{\code{mean.xi}}{controls for the mean of the error term in the utility function} #' \item{\code{mean.eita}}{controls for the mean of the error term in the price function} #' \item{\code{cov}}{controls for the covariance of \code{xi} and \code{eita} }} #' #' @param printlevel 0 (no output) 1 (summary of generated data) #' @param seed seed for the random number generator #' #' @return Returns a simulated BLP dataset. #' #' @details The dataset is balanced, so every market has the same amount of products. #' Only unobserved heterogeneity can be considered. #' Variables that enter the equation as a Random Coefficient or #' exogenously must be included in the set of linear variables. #' The \code{parameter.list} argument specifies the "true" effect on the #' individual utility for each component. Prices are generated endogenous #' as a function of exogenous variables and instruments, where the #' respective effect sizes are specified in \code{instrument.effects} #' and \code{instrument.Xexo.effects}. Error terms \code{xi} and \code{eita} #' are drawn from a multivariate normal distribution, whose #' parameters can be set in \code{price.endogeneity}. Market shares #' are generated by MLHS integration rule with 10000 nodes. 
#' #' #' @importFrom stats dnorm #' @importFrom stats pnorm #' @importFrom stats qnorm #' @importFrom stats rnorm #' @importFrom stats runif #' @importFrom stats pchisq #' @importFrom stats na.omit #' @importFrom stats optim #' @importFrom stats model.frame #' @importFrom stats model.matrix #' @importFrom stats model.response #' @importFrom stats na.fail #' @importFrom stats optim #' @importFrom Formula as.Formula #' @importFrom mvQuad createNIGrid #' @importFrom mvQuad rescale #' @importFrom mvQuad getWeights #' @importFrom mvQuad getNodes #' @importFrom numDeriv hessian #' @importFrom randtoolbox halton #' #' @examples #' K<-2 #number of random coefficients # example_data <- simulate_BLP_dataset(nmkt = 25, # nbrn = 20, # Xlin = c("price", "x1", "x2", "x3", "x4", "x5"), # Xexo = c("x1", "x2", "x3", "x4", "x5"), # Xrandom = paste0("x",1:K), # instruments = paste0("iv",1:10), # true.parameters = list(Xlin.true.except.price = rep(0.2,5), # Xlin.true.price = -0.2, # Xrandom.true = rep(2,K), # instrument.effects = rep(2,10), # instrument.Xexo.effects = rep(1,5)), # price.endogeneity = list( mean.xi = -2, # mean.eita = 0, # cov = cbind( c(1,0.7), c(0.7,1))), # printlevel = 0, seed = 234234 ) #' #' @export simulate_BLP_dataset <- function( nmkt, nbrn, Xlin, Xexo, Xrandom, instruments, true.parameters = list(), price.endogeneity = list( mean.xi = -2, mean.eita = 0, cov = cbind( c(1,0.7), c(0.7,1))), printlevel=1 , seed) { ## input checks ---- ### Existence check of necessary arguments # (collecting all arguments as a list, implicitly requires the arguments to be non-empty, # and lapply checks whether list arguments are non empty, i.e. are null) if( missing(nmkt) || missing(nbrn) || missing(Xlin) || missing(Xexo) || missing(Xrandom) || missing(instruments)){ stop("Include arguments nmkt, nbrn, Xlin, Xexo, Xrandom and instruments") } toBeTested<- list("true.parameters$Xlin.true.except.price" = true.parameters$Xlin.true.except.price, "true.parameters$Xlin.true.price" = true.parameters$Xlin.true.price, "true.parameters$Xrandom.true" = true.parameters$Xrandom.true, "true.parameters$instrument.effects" = true.parameters$instrument.effects, "true.parameters$instrument.Xexo.effects" = true.parameters$instrument.Xexo.effects, "price.endogeneity$mean.xi" = price.endogeneity$mean.xi, "price.endogeneity$mean.eita" = price.endogeneity$mean.eita, "price.endogeneity$cov" = price.endogeneity$cov) names_toBeTested <- names(toBeTested) lapply(names_toBeTested, function(i){ if(is.null(toBeTested[[i]])) { stop(paste("Argument",i,"is missing.")) } } ) ### Content check if (is.numeric(seed) & (length(seed)>0) ) { set.seed(seed) } else { seed <- NA } if (!("price" %in% Xlin)) stop("Linear parameters must include a variable named *price* . ") if (!all(Xexo %in% Xlin) || !all(Xrandom %in% Xlin)) stop("Linear parameters must include random coefficients and exogenous variables.") if (length(Xlin) != (length(true.parameters$Xlin.true.except.price) + 1)) stop("Number of linear parameters and true effects must match.") if (length(Xrandom) != length(true.parameters$Xrandom.true)) stop("Number of random coefficients and true effects must match.") if (length(instruments) != length(true.parameters$instrument.effects)) stop("Number of instruments and true effects must match.") ## intializing vectors ---- nobs <- nmkt * nbrn nlin <- length(Xlin) # number of lin. 
parameters ninst <- length(instruments) # number of instruments K <- length(true.parameters$Xrandom.true) cdid <- sort(rep(1:nmkt, nbrn)) prod_id <- rep(1:nbrn, nmkt) cdindex <- c(0, cumsum(table(cdid))) total.demographics <- 0 ## eita & xi ---- cov.xi.eita <- price.endogeneity$cov random.rv <- cbind(rnorm(nobs), rnorm(nobs)) choleski <- t(chol(cov.xi.eita)) xi.eita <- t(apply( random.rv , 1 ,function(x) choleski %*% x) ) xi <- xi.eita[, 1] + price.endogeneity$mean.xi eita <- xi.eita[, 2] + price.endogeneity$mean.eita ## generate data matrices ---- instruments.data <- vapply(instruments, function(x) runif(nobs, 0, 2), numeric(nobs)) #;% cost shifters Xlin.data <- vapply(Xlin, function(x) runif(nobs, 0, 2), numeric(nobs)) Xlin.data[, "price"] <- NA # %; price is generated as a dependent variable of instruments and Xexo Xexo.data <- vapply(Xexo, function(x) Xlin.data[, x], numeric(nobs)) Xlin.data[, "price"] <- instruments.data %*% true.parameters$instrument.effects + Xexo.data %*% true.parameters$instrument.Xexo.effects + eita Xrandom.data <- vapply(Xrandom, function(x) Xlin.data[, x], numeric(nobs)) # true marketshares ---- integration.list <- get_integration_input(dim = K, method = "MLHS", accuracy = 10000, nmkt = nmkt, seed = seed) draws_mktShape <- integration.list$nodesMktShape deltatrue <- c(Xlin.data[, "price"] * true.parameters$Xlin.true.price + Xlin.data[, -which(Xlin == "price"), drop = FALSE] %*% matrix(true.parameters$Xlin.true.except.price) + xi) deltatrue.exp <- exp(deltatrue) theta2.matrix <- matrix(true.parameters$Xrandom.true) # ;% could also include demographic effects in subsequent columns expmu <- getExpMu(theta2.matrix, draws_mktShape, Xrandom.data, cdid, demographics = matrix(NA)) sij <- getSij(expmu, deltatrue.exp, cdindex) shares <- c(sij %*% matrix(integration.list$weights)) ## message and output ---- if (printlevel > 1) { cat("You are now working with:", "\n", K, "random coefficient(s) with true values: ", true.parameters$Xrandom.true, ",\n", total.demographics, "demographics, \n", nmkt, "markets,", "\n", nbrn, "products and", "\n", ninst, "instruments.", "\n", "The following variables enter the problem linearly: ", Xlin, "\n", "Exogenous (i.e. no correlation to the structural error term) variables are:", Xexo, "\n", "Variables that are used as random coefficient: ", Xrandom, "\n") } BLP.data <- cbind(Xlin.data, instruments.data, cdid, shares, deltatrue,prod_id) BLP.dataframe <- data.frame(BLP.data) return(BLP.dataframe) }
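# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package source): how the correlated
# error terms xi (demand side) and eita (price equation) are generated above.
# Two independent standard normal draws per observation are premultiplied by
# the lower-triangular Cholesky factor of the requested covariance matrix and
# then shifted by the requested means (defaults: mean.xi = -2, mean.eita = 0,
# covariance 0.7). Made-up sample size:
nobs_example <- 1000
cov_example  <- cbind(c(1, 0.7), c(0.7, 1))
chol_lower   <- t(chol(cov_example))                    # lower Cholesky factor
rv_example   <- cbind(rnorm(nobs_example), rnorm(nobs_example))

xi_eita_example <- t(apply(rv_example, 1, function(x) chol_lower %*% x))
xi_example      <- xi_eita_example[, 1] + (-2)          # mean.xi   = -2
eita_example    <- xi_eita_example[, 2] + 0             # mean.eita =  0
# cov(xi_example, eita_example) should be close to 0.7
# ---------------------------------------------------------------------------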
/scratch/gouwar.j/cran-all/cranData/BLPestimatoR/R/simulate_BLP_dataset.R