#' repmat #' #' This function repeats copies of a matrix #' @param X numeric: a matrix #' @param m numeric: number of times to repeat the X matrix in the row dimension #' @param n numeric: number of times to repeat the X matrix in the column dimension #' @return matrix: the repeated matrix #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @export repmat <- function(X,m,n){ if(is.vector(X)==TRUE){ mx <- 1 nx <- 1 } else{ mx <- dim(X)[1] nx <- dim(X)[2]} matrice<-matrix(t(matrix(X,mx,nx*n)),mx*m,nx*n,byrow=T) return(matrice) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/repmat.R
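A minimal usage sketch for `repmat` (the input matrix and repetition factors below are illustrative); it mirrors the behaviour of the MATLAB function of the same name, tiling the input `m` times along rows and `n` times along columns:

```r
library(Arothron)

A <- matrix(1:4, nrow = 2)    # 2 x 2 input matrix
B <- repmat(A, m = 2, n = 3)  # tiled into a 4 x 6 matrix
dim(B)                        # 4 6
```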
#' @title example dataset #' @description POVs sampled inside the maxillary sinus cavity #' @name sinus_set #' @docType data #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @keywords Arothron #' @usage data(sinus_set) NULL
/scratch/gouwar.j/cran-all/cranData/Arothron/R/sinus_set.R
#' spherical.flipping #' #' Internal spherical flipping function #' @param C numeric: coordinates of the point of view #' @param mesh object of class mesh3d #' @param param1 numeric: first parameter for spherical flipping (usually in the range 0.1 to 3; worth experimenting with) #' @param param2 numeric: second parameter for spherical flipping (should not be changed) #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @references Profico, A., Schlager, S., Valoriani, V., Buzi, C., Melchionna, M., Veneziano, A., ... & Manzi, G. (2018). #' Reproducing the internal and external anatomy of fossil bones: Two new automatic digital tools. American Journal of Physical Anthropology, 166(4), 979-986. #' #' Katz, S., Tal, A., & Basri, R. (2007). Direct visibility of point sets. In ACM SIGGRAPH 2007 papers (pp. 24-es). #' @export spherical.flipping<-function(C,mesh,param1,param2){ C<-matrix(as.vector(C),ncol=3,nrow=1) mesh<-mesh P<-t(mesh$vb)[,-4] numVer<-dim(P)[2] numDim<-dim(P)[1] P2<-P-(repmat(C,numDim,1)) normp<-rowSums(P2^2) normp<-sqrt(normp) param<-param1 R2<-matrix(repmat(max(normp)*(param2^param1),numDim, 1)) SF<-P2+2*repmat(R2-cbind(normp),1,3)* P2/(repmat(cbind(normp),1,3)) return(SF) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/spherical.flipping.R
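A hedged usage sketch for `spherical.flipping`; the sphere mesh (from `Rvcg::vcgSphere`) and the parameter values are illustrative choices, not values prescribed by the package documentation:

```r
library(Arothron)
library(Rvcg)

sph <- vcgSphere(subdivision = 2)   # simple triangulated sphere mesh (class mesh3d)
pov <- c(0, 0, 5)                   # hypothetical point of view outside the mesh
flipped <- spherical.flipping(pov, sph, param1 = 1, param2 = 2)
dim(flipped)                        # one flipped 3D point per mesh vertex
```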
#' trasf.mesh #' #' This function centers a mesh on the barycenter coordinates #' @param mesh a 3D mesh of class "mesh3d" #' @param barycenter numeric: coordinates of the center #' @return mesh a 3D mesh of class "mesh3d" #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @export trasf.mesh=function(mesh,barycenter){ mesh$vb[1,]=mesh$vb[1,]+(barycenter[1]) mesh$vb[2,]=mesh$vb[2,]+(barycenter[2]) mesh$vb[3,]=mesh$vb[3,]+(barycenter[3]) return(mesh) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/trasf.mesh.R
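A short, hedged example of `trasf.mesh`; the sphere mesh and target barycenter coordinates are illustrative:

```r
library(Arothron)
library(Rvcg)

sph <- vcgSphere(subdivision = 2)   # mesh centred near the origin
target <- c(10, 0, 0)               # hypothetical barycenter coordinates
moved <- trasf.mesh(sph, target)
colMeans(t(moved$vb)[, 1:3])        # vertex centroid is now close to (10, 0, 0)
```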
#' twodvarshape #' Calculates the shape variation associated to a value of PC scores associated to a specific combined landmark configuration or view #' @param twodviews_ob object from twodviews() #' @param scores numeric: the values of the PC scores for which the visualization is called #' @param PC PC chosen #' @param view numeric: which landmark configuration will be used to build the shape variation #' @return mat matrix of coordinates associated to the called shape variation #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @references Profico, A., Piras, P., Buzi, C., Del Bove, A., Melchionna, M., Senczuk, G., ... & Manzi, G. (2019). #' Seeing the wood through the trees. Combining shape information from different landmark configurations. Hystrix, 157-165. #' @examples #' library(Arothron) #' #load the 2D primate dataset #' data("Lset2D_list") #' #combine the 2D datasets and PCA #' combin2D<-twodviews(Lset2D_list,scale=TRUE,vector=c(1:5)) #' #calculate the shape variation associated to the negative extreme value of PC1 #' min_PC1<-twodvarshape(combin2D,min(combin2D$PCscores[,1]),1,5) #' plot(min_PC1,asp=1) #' #calculate the shape variation associated to the positive extreme value of PC1 #' max_PC1<-twodvarshape(combin2D,max(combin2D$PCscores[,1]),1,5) #' plot(max_PC1,asp=1) #' @export twodvarshape<-function (twodviews_ob, scores, PC, view) { pos_pcs <- twodviews_ob$dims * twodviews_ob$dimm if (view == 1) { sel_pcs <- 1:pos_pcs[view] } if (view == (length(twodviews_ob$dims))) { sel_pcs <- (sum(pos_pcs[1:(view - 1)]) + 1):sum(pos_pcs[1:view]) } if (view != 1 & view != (length(view))) { sel_pcs <- (sum(pos_pcs[1:(view - 1)]) + 1):(sum(pos_pcs[1:(view - 1)]) + pos_pcs[view]) } mshape <- twodviews_ob$mshapes[[view]] * sqrt(twodviews_ob$dims[view] * twodviews_ob$dimm[view]) PCs <- twodviews_ob$PCs[sel_pcs, PC] mat <- restoreShapes(scores, PCs, mshape) return(mat) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/twodvarshape.R
#' twodviews #' Combine and calculate the PCscores matrix from a list of different landmark configurations to be combined #' @param twodlist a list containing the landmark configurations of each anatomical view stored as separated lists #' @param scale logical: TRUE for shape-space, FALSE for form-space #' @param vector numeric vector: defines which views are to be used #' @return PCscores PC scores #' @return PCs Pricipal Components (eigenvector matrix) #' @return Variance table of the explained variance by the PCs #' @return size vector containing the Centroid Size of each configuration #' @return mshapes a list containing the mean shape of each landmark configuration #' @return dims number of landmarks of each configuration #' @return dimm dimension (2D or 3D) of each combined landmark configuration #' @return twodlist the list used as input #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @references Profico, A., Piras, P., Buzi, C., Del Bove, A., Melchionna, M., Senczuk, G., ... & Manzi, G. (2019). #' Seeing the wood through the trees. Combining shape information from different landmark configurations. Hystrix, 157-165. #' @examples #' library(Morpho) #' #load the 2D primate dataset #' data("Lset2D_list") #' length(Lset2D_list) #' #combine the 2D datasets and PCA #' combin2D<-twodviews(Lset2D_list,scale=TRUE,vector=c(1:5)) #' combin2D$size #' #plot of the first two Principal Components #' plot(combin2D$PCscores) #' text(combin2D$PCscores,labels=rownames(combin2D$PCscores)) #' #load the 3D primate dataset #' data("Lset3D_array") #' #GPA and PCA #' GPA_3D<-procSym(Lset3D_array) #' #plot of the first two Principal Components #' plot(GPA_3D$PCscores) #' text(GPA_3D$PCscores,labels=rownames(GPA_3D$PCscores)) #' @export #' twodviews<-function (twodlist, scale = TRUE, vector = NULL) { if(is.null(vector)){ vector<-1:length(twodlist) } sizes = matrix(NA, ncol = length(vector), nrow = dim(listtoarray(twodlist[[1]]))[3]) mshapes = list() dims = NULL Rots = NULL count <- 0 dimm<-NULL for (h in vector) { count <- count + 1 coo <- listtoarray(twodlist[[h]]) dimm<-c(dimm,dim(coo)[2]) dims <- c(dims, dim(coo)[1]) if (scale == FALSE) { gpa <- procSym(coo, scale = FALSE, CSinit = FALSE) } else { gpa <- procSym(coo, scale = TRUE) } coo.r <- gpa$orpdata Rots <- cbind(Rots, (vecx(coo.r) * sqrt(dim(coo)[1] * dimm[h]))) sizes[, count] <- gpa$size mshapes[[count]] <- gpa$mshape } Rots_pca <- prcomp(Rots, scale. = FALSE) values <- 0 eigv <- Rots_pca$sdev^2 values <- eigv[which(eigv > 1e-16)] lv <- length(values) PCs <- Rots_pca$rotation[, 1:lv] PCscores <- as.matrix(Rots_pca$x[, 1:lv]) rownames(PCscores) <- names(twodlist[[1]]) Variance <- cbind(sqrt(eigv), eigv/sum(eigv), cumsum(eigv)/sum(eigv)) * 100 Variance <- Variance[1:lv, ] rotated <- list() for(i in 1:length(vector)){ if(i ==1){ rotated[[i]]<-vecx(Rots[,1:(dims[i]*dimm[i])],byrow = FALSE,revert = TRUE,lmdim = dimm[i])}else{ rotated[[i]]<-vecx(Rots[,((dims[i-1]*dimm[i])+1):(sum(dims[(i-1):i])*dimm[i])],byrow = FALSE,revert = TRUE,lmdim = dimm[i]) } } out <- list(PCscores = PCscores, PCs = PCs, Variance = Variance, size = sizes, rotated = rotated, mshapes = mshapes, dims = dims, twodlist = twodlist, dimm=dimm) return(out) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/twodviews.R
#' volendo #' #' Calculate the volume of a mesh by using a voxel-based method #' @param mesh object of class mesh3d #' @param alpha_vol numeric: alpha shape for construction external concave hull #' @param ncells numeric: approximative number of cell for 3D grid construction #' @return vol numeric: volume of the mesh expressed in cc #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @examples #' \dontrun{ #' #load the human skull #' library(rgl) #' data(human_skull) #' sapendo<-endomaker(human_skull,param1_endo = 1.0,vol=FALSE, num.cores=NULL) #' volsap<-volendo(sapendo$endocast) #' } #' @export volendo<-function (mesh, alpha_vol = 100, ncells = 1e+05) { if (checkFaceOrientation(mesh) == TRUE){ mesh<-invertFaces(mesh) } conv_endo <- ashape3d(vert2points(mesh), alpha = alpha_vol, pert = TRUE, eps = 1e-06) x <- conv_endo selrows = x$triang[, 8 + 1] %in% 2L tr <- x$triang[selrows, c("tr1", "tr2", "tr3")] m = rgl::tmesh3d(vertices = t(x$x), indices = t(tr), homogeneous = FALSE) sn = alphashape3d::surfaceNormals(x, indexAlpha = 1) fn = facenormals(m) dp = dot((fn$normals), t(sn$normals)) m$it[, dp < 0] = m$it[c(1, 3, 2), dp < 0] conv_sur <- m bbox <- meshcube(mesh) bbox_w <- sqrt(sum((bbox[1, ] - bbox[3, ])^2)) bbox_h <- sqrt(sum((bbox[1, ] - bbox[2, ])^2)) bbox_l <- sqrt(sum((bbox[1, ] - bbox[5, ])^2)) bbox_c <- prod(bbox_w, bbox_h, bbox_l) voxelsize <- (bbox_c/ncells)^(1/3) xbox <- seq(min(bbox[, 1]), max(bbox[, 1]), by = voxelsize) ybox <- seq(min(bbox[, 2]), max(bbox[, 2]), by = voxelsize) zbox <- seq(min(bbox[, 3]), max(bbox[, 3]), by = voxelsize) GRID <- as.matrix(expand.grid(xbox, ybox, zbox)) DISCR <- vcgClostKD(GRID, mesh, sign = TRUE) voxels_inside_1 <- GRID[which(DISCR$quality <= 0), ] out_endo <- vcgClostKD(voxels_inside_1, conv_sur, sign = TRUE) voxels_inside_2 <- voxels_inside_1[which(out_endo$quality >= 0), ] count_cells <- nrow(voxels_inside_2) vol <- ((voxelsize^3) * count_cells)/1000 return(vol) }
/scratch/gouwar.j/cran-all/cranData/Arothron/R/volendo.R
#' @title example dataset #' @description Landmark set on Yoda #' @name yoda_set #' @docType data #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @keywords Arothron #' @usage data(yoda_set) NULL
/scratch/gouwar.j/cran-all/cranData/Arothron/R/yoda_set.R
#' @title example dataset #' @description Mesh of Yoda #' @name yoda_sur #' @docType data #' @author Antonio Profico, Costantino Buzi, Marina Melchionna, Paolo Piras, Pasquale Raia, Alessio Veneziano #' @keywords Arothron #' @usage data(yoda_sur) NULL
/scratch/gouwar.j/cran-all/cranData/Arothron/R/yoda_sur.R
#' Daily prices and total returns for a set of ETFs. #' #' Data set containing daily prices and total returns for 37 exchange-traded #' funds (ETFs) as well as daily returns for U.S. Treasury bills #' (risk-free asset). #' #' @docType data #' #' @usage data(ETFs) #' #' @format An object of class \code{"list"} #' \describe{ #' \item{Prices}{\code{xts} object with daily prices} #' \item{Returns}{\code{xts} object with daily total returns} #' \item{Description}{\code{data.frame} with information about the ETFs} #' \item{risk_free}{\code{xts} object with daily returns of U.S. Treasury bills} #' } #' #' @keywords datasets #' @examples #' #' data(ETFs) #' head(ETFs$Prices) #' ETFs$Description #' "ETFs"
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/ETFs.R
#' Backtesting of asset allocation strategies #' #' \code{backtest_allocation} computes a backtest of a given portfolio #' allocation rule. #' #' The function first determines the rebalancing dates based #' on \code{strat$rebalance_frequency}. Then, it cycles through intermediate #' dates and calculates daily returns based on the allocation. If the optional #' parameter \code{start_date} is provided, the backtest will start on that #' date. Otherwise, it will start from the date from which data on all assets #' becomes available. #' #' @param strat A list representing an asset allocation strategy. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' @param start_date Optional starting date #' #' @examples #' # Example 1: backtesting one of the asset allocations in the package #' us_60_40 <- asset_allocations$static$us_60_40 #' bt_us_60_40 <- backtest_allocation(us_60_40, #' ETFs$Prices, #' ETFs$Returns, #' ETFs$risk_free) #' #' # show table with performance metrics #' bt_us_60_40$table_performance #' # Example 2: creating and backtesting an asset allocation from scratch #' #' # create a strategy that invests equally in momentum (MTUM), value (VLUE), #' # low volatility (USMV) and quality (QUAL) ETFs. #' #' factor_strat <- list(name = "EW Factors", #' tickers = c("MTUM", "VLUE", "USMV", "QUAL"), #' default_weights = c(0.25, 0.25, 0.25, 0.25), #' rebalance_frequency = "month", #' portfolio_rule_fn = "constant_weights") #' #' # get data for tickers using getSymbols #' factor_ETFs <- get_data_from_tickers(factor_strat$tickers, #' starting_date = "2020-01-01") #' # backtest the strategy #' bt_factor_strat <- backtest_allocation(factor_strat, #' factor_ETFs$P, #' factor_ETFs$R) #' # show table with performance metrics #' bt_factor_strat$table_performance #' @return An object of class \code{"List"} with the following elements: #' \item{strat}{The strat provided to the function} #' \item{returns}{An xts object with the daily returns of the strategy} #' \item{table_performance}{A table with performance metrics} #' \item{rebalance_dates}{Vector of rebalancing dates} #' \item{rebalance_weights}{Vector of rebalancing dates} #' @export #' @import xts #' @importFrom PerformanceAnalytics table.AnnualizedReturns #' @importFrom PerformanceAnalytics table.DownsideRiskRatio #' @importFrom PerformanceAnalytics table.DownsideRisk backtest_allocation <- function(strat, P, R, risk_free = 0, start_date = NULL){ # check that strat contains the expected elements if (!("tickers" %in% names(strat))){ stop("Expected tickers in the strat object") } if (!("default_weights" %in% names(strat))){ stop("Expected default_weights in the strat object") } if (!("rebalance_frequency" %in% names(strat))){ stop("Expected rebalance_frequency in the strat object") } if (!("portfolio_rule_fn" %in% names(strat))){ stop("Expected portfolio_rule_fn in the strat object") } # if risk-free was provided, check it has the correct size rf_len <- length(risk_free) if (rf_len > 1){ if (rf_len != nrow(R)){ stop("risk_free must be the same length nrows(R).") } } else { risk_free <- xts(rep(risk_free/252, nrow(R)), order.by = index(P)) } # check if R is an xts object. If not, throw error if (!any(class(R)=="xts")){ stop("R must be an xts object.") } # check if P is an xts object. 
If not, throw error if (!any(class(P)=="xts")){ stop("P must be an xts object.") } # check dimensions of R and P match if (any(dim(R) != dim(P))){ stop("Dimensions of P and R don't match.") } # check that P contains columns matching the tickers in strat n_assets <- length(strat$tickers) for (i in 1:n_assets){ if (!(strat$tickers[i] %in% colnames(P))){ stop(paste0("Ticker ", strat$tickers[i], " not found in P")) } } # check that R contains columns matching the tickers in strat n_assets <- length(strat$tickers) for (i in 1:n_assets){ if (!(strat$tickers[i] %in% colnames(R))){ stop(paste0("Ticker ", strat$tickers[i], " not found in R")) } } # check if user provided params. If not, initialize if (!"params" %in% names(strat)){ strat$params <- list() } # get dates, number of assets, rebalancing dates dates <- index(R) rebal_dates <- get_rebalance_dates(dates, strat$rebalance_frequency) if (!is.null(start_date)){ rebal_dates <- rebal_dates[rebal_dates >= start_date] } # starting date R <- R[, strat$tickers] P <- P[, strat$tickers] # check if portfolio rule different from identity. # in this case, increase start date by one year if (!any(grepl("identity", deparse(strat$portfolio_rule_fn)))){ first_date <- dates[which.max((!is.na(rowSums(R)))) + 252] } else{ first_date <- dates[which.max((!is.na(rowSums(R))))] } rebal_dates <- rebal_dates[rebal_dates >= first_date] # figure out allocations on rebal_dates weights <- xts(matrix(0, length(rebal_dates), n_assets), order.by = rebal_dates) colnames(weights) <- strat$tickers for (i_date in seq(from = 1, to = length(rebal_dates))){ this_reb_date <- rebal_dates[i_date] weights[i_date, ] <- do.call(strat$portfolio_rule_fn, list(strat, this_reb_date, P, R, risk_free)) } # calculation of daily returns strat_returns <- xts(rep(NA, length(dates)), order.by = dates) colnames(strat_returns) <- make.names(strat$name) for (i_date in seq(from = 1, to = length(rebal_dates)-1)){ # find dates between this and the next rebalance date dates_between <- dates[dates > rebal_dates[i_date] & dates <= rebal_dates[i_date+1]] if (i_date == length(rebal_dates)-1){ dates_between <- dates[dates > rebal_dates[i_date] & dates <= dates[length(dates)]] } weight_risk_assets <- sum(weights[i_date]) weight_risk_free <- 1 - weight_risk_assets risk_free_account <- cumprod(c(weight_risk_free, 1 + risk_free[dates_between])) risk_account <- daily_account_calc(weights[i_date], R[dates_between,]) total_account <- risk_free_account + risk_account strat_returns[dates_between] <- total_account[2:length(total_account)]/ total_account[1:length(total_account) -1 ] - 1 } strat_returns <- strat_returns[paste0(as.character(first_date), "/")] risk_free <- risk_free[paste0(as.character(first_date), "/")] # calculate some statistics table1 <- table.AnnualizedReturns(strat_returns, Rf = risk_free) table2 <- table.DownsideRiskRatio(strat_returns, MAR = mean(risk_free)) table3 <- table.DownsideRisk(strat_returns, Rf = mean(risk_free)) table_metrics <- rbind(table1, table2, table3) return(list(strat = strat, returns = strat_returns, table_performance = table_metrics, rebalance_dates = rebal_dates, rebalance_weights = weights)) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/backtest_allocation.R
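The optional `start_date` argument described in the details is not exercised in the roxygen examples; a hedged sketch of its use (the date below is arbitrary):

```r
library(AssetAllocation)

us_60_40 <- asset_allocations$static$us_60_40
bt_recent <- backtest_allocation(us_60_40,
                                 ETFs$Prices,
                                 ETFs$Returns,
                                 ETFs$risk_free,
                                 start_date = as.Date("2015-01-01"))
# rebalancing now only starts on or after the supplied date
head(bt_recent$rebalance_dates)
```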
#' Returns constant weights for static asset allocations #' #' \code{constant_weights} applies the identity function to the default weights #' in a strategy. #' #' #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @examples #' us_60_40 <- asset_allocations$static$us_60_40 #' reb_date <- as.Date("2022-03-31") #' constant_weights(us_60_40, #' reb_date, #' ETFs$Prices[, us_60_40$tickers], #' ETFs$Returns[, us_60_40$tickers], #' ETFs$risk_free) #' @return A numeric vector of weights after applying the rule. #' @export # function for constant weights constant_weights <- function(strat, reb_date = NULL, P, R, risk_free){ identity(strat$default_weights)}
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/constant_weights.R
#' Calculation of account value for backtesting asset allocation strategies #' #' \code{daily_account_calc} is a helper function used by #' \code{backtest_allocation} to calculate the theoretical account #' value given an initial allocation to assets. It is not intended to be called #' directly by the user. #' #' The function simulates the value of a theoretical account from the initial #' weights and the daily returns of a set of assets. #' #' @param w A vector of weights #' @param R An xts object with daily returns of the tickers in strat. #' @return A numeric vector with the daily value of the account. #' #' @export daily_account_calc <- function(w, R){ # some dimension checks if (length(w) != ncol(R)){ stop('Number of elements of w should match the number of columns in R') } if (sum(w==0) == length(w)){ return(rep(0, nrow(R) + 1)) } else{ n_assets <- ncol(R) n_periods <- nrow(R) PortMatrix <- matrix(0, nrow = n_periods + 1, ncol = n_assets) # Assume initial position in each stock equals the initial weight PortMatrix[1, 1:n_assets] <- w PortMatrix[2:nrow(PortMatrix), 1:n_assets] <- 1 + R PortMatrix <- apply(PortMatrix, 2, cumprod) if (n_assets == 1){ port_notional <- PortMatrix[, 1:n_assets] } else{ port_notional <- rowSums(PortMatrix[, 1:n_assets]) } return(port_notional) } }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/daily_account_calc.R
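Although `daily_account_calc` is documented as an internal helper, a tiny self-contained example (toy weights and returns chosen purely for illustration) shows what it returns:

```r
library(xts)
library(AssetAllocation)

dts <- as.Date("2023-01-02") + 0:4
R_toy <- xts(matrix(c( 0.010, -0.005,  0.002, 0.003, -0.001,
                       0.000,  0.004, -0.002, 0.001,  0.002),
                    ncol = 2), order.by = dts)
colnames(R_toy) <- c("A", "B")

# daily account value path, starting at sum(w) = 1 before any returns are applied
daily_account_calc(c(0.6, 0.4), R_toy)
```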
#' Downloads prices in xts format from a list of tickers from Yahoo Finance (<https://finance.yahoo.com/>). #' #' \code{get_data_from_tickers} retrieves adjusted closing prices from Yahoo Finance #' for a set of tickers and returns the prices and returns. #' #' The function retrieves data from Yahoo Finance (<https://finance.yahoo.com/>) #' using the getSymbols function from the \code{quantmod} package. It calculates #' returns from adjusted prices. The ticker names must correspond to those found #' in Yahoo Finance. #' #' @param tickers A character vector containing tickers. #' @param starting_date The date from which to retrieve price data. #' @examples #' ## download data for the following exchange-traded-funds: MTUM, VLUE, USMV, and QUAL. #' factor_ETFs <- get_data_from_tickers(c("MTUM", "VLUE", "USMV", "QUAL"), #' starting_date = "2020-01-01") #' @return An object of class \code{"List"} containing two objects of class #' \code{"xts"} with respectively the prices and returns of the assets, #' with column names corresponding to the tickers. #' @export #' @importFrom quantmod getSymbols #' @importFrom PerformanceAnalytics CalculateReturns #' @importFrom zoo index na.locf #' @importFrom curl has_internet #' @import xts get_data_from_tickers <- function(tickers, starting_date = "2007-01-01"){ # attempt to retrieve data from Yahoo Finance. Fail "gracefully" # in case there's an issue if (!has_internet()) { message("Problem connecting to Yahoo Finance. Check internet connection or try again later.") return(invisible(NULL)) } else{ suppressWarnings({ msg <- "Problem retrieving data from Yahoo. Check server status or that tickers are valid." x <- try(silent = TRUE, getSymbols(tickers, from = starting_date, source = 'yahoo')) if (inherits(x, "try-error")) { message( msg ) return(FALSE) } }) } # align all prices into one xts object prices <- xts() for (i in seq(from = 1, to = length(tickers))){ prices <- merge.xts(prices, get(tickers[i])[,6]) } colnames(prices) <- tickers # consider only weekdays: makes it possible # to mix exchange-traded and crypto w_days <- weekdays(index(prices)) w_ends <- which(w_days == "Saturday" | (w_days == "Sunday")) if (length(w_ends) > 0){ prices <- prices[-w_ends, ] } # take care of missing prices to avoid NA returns prices <- na.locf(prices) #calculate returns returns <- CalculateReturns(prices) # format as date zoo::index(returns) <- as.Date(zoo::index(returns)) zoo::index(prices) <- as.Date(zoo::index(prices)) return(list(P = prices, R = returns)) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/get_data_from_tickers.R
#' Portfolio rebalancing dates #' #' \code{get_rebalance_dates} determines rebalancing dates based on rebalancing #' frequency chosen by the user. This is a helper function used by #' \code{backtest_allocation} and is not intended to be called directly by the user. #' #' @param dates A vector of dates #' @param reb_freq Character with rebalancing frequency. Options are #' \code{"days"}, \code{"weeks"}, \code{"months"}, \code{"quarters"}, #' and \code{"years"} #' @param k An integer with number of periods to skip. #' @return A vector of dates. #' @export #' @importFrom xts endpoints get_rebalance_dates <- function(dates, reb_freq, k = 1){ # for now, we rebalance at the end of period in reb_freq # reb_freq must be one of the following: # "days", "weeks", "months", "quarters", and "years" reb_dates <- dates[endpoints(dates, on = reb_freq, k)] return(reb_dates[seq(from = 1, to = length(reb_dates)-1)]) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/get_rebalance_dates.R
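A quick illustration with a made-up daily calendar; `endpoints` picks the last date of each month and the function drops the final endpoint:

```r
library(AssetAllocation)

dts <- seq(as.Date("2021-01-01"), as.Date("2021-06-30"), by = "day")
get_rebalance_dates(dts, "months")
# "2021-01-31" "2021-02-28" "2021-03-31" "2021-04-30" "2021-05-31"
```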
#' Returns minimum variance portfolio weights on a given date #' #' \code{min_variance} determines asset allocations that minimize the variance #' of a portfolio. #' #' The function calculates the covariance matrix of returns using the last two #' years (or minimum of one year) of daily returns. It relies on the #' \code{minvar} function from the \code{NMOF} package. #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @examples #' ivy <- asset_allocations$tactical$ivy #' reb_date <- as.Date("2022-03-31") #' min_variance(ivy, reb_date, ETFs$Prices[, ivy$tickers], ETFs$Returns[, ivy$tickers]) #' @return A numeric vector of weights after applying the rule. #' @export #' @importFrom xts endpoints #' @importFrom zoo rollmean #' @importFrom RiskPortfolios covEstimation #' @importFrom NMOF minvar # minimum variance portfolio allocation min_variance <- function(strat, reb_date, P, R, risk_free = NULL){ # check that user supplied a specific window for cov estimation # if not, use the default 2 years (2*252 days) if (length(strat$params) > 0){ # check that there is element n_days_cov in params if ("n_days_cov" %in% names(strat$params)){ n_days_cov <- strat$params$n_days_cov } else{ warning("n_days_cov not found in strat$params. Defaulting to 252*2") n_days_cov <- 252*2 # default cov estimations window in days } } else { n_days_cov <- 252*2 # default cov estimations window in days } # calculations use data until the reb_date ava_dates <- paste0("/", reb_date) R <- R[ava_dates] R <- R[seq(from = max(1, nrow(R) - n_days_cov + 1), to = nrow(R)), ] n_assets <- length(strat$tickers) # get valid data R <- R[seq(from = which.max(!is.na(rowSums(R))), to = nrow(R)), ] # check if there is at least one year of daily data if (nrow(R) > 252){ # estimate covariance matrix with available data cov_mat <- covEstimation(R, control = list(type = 'ewma', lambda = 0.98851)) mvp_weights <- minvar(cov_mat) } else { mvp_weights <- rep(0, n_assets) } return(mvp_weights) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/min_variance.R
#' Returns risk parity weights on a given date #' #' \code{risk_parity} determines asset allocations using a risk parity rule. #' It obtains the weights such that each asset contributes to portfolio risk in #' proportion to its risk budget (given by \code{strat$default_weights}). #' #' The function calculates the covariance matrix of returns using the last two #' years (or minimum of one year) of daily returns. #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' @examples #' ivy <- asset_allocations$tactical$ivy #' reb_date <- as.Date("2022-03-31") #' risk_parity(ivy, reb_date, ETFs$Prices[, ivy$tickers], ETFs$Returns[, ivy$tickers]) #' @return A numeric vector of weights after applying the rule. #' @export #' @importFrom xts endpoints #' @importFrom zoo rollmean #' @importFrom RiskPortfolios covEstimation #' @importFrom riskParityPortfolio riskParityPortfolio risk_parity <- function(strat, reb_date, P, R, risk_free = NULL){ # check that user supplied a specific window for cov estimation # if not, use the default 2 years (2*252 days) if (length(strat$params) > 0){ # check that there is element n_days_cov in params if ("n_days_cov" %in% names(strat$params)){ n_days_cov <- strat$params$n_days_cov } else{ warning("n_days_cov not found in strat$params. Defaulting to 252*2") n_days_cov <- 252*2 # default cov estimations window in days } } else { n_days_cov <- 252*2 # default cov estimations window in days } # calculations use data until the reb_date ava_dates <- paste0("/", reb_date) R <- R[ava_dates] R <- R[seq(from = max(1, nrow(R) - n_days_cov + 1), to = nrow(R)), ] n_assets <- length(strat$tickers) # get valid data R <- R[seq(from = which.max(!is.na(rowSums(R))), to = nrow(R)), ] # check if there is at least one year of daily data if (nrow(R) > 252){ # estimate covariance matrix with available data cov_mat <- covEstimation(R, control = list(type = 'ewma', lambda = 0.98851)) # risk parity optimization rp_weight <- riskParityPortfolio(cov_mat, b = strat$default_weights)$w } else { rp_weight <- rep(0, n_assets) } return(rp_weight) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/risk_parity.R
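As the checks above show, the covariance look-back is controlled through `strat$params$n_days_cov`; a hedged sketch of overriding it (one year instead of the default two, purely for illustration):

```r
library(AssetAllocation)

ivy <- asset_allocations$tactical$ivy
ivy$params <- list(n_days_cov = 252)  # one year of daily returns for the EWMA covariance
reb_date <- as.Date("2022-03-31")
risk_parity(ivy, reb_date,
            ETFs$Prices[, ivy$tickers],
            ETFs$Returns[, ivy$tickers])
```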
#' Returns allocations for the Adaptive Asset Allocation strategy on a given #' date #' #' \code{tactical_AAA} determines asset allocations according to the Adaptive #' Asset Allocation approach described in Butler, Philbrick, Gordillo, and #' Varadi (2012) <https://dx.doi.org/10.2139/ssrn.2328254>. #' #' The Adaptive Asset Allocation strategy sorts a specific list of assets #' based on 6-month momentum, selects the top 5 assets, and then calculates #' weights that yield the minimum portfolio variance. The parameters controlling #' the number of months for the momentum calculation (\code{n_months_mom}, #' default = 6), number of months of daily data used to estimate the covariance #' matrix (\code{n_months_mom}, default value = 1), and the number of assets to #' select using the momentum rule (\code{n_assets}, default = 5) can be changed #' by adding them to a list called \code{param} in the \code{strat} object. This #' allows the user to apply the simple principle of the strategy (momentum and #' minimum variance) to any set of assets. #' #' @param strat A list representing an asset allocation strategy. For this #' particular strategy, \code{strat$asset_class} must contain a character vector #' containing the corresponding asset classes. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom PerformanceAnalytics Return.cumulative #' @importFrom stats cov tactical_AAA <- function(strat, reb_date, P, R, risk_free){ # check that user supplied a specific window. # if not, use the default 6 months if (length(strat$params) > 0){ # check that there is element n_months_mom in params if ("n_months_mom" %in% names(strat$params)){ n_months_mom <- strat$params$n_months_mom } else{ print("n_months_mom not found in strat$params. Defaulting to 6") n_months_mom <- 6 # default look-back } # check if user provided a number of months for cov matrix calculation if ("n_months_cov" %in% names(strat$params)){ n_months_cov <- strat$params$n_months_cov } else{ print("n_months_cov not found in strat$params. Defaulting to 1") n_months_cov <- 1 # default vol look-back } # check if user provided a number of assets to be selected if ("n_assets" %in% names(strat$params)){ n_assets <- strat$params$n_assets } else{ if (length(strat$tickers) > 5){ print("n_assets not found in strat$params. Defaulting to 5") n_assets <- 5 } else{ stop("Number of assets less than 5 with no default for n_assets") } } } else { n_months_mom <- 6 # default momentum look-back n_months_cov <- 1 # default vol look-back if (length(strat$tickers) > 5){ n_assets <- 5 } else { print("Number < 5 with no default for n_assets. 
Momentum will not be applied") n_assets <- length(strat$tickers) } } # comparison will be made using dates until the reb_date ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] R_m <- apply.monthly(R, Return.cumulative) R_cum <- R_m[seq(from = nrow(R_m) - n_months_mom + 1, to = nrow(R_m))] R_cum <- cumprod(1+R_cum[seq(from = nrow(R_cum) - n_months_mom + 1, to = nrow(R_cum)), ]) R_cum <- R_cum[nrow(R_cum), ] # select top n_assets based on R_cum sort_mom <- rank(as.numeric(R_cum)) sel_assets <- which(sort_mom > length(strat$tickers) - n_assets) # check if there is at least one year of daily data if (nrow(R) > n_months_cov*21){ # it's not clear what window is used to estimate vol. Will use default 1 month #D <- diag(apply(R[seq(from = nrow(R) - n_months_cov*21 + 1, # to = nrow(R)), sel_assets], 2, sd)) #C <- cor(R[seq(from = nrow(R) - 6*21 + 1, # to = nrow(R)), sel_assets]) #cov_mat <- D %*% C %*% D cov_mat <- stats::cov(R[seq(from = nrow(R) - n_months_cov*21 + 1, to = nrow(R)), sel_assets]) # apply minimum variance optimization mvp_weights <- minvar(cov_mat) } else { mvp_weights <- rep(0, n_assets) } weights <- rep(0, length(strat$tickers)) weights[sel_assets] <- mvp_weights return(weights) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_AAA.R
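`tactical_AAA` ships without a roxygen example; a hedged call on a single rebalance date, using the bundled `aaa` strategy and ETF data (the date is arbitrary), would look like:

```r
library(AssetAllocation)

aaa <- asset_allocations$tactical$aaa
reb_date <- as.Date("2022-03-31")
tactical_AAA(aaa, reb_date,
             ETFs$Prices[, aaa$tickers],
             ETFs$Returns[, aaa$tickers],
             ETFs$risk_free)
```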
#' Returns allocations for the dual momentum strategy on a given date #' #' \code{tactical_DualMomentum} determines asset allocations for a strategy #' according to the dual momentum approach described in Antonacci (2016) #' <https://dx.doi.org/10.2139/ssrn.2042750>. #' #' Dual momentum sorts assets within each asset class described in \code{strat} #' on a relative basis (i.e. which asset outperforms others within the same #' asset class) over the last 12 months, as well as whether an asset has #' positive excess return over the last 12 months. Dual momentum invests in the #' top performing asset within the asset class, as long as it also has positive #' excess return over the risk-free rate. Otherwise, the allocation is shifted #' to the risk-free asset. #' Any amounts not allocated to risky assets are allocated to the risk-free #' asset as implemented in the \code{backtest_allocation} function. #' #' @param strat A list representing an asset allocation strategy. For this #' particular strategy, \code{strat$asset_class} must contain a character vector #' containing the corresponding asset classes. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @examples #' dual_mom <- asset_allocations$tactical$dual_mom #' reb_date <- as.Date("2022-03-31") #' tactical_DualMomentum(dual_mom, #' reb_date, #' ETFs$Prices[, dual_mom$tickers], #' ETFs$Returns[, dual_mom$tickers], #' ETFs$risk_free) #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom PerformanceAnalytics Return.cumulative tactical_DualMomentum <- function(strat, reb_date, P, R, risk_free){ # check that user supplied a specific window. # if not, use the default 12 months if (length(strat$params) > 0){ # check that there is element n_months in params if ("n_months" %in% names(strat$params)){ n_months <- strat$params$n_months } else{ warning("n_months not found in strat$params. 
Defaulting to 12") n_months <- 12 # default look-back } } else { n_months <- 12 # default look-back } # check that user has provided asset classes if (!"asset_class" %in% names(strat)){ stop("strat$asset_class missing") } asset_classes <- unique(strat$asset_class) n_asset_classes <- length(asset_classes) # comparison will be made using dates until the reb_date ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] risk_free <- risk_free[ava_dates] R_e <- R - matrix(rep(risk_free, ncol(R)), nrow = nrow(risk_free), ncol = ncol(R)) R_e <- apply.monthly(R_e, Return.cumulative) # need to calculate moving average of prices at the end of each month # as well as cumulative excess return w <- strat$default_weights if (nrow(R_e) >= n_months){ R_e <- R_e[seq(from = nrow(R_e) - n_months + 1, to = nrow(R_e))] tsmom <- cumprod(1 + R_e) - 1 tsmom <- as.numeric(tsmom[nrow(tsmom)]) for (ac in asset_classes){ inds <- which(strat$asset_class == ac) if (length(inds) ==1){ # only one asset in asset class, just check excess ret > 0 if (tsmom[inds] < 0){ w[inds] <- 0 } } else { # best asset best_ind <- which.max(tsmom[inds]) # zero allocation to worst performers w[inds[-best_ind]] <- 0 # zero allocation if best asset has negative excess return w[inds[best_ind]] <- ifelse(tsmom[best_ind] < 0, 0, w[inds[best_ind]]) } } } return(w) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_DualMomentum.R
#' Calculates asset allocations for the JPMorgan ETF Efficiente® 5 portfolio. #' #' \code{tactical_JPM5} determines asset allocations using a replication #' of the JPMorgan ETF Efficiente® 5 index methodology described in publicly #' available documentation (<https://sp.jpmorgan.com/spweb/content/307403.pdf>). #' #' The strategy uses a window of six months of daily data to compute inputs to #' perform a constrained mean-variance optimization. It relies on the #' \code{mvFrontier} function from the \code{NMOF} package. #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @examples #' JPM_Eff5 <- asset_allocations$tactical$JPM_Eff5 #' reb_date <- as.Date("2022-03-31") #' tactical_JPM5(JPM_Eff5, reb_date, ETFs$Prices[, JPM_Eff5$tickers], ETFs$Returns[, JPM_Eff5$tickers]) #' @return A numeric vector of weights after applying the rule. #' @export #' @importFrom xts endpoints #' @importFrom zoo rollmean #' @importFrom RiskPortfolios covEstimation #' @importFrom NMOF mvFrontier # Ivy portfolio allocation tactical_JPM5 <- function(strat, reb_date, P, R, risk_free = NULL){ # check that user supplied a specific window for cov estimation # if not, use the default 2 years (2*252 days) if (length(strat$params) > 0){ # check that there is element n_days_cov in params if ("n_days_cov" %in% names(strat$params)){ n_days_cov <- strat$params$n_days_cov } else{ warning("n_days_cov not found in strat$params. Defaulting to 126 (six months)") n_days_cov <- 126*1 # default cov estimations window in days } } else { n_days_cov <- 126*1 # default cov estimations window in days } # asset caps LB <- rep(0, 13) UB <- c(0.20, 0.10, 0.20 ,0.20, 0.20, 0.20, 0.20, 0.20, 0.20, 0.10, 0.10, 0.50, 0.50) # sectors and sector caps sectors <- list( 1:3, # Group 1 = Developed Equity 4:6, # Group 2 = Bonds 7:8, # Group 3 = Emerging Markets 9:11, # Group 4 = Alternative Investments 12:13 # Group 5 = Inflation Protected Bonds/Cash ) sectors_LB <- rep(0, 5) sectors_UB <- c(0.50, 0.50, 0.25, 0.25, 0.50) # calculations use data until the reb_date ava_dates <- paste0("/", reb_date) R <- R[ava_dates] R <- R[seq(from = max(1, nrow(R) - n_days_cov + 1), to = nrow(R)), ] n_assets <- length(strat$tickers) # get valid data R <- R[seq(from = which.max(!is.na(rowSums(R))), to = nrow(R)), ] # check if there is at least six months of daily data if (nrow(R) >= 120){ # estimate inputs m <- colMeans(R)*252 # estimate covariance matrix with available data cov_mat <- covEstimation(R, control = list(type = 'naive'))*252 p1 <- mvFrontier(m, cov_mat, wmin = LB, wmax = UB, groups = sectors, groups.wmin = sectors_LB, groups.wmax = sectors_UB,n = 50) # select first portfolio on the efficient frontier with vol of 5% sel_port <- which(p1$volatility>= 0.05) # if no portfolio attained 5% vol, take the last portfolio in the frontier # otherwise, select the first portfolio with volatility >= 5% if (length(sel_port) == 0){ port_weights <- p1$portfolios[, ncol(p1$portfolios)] } else { port_weights <- p1$portfolios[, min(sel_port)] } } else { port_weights <- rep(0, n_assets) } return(port_weights) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_JPM5.R
#' Returns allocations for the Robust Asset Allocation on a given date #' #' \code{tactical_RAA} determines asset allocations for a strategy according to #' the Robust Asset Allocation (RAA) approach of Gray and Vogel (2015, #' ISBN:978-1119071501). #' #' RAA uses two trend-following rules. The first one is based on comparing the #' current price of assets with their 12-month moving average. The second one #' compares returns with the returns of the risk-free asset. The allocation rule #' keeps either 100%, 50%, or 0% of the default weight for each risky asset #' if both rules provide a positive signal, only one rule provided a positive #' signal, or both rules provide a negative signal, respectively. Any amounts #' not allocated to risky assets are allocated to the risk-free asset as #' implemented in the \code{backtest_allocation} function. #' #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' #' @examples #' raa <- asset_allocations$tactical$raa #' reb_date <- as.Date("2022-03-31") #' tactical_RAA(raa, #' reb_date, #' ETFs$Prices[, raa$tickers], #' ETFs$Returns[, raa$tickers], #' ETFs$risk_free) #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom zoo rollmean # raa portfolio allocation tactical_RAA <- function(strat, reb_date, P, R, risk_free){ # check that user supplied a specific window. # if not, use the default 12 months if (length(strat$params) > 0){ # check that there is element n_months in params if ("n_months" %in% names(strat$params)){ n_months <- strat$params$n_months } else{ warning("n_months not found in strat$params. Defaulting to 12") n_months <- 12 # default look-back } } else { n_months <- 12 # default look-back } # comparison will be made using dates until the reb_date ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] risk_free <- risk_free[ava_dates] R_e <- R - matrix(rep(risk_free, ncol(R)), nrow = nrow(risk_free), ncol = ncol(R)) R_e <- apply.monthly(R_e, Return.cumulative) # need to calculate moving average of prices at the end of each month # as well as cumulative excess return P_month_ends <- P[endpoints(P, on = "months"), ] R_e <- R_e[seq(from = nrow(R_e) - n_months + 1, to = nrow(R_e))] w <- strat$default_weights if (nrow(P_month_ends) >= n_months){ P_moving <- rollmean(P_month_ends, k = n_months, fill = NA, align= "right") tsmom <- cumprod(1 + R_e) - 1 tsmom <- tsmom[nrow(tsmom)] # indicators to decide exposure ind_moving <- (P_month_ends[nrow(P_month_ends), ] > P_moving[nrow(P_moving), ])*1 ind_tsmom <- (tsmom > 0)*1 ind_tactical <- (ind_moving + ind_tsmom)/2 # tactical RAA rule if (!any(is.na(P_moving[nrow(P_moving), ]))){ w <- w * ind_tactical } } return(w) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_RAA.R
#' Returns allocations for the TrendFriend strategy on a given date #' #' \code{tactical_TrendFriend} determines asset allocations for a strategy #' according to the strategy in Clare et al (2016, #' <https://doi.org/10.1016/j.jbef.2016.01.002)>. #' #' The allocation strategy proposed in the paper is based on using a time #' series momentum rule to select assets from a universe, and an allocation #' rule which gives weights proportional to the inverse volatility of the assets. #' The time-series (trend) momentum rule is based on whether the price of the #' asset on the rebalancing date is above its long-run moving average (10 months #' in the paper; this implementation defaults to 12 months, configurable via #' \code{strat$params$n_months_trend}). If not, #' the corresponding allocation in \code{strat$default_weights} is set to zero ( #' and is therefore allocated to the risk-free asset). #' #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' @examples #' trend_friend <- asset_allocations$tactical$trend_friend #' reb_date <- as.Date("2022-03-31") #' tactical_TrendFriend(trend_friend, #' reb_date, #' ETFs$Prices[, trend_friend$tickers], #' ETFs$Returns[, trend_friend$tickers] #' ) #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom zoo rollmean # TrendFriend portfolio allocation tactical_TrendFriend <- function(strat, reb_date, P, R, risk_free = NULL){ # comparison will be made using dates until the reb_date # The strategy is supposed to be rebalanced monthly # If user supplies different rebalancing frequency, # will use last month end. ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] # first need to calculate moving average of prices at the end of each month P_month_ends <- P[endpoints(P, on = "months"), ] # check that user supplied a specific window. # if not, use the default 12 months if (length(strat$params) > 0){ # check that there is element n_months_trend in params if ("n_months_trend" %in% names(strat$params)){ n_months_trend <- strat$params$n_months_trend } else{ warning("n_months_trend not found in strat$params. Defaulting to 12") n_months_trend <- 12 # default look-back } } else { n_months_trend <- 12 # default look-back } # check that user supplied a specific window for vol calculation # if not, use the default 12 months of daily data if (length(strat$params) > 0){ # check that there is element n_months_vol in params if ("n_months_vol" %in% names(strat$params)){ n_months_vol <- strat$params$n_months_vol } else{ warning("n_months_vol not found in strat$params. Defaulting to 12") n_months_vol <- 12 # default look-back } } else { n_months_vol <- 12 # default look-back } # step 1: calculation of weights using inverse-volatility weights if (nrow(P_month_ends) >= max(n_months_trend, n_months_vol)){ w <- 1/apply(R[seq(from = nrow(R) - 21*n_months_vol +1, to = nrow(R)), ], 2, stats::sd) w <- w/sum(w) # step 2: trend following rule P_moving <- rollmean(P_month_ends, k = n_months_trend, fill = NA, align= "right") if (!any(is.na(P_moving[nrow(P_moving), ])) & !any(is.na(w))){ assets_tf <- which(P_month_ends[nrow(P_month_ends), ] > P_moving[nrow(P_moving), ]) w[-assets_tf] <- 0 } } return(w) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_TrendFriend.R
#' Returns allocations for the TrendFriend strategy with risk parity on a given date #' #' \code{tactical_TrendFriend_RP} determines asset allocations for a strategy #' according to a modified version of the strategy in Clare et al (2016, #' <https://doi.org/10.1016/j.jbef.2016.01.002)>. The modified version uses full #' risk parity instead of the inverse-volatility rule in the paper. #' #' The allocation strategy proposed in the paper is based on using a time #' series momentum rule to select assets from a universe, and an allocation #' rule which gives weights proportional to the inverse volatility of the assets. #' The time-series (trend) momentum rule is based on whether the price of the #' asset on the rebalancing date is above its long-run moving average (10 months #' in the paper; this implementation defaults to 12 months, configurable via #' \code{strat$params$n_months_trend}). If not, #' the corresponding allocation in \code{strat$default_weights} is set to zero ( #' and is therefore allocated to the risk-free asset). #' #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' asset, or a scalar numeric with the annual risk-free rate in decimals. #' @examples #' trend_friend <- asset_allocations$tactical$trend_friend #' reb_date <- as.Date("2022-03-31") #' tactical_TrendFriend_RP(trend_friend, #' reb_date, #' ETFs$Prices[, trend_friend$tickers], #' ETFs$Returns[, trend_friend$tickers] #' ) #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom zoo rollmean # TrendFriend risk parity allocation tactical_TrendFriend_RP <- function(strat, reb_date, P, R, risk_free = NULL){ # comparison will be made using dates until the reb_date # The strategy is supposed to be rebalanced monthly # If user supplies different rebalancing frequency, # will use last month end. ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] # first need to calculate moving average of prices at the end of each month P_month_ends <- P[endpoints(P, on = "months"), ] # check that user supplied a specific window. # if not, use the default 12 months if (length(strat$params) > 0){ # check that there is element n_months_trend in params if ("n_months_trend" %in% names(strat$params)){ n_months_trend <- strat$params$n_months_trend } else{ warning("n_months_trend not found in strat$params. Defaulting to 12") n_months_trend <- 12 # default look-back } } else { n_months_trend <- 12 # default look-back } # check that user supplied a specific value for number of months to estimate # covariance matrix. if not, use the default 12 months if (length(strat$params) > 0){ # check that there is element n_months_cov in params if ("n_months_cov" %in% names(strat$params)){ n_months_cov <- strat$params$n_months_cov } else{ warning("n_months_cov not found in strat$params. 
Defaulting to 12 months") n_months_cov <- 12 # default cov estimations window in months } } else { n_months_cov <- 12 # default cov estimations window in months } # step 1: calculation of weights using full risk parity n_days_cov = n_months_cov*21 if (nrow(R) >= n_days_cov){ R <- R[seq(from = max(1, nrow(R) - n_days_cov + 1), to = nrow(R)), ] n_assets <- length(strat$tickers) # get valid data R <- R[seq(from = which.max(!is.na(rowSums(R))), to = nrow(R)), ] # check if there is at least one year of daily data if (nrow(R) >= 220){ # estimate covariance matrix with available data cov_mat <- covEstimation(R, control = list(type = 'ewma', lambda = 0.98851)) # risk parity optimization w <- riskParityPortfolio(cov_mat, b = rep(1/n_assets, n_assets))$w } else { w <- rep(0, n_assets) } # step 2: trend following rule P_moving <- rollmean(P_month_ends, k = n_months_trend, fill = NA, align= "right") if (!any(is.na(P_moving[nrow(P_moving), ])) & !any(is.na(w))){ assets_tf <- which(P_month_ends[nrow(P_month_ends), ] > P_moving[nrow(P_moving), ]) w[-assets_tf] <- 0 } } return(w) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_TrendFriend_RP.R
#' Returns allocations for the Ivy Portfolio on a given date #' #' \code{tactical_ivy} determines asset allocations for a strategy according to #' the Ivy Portfolio rule of Faber (2013, ISBN:978-1118008850). #' #' The function compares prices at the end of a month to their moving averages. #' If the price of an asset is below its moving average, the corresponding #' allocation in \code{strat$default_weights} is set to zero. #' @param strat A list representing an asset allocation strategy. #' @param reb_date A date on which the allocation rule is applied. #' @param P An xts object with daily prices of the tickers in strat. #' @param R An xts object with daily returns of the tickers in strat. #' @param risk_free Either an xts object with daily returns of the risk-free #' @examples #' ivy <- asset_allocations$tactical$ivy #' reb_date <- as.Date("2022-03-31") #' tactical_ivy(ivy, reb_date, ETFs$Prices[, ivy$tickers], ETFs$Returns[, ivy$tickers]) #' @return A numeric vector of weights after applying the rule. #' @export #' @import xts #' @importFrom xts endpoints #' @importFrom zoo rollmean # Ivy portfolio allocation tactical_ivy <- function(strat, reb_date, P, R, risk_free = NULL){ # comparison will be made using dates until the reb_date # Ivy is supposed to be rebalanced monthly # If user supplies different rebalancing frequency, # will use last month end. ava_dates <- paste0("/", reb_date) P <- P[ava_dates] R <- R[ava_dates] # first need to calculate moving average of prices at the end of each month P_month_ends <- P[endpoints(P, on = "months"), ] # check that user supplied a specific window. # if not, use the default 10 months if (length(strat$params) > 0){ # check that there is element n_months in params if ("n_months" %in% names(strat$params)){ n_months <- strat$params$n_months } else{ warning("n_months not found in strat$params. Defaulting to 10") n_months <- 10 # default look-back for Ivy } } else { n_months <- 10 # default look-back for Ivy } w <- strat$default_weights if (nrow(P_month_ends) >= n_months){ P_moving <- rollmean(P_month_ends, k = n_months, fill = NA, align= "right") if (!any(is.na(P_moving[nrow(P_moving), ]))){ w[which(P_month_ends[nrow(P_month_ends), ] < P_moving[nrow(P_moving), ])] <- 0 } } return(w) }
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/R/tactical_ivy.R
## ----include=FALSE------------------------------------------------------------ knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width=7, fig.height=5 ) ## ----setup-------------------------------------------------------------------- library(AssetAllocation) library(PerformanceAnalytics) names(asset_allocations$static) ## ----all weather 1------------------------------------------------------------ asset_allocations$static$all_weather ## ----all weather 2------------------------------------------------------------ # define strategy all_weather <- asset_allocations$static$all_weather # backtest strategy bt_all_weather <- backtest_allocation(all_weather, ETFs$Prices, ETFs$Returns, ETFs$risk_free) ## ----all weather 3------------------------------------------------------------ # plot cumulative returns charts.PerformanceSummary(bt_all_weather$returns, main = all_weather$strat$name) ## ----all weather 4------------------------------------------------------------ # table with performance metrics bt_all_weather$table_performance ## ----all weather 5------------------------------------------------------------ chart.StackedBar(bt_all_weather$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", all_weather$name)) ## ----factors EW setup--------------------------------------------------------- factors_EW <- list(name = "EW Factors", tickers = c("MTUM", "VLUE", "USMV", "QUAL"), default_weights = c(0.25, 0.25, 0.25, 0.25), rebalance_frequency = "month", portfolio_rule_fn = "constant_weights") ## ----factors EW data, message=FALSE, warning=FALSE---------------------------- factor_ETFs_data <- get_data_from_tickers(factors_EW$tickers, starting_date = "2013-08-01") ## ----factors EW bt------------------------------------------------------------ # backtest the strategy bt_factors_EW <- backtest_allocation(factors_EW,factor_ETFs_data$P, factor_ETFs_data$R) # plot returns charts.PerformanceSummary(bt_factors_EW$returns, main = bt_factors_EW$strat$name, ) # table with performance metrics bt_factors_EW$table_performance ## ----tactical setup----------------------------------------------------------- # define strategies ivy <- asset_allocations$tactical$ivy raa <- asset_allocations$tactical$raa dual_mo <- asset_allocations$tactical$dual_mo aaa <- asset_allocations$tactical$aaa # run backtests bt_ivy <- backtest_allocation(ivy, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_raa <- backtest_allocation(raa, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_dual_mo <- backtest_allocation(dual_mo, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_aaa <- backtest_allocation(aaa, ETFs$Prices,ETFs$Returns, ETFs$risk_free) ret_strats <- merge.xts(bt_ivy$returns, bt_raa$returns, bt_dual_mo$returns, bt_aaa$returns) # find index from which all strats are available min_ind <- which.max(!is.na(rowSums(ret_strats))) charts.PerformanceSummary(ret_strats[min_ind:nrow(ret_strats)]) cbind(bt_ivy$table_performance, bt_raa$table_performance, bt_dual_mo$table_performance, bt_aaa$table_performance) ## ----tactical allocations----------------------------------------------------- chart.StackedBar(bt_ivy$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_ivy$strat$name)) chart.StackedBar(bt_raa$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_raa$strat$name)) chart.StackedBar(bt_dual_mo$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_dual_mo$strat$name)) chart.StackedBar(bt_aaa$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", 
bt_aaa$strat$name)) ## ----mvp, message=FALSE, warning=FALSE---------------------------------------- # Minimum variance portfolio us_mvp <- list(name = "US MinVar", tickers = c("VTI", "BND"), default_weights = c(0.5, 0.5), rebalance_frequency = "month", portfolio_rule_fn = min_variance) bt_us_mvp <- backtest_allocation(us_mvp, ETFs$Prices, ETFs$Returns, ETFs$risk_free) charts.PerformanceSummary(bt_us_mvp$returns) bt_us_mvp$table_performance ## ----mvp plot----------------------------------------------------------------- chart.StackedBar(bt_us_mvp$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", us_mvp$name)) ## ----risk parity setup-------------------------------------------------------- rp <- list(name = "US Risk Parity", tickers = c("TIP", "VTI", "EFA", "EEM", "DBC", "GLD", "IEF"), default_weights = c(0.25, 0.25/3, 0.25/3, 0.25/3, 0.25/2, 0.25/2, 0.25), rebalance_frequency = "month", portfolio_rule_fn = "risk_parity") bt_rp <- backtest_allocation(rp, ETFs$Prices, ETFs$Returns, ETFs$risk_free) ## ----risk parity perf--------------------------------------------------------- charts.PerformanceSummary(bt_rp$returns) bt_rp$table_performance ## ----risk parity compare, message=FALSE, warning=FALSE------------------------ rpar <- get_data_from_tickers("RPAR") rp_compare <- merge.xts(bt_rp$returns, rpar$R, join = "right") rp_compare <- na.omit(rp_compare) cor(rp_compare) ## ----risk parity stats-------------------------------------------------------- table.AnnualizedReturns(rp_compare) ## ----risk parity rescale------------------------------------------------------ rp_rescale_factor <- table.AnnualizedReturns(rp_compare)[2,2]/table.AnnualizedReturns(rp_compare)[2,1] rp_compare[, 1] <- rp_compare[, 1] * rp_rescale_factor charts.PerformanceSummary(rp_compare)
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/inst/doc/AssetAllocation.R
--- title: "Testing Asset Allocation Strategies" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{AssetAllocation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r include=FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width=7, fig.height=5 ) ``` This vignette illustrates some ways to use the `AssetAllocation` package to backtest different asset allocation strategies. # Rationale for the package There are several alternatives to backtesting systematic/quantitative investment strategies in R. The aim of this package is to provide a simplified way to backtest simple asset allocation rules. That is, with a few lines of code, the user can create and backtest simple static or dynamic (tactical) asset allocation strategies. The package comes with a set of pre-loaded static and tactical strategies, which are in the `asset_allocations` object. However, the user can easily create their own strategies, either by choosing specific allocations in a static asset allocation, or by creating their own custom function that implements a dynamic strategy. # Basic definitions Within the context of the package, an asset allocation strategy is an object of the type `list` which contains the following elements: 1. `name`: an object of type `character` with the name of the strategy 2. `tickers`: a vector of type `character` containing the tickers of the assets to be used. These must either correspond to the column names in the user-provided data to be used to backtest the strategy, or to tickers in Yahoo Finance. 3. `default_weights`: a vector of type `numeric` containing the default weights to invest in each asset in decimals. The sum of the weights should be less than or equal to one. Any amount not invested in the assets is automatically assumed to be invested in the risk-free rate. If the rebalance function is `risk_parity`, this field should contain the risk budgets (in decimals, the sum should equal one). 4. `rebalance_frequency`: an object of type `character` which determines the rebalancing frequency. Options are "days", "weeks", "months", "quarters", and "years". 5. `portfolio_rule_fn`: an object of type `character` containing the name of the rebalancing function that determines allocations for the next period. A valid rebalancing function takes as inputs a strategy, a rebalancing date, an `xts` matrix of prices, an `xts` matrix of returns, and an `xts` vector of returns on a risk-free asset. The function returns a vector of type `numeric` with the same number of elements as the object `strat$tickers`. Some specific cases that come with the package are: - static asset allocation strategies: `"constant_weights"` - Ivy Portfolio: `"tactical_ivy"` - Dual Momentum: `"tactical_DualMomentum"` - Robust Asset Allocation: `"tactical_RAA"` - Adaptive Asset Allocation: `"tactical_AAA"` - Minimum variance: `"min_variance"` - Risk parity: `"risk_parity"` A few comments: - The rebalancing function for tactical asset allocation strategies may contain specific choices regarding calculation of covariance matrices, look-back periods and so on. Consult the help for each rebalancing function for details. - Some rebalancing function require additional elements. For example, the "Dual Momentum" strategy requires the asset classes of each ticker. I've tried to make the package generic enough to allow users to backtest allocation rules with other assets, while at the same time maintaining a (hopefully) simple syntax. 
# Basic workflow

To use the package, the user basically follows two steps:

1. Create a strategy with the elements described above (or choose one of the pre-loaded strategies).
2. Backtest the strategy by creating a new object using the function `backtest_allocation` and some data.

The `backtest_allocation` function expects a strategy with the elements described above, as well as an `xts` matrix of prices, an `xts` matrix of returns, and an optional `xts` vector of returns on a risk-free asset. The user can also provide an optional starting date. Importantly, the tickers in the strategy should correspond to valid columns of the price and return objects.

# Pre-loaded strategies

As defined above, an asset allocation strategy is a portfolio comprised of a set of assets, with portfolio weights determined by a specific rule, rebalanced at some frequency. The package comes with several pre-loaded asset allocation strategies, which generally come from published sources. These are in the object `asset_allocations`.

# Data

All of the pre-loaded asset allocation strategies are defined in terms of exchange-traded funds, data for which are available in the `ETFs` data set. Users can type `?ETFs` to obtain more information. The purpose of the pre-loaded strategies and data is to demonstrate how to use the package. Users can test their own strategies using their own data, or they can also specify their own assets and have the package retrieve data automatically from Yahoo Finance.

# Testing asset allocations

We load the package and inspect the available pre-loaded static (i.e., constant-weight) asset allocations:

```{r setup}
library(AssetAllocation)
library(PerformanceAnalytics)
names(asset_allocations$static)
```

## Example 1: Ray Dalio's All Weather Portfolio

One of the pre-loaded static asset allocations is Ray Dalio's All Weather Portfolio. The strategy invests 30% in U.S. stocks (represented by the SPY ETF), 40% in long-term U.S. Treasuries (TLT), 15% in intermediate-term U.S. Treasuries (IEF), 7.5% in gold (GLD), and 7.5% in commodities (DBC).

```{r all weather 1}
asset_allocations$static$all_weather
```

To backtest this strategy with the data in the `ETFs` object, we simply do:

```{r all weather 2}
# define strategy
all_weather <- asset_allocations$static$all_weather

# backtest strategy
bt_all_weather <- backtest_allocation(all_weather,
                                      ETFs$Prices,
                                      ETFs$Returns,
                                      ETFs$risk_free)
```

The output from `backtest_allocation` contains the daily returns of the strategy in the `$returns` object. A convenient way to visualize the results is by using the `charts.PerformanceSummary` function from the `PerformanceAnalytics` package:

```{r all weather 3}
# plot cumulative returns
charts.PerformanceSummary(bt_all_weather$returns,
                          main = all_weather$name)
```

A basic set of performance statistics is provided in `$table_performance`:

```{r all weather 4}
# table with performance metrics
bt_all_weather$table_performance
```

The allocations on rebalancing dates are stored in `$rebalance_weights`. Of course, for static, buy-and-hold asset allocations, the portfolio weights always remain the same:

```{r all weather 5}
chart.StackedBar(bt_all_weather$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", all_weather$name))
```

As should be clear from the graph above, the weights stored in `$rebalance_weights` are the weights on the rebalancing dates. Even for a static, buy-and-hold strategy, the actual weights between rebalancing dates will fluctuate.
The other pre-loaded static asset allocations may be tested analogously.

## Example 2: Creating and testing a custom static asset allocation

In this example, we create a custom strategy from scratch. The strategy invests equally in momentum (MTUM), value (VLUE), low volatility (USMV), and quality (QUAL) ETFs. We first set up this custom strategy as follows:

```{r factors EW setup}
factors_EW <- list(name = "EW Factors",
                   tickers = c("MTUM", "VLUE", "USMV", "QUAL"),
                   default_weights = c(0.25, 0.25, 0.25, 0.25),
                   rebalance_frequency = "month",
                   portfolio_rule_fn = "constant_weights")
```

Next, we can automatically download data from Yahoo Finance using the `get_data_from_tickers` function:

```{r factors EW data, message=FALSE, warning=FALSE}
factor_ETFs_data <- get_data_from_tickers(factors_EW$tickers,
                                          starting_date = "2013-08-01")
```

Finally, we backtest the strategy and show the results:

```{r factors EW bt}
# backtest the strategy
bt_factors_EW <- backtest_allocation(factors_EW,
                                     factor_ETFs_data$P,
                                     factor_ETFs_data$R)

# plot returns
charts.PerformanceSummary(bt_factors_EW$returns,
                          main = bt_factors_EW$strat$name)

# table with performance metrics
bt_factors_EW$table_performance
```

## Example 3: Testing tactical asset allocation strategies

In this example, we test and compare four pre-loaded tactical asset allocation strategies: the Ivy Portfolio, the Robust Asset Allocation strategy, the Dual Momentum strategy, and the Adaptive Asset Allocation strategy. A brief description of each strategy (as well as appropriate references) is provided in the corresponding rebalancing functions.

```{r tactical setup}
# define strategies
ivy <- asset_allocations$tactical$ivy
raa <- asset_allocations$tactical$raa
dual_mo <- asset_allocations$tactical$dual_mo
aaa <- asset_allocations$tactical$aaa

# run backtests
bt_ivy <- backtest_allocation(ivy, ETFs$Prices, ETFs$Returns, ETFs$risk_free)
bt_raa <- backtest_allocation(raa, ETFs$Prices, ETFs$Returns, ETFs$risk_free)
bt_dual_mo <- backtest_allocation(dual_mo, ETFs$Prices, ETFs$Returns, ETFs$risk_free)
bt_aaa <- backtest_allocation(aaa, ETFs$Prices, ETFs$Returns, ETFs$risk_free)

ret_strats <- merge.xts(bt_ivy$returns,
                        bt_raa$returns,
                        bt_dual_mo$returns,
                        bt_aaa$returns)

# find index from which all strats are available
min_ind <- which.max(!is.na(rowSums(ret_strats)))

charts.PerformanceSummary(ret_strats[min_ind:nrow(ret_strats)])

cbind(bt_ivy$table_performance,
      bt_raa$table_performance,
      bt_dual_mo$table_performance,
      bt_aaa$table_performance)
```

Visualizing allocations for all strategies:

```{r tactical allocations}
chart.StackedBar(bt_ivy$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", bt_ivy$strat$name))
chart.StackedBar(bt_raa$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", bt_raa$strat$name))
chart.StackedBar(bt_dual_mo$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", bt_dual_mo$strat$name))
chart.StackedBar(bt_aaa$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", bt_aaa$strat$name))
```

## Example 4: Minimum variance portfolio

In this example, we create a strategy that applies the minimum variance portfolio rule to U.S. stocks and bonds. At each rebalancing date, this strategy uses optimization to determine the weights that yield the minimum variance possible.
```{r mvp, message=FALSE, warning=FALSE}
# Minimum variance portfolio
us_mvp <- list(name = "US MinVar",
               tickers = c("VTI", "BND"),
               default_weights = c(0.5, 0.5),
               rebalance_frequency = "month",
               portfolio_rule_fn = min_variance)

bt_us_mvp <- backtest_allocation(us_mvp, ETFs$Prices, ETFs$Returns, ETFs$risk_free)

charts.PerformanceSummary(bt_us_mvp$returns)

bt_us_mvp$table_performance
```

As expected, this strategy would invest heavily in bonds:

```{r mvp plot}
chart.StackedBar(bt_us_mvp$rebalance_weights,
                 date.format = "%Y",
                 main = paste0("Allocations, ", us_mvp$name))
```

## Example 5: Risk Parity

Finally, in this example, we test a risk parity portfolio inspired by the [RPAR ETF](https://www.rparetf.com/rpar). As described in the prospectus, this ETF targets the following risk allocations:

![](RPAR.png)

Risk parity is implemented in the `risk_parity` rebalancing function, which treats the values in the `default_weights` element of the strategy as risk budgets.[^1] Our "clone" RPAR strategy uses the following ETFs:

[^1]: The `risk_parity` function itself uses functions from the `riskParityPortfolio` package.

- TIPS: TIP
- Global Equities
  - US equities: VTI
  - Non-U.S. Developed Markets Equities: EFA
  - Emerging Markets Equities: EEM
- Commodities
  - Commodities: DBC
  - Gold: GLD
- U.S. Treasuries: IEF

The risk budgets are set to 25% for each of the four categories above, and equally per asset within each category:

```{r risk parity setup}
rp <- list(name = "US Risk Parity",
           tickers = c("TIP", "VTI", "EFA", "EEM", "DBC", "GLD", "IEF"),
           default_weights = c(0.25, 0.25/3, 0.25/3, 0.25/3, 0.25/2, 0.25/2, 0.25),
           rebalance_frequency = "month",
           portfolio_rule_fn = "risk_parity")

bt_rp <- backtest_allocation(rp, ETFs$Prices, ETFs$Returns, ETFs$risk_free)
```

As expected, the volatility is quite low, since 50% of the risk budget is allocated to fixed income:

```{r risk parity perf}
charts.PerformanceSummary(bt_rp$returns)
bt_rp$table_performance
```

We can check the correlation with the actual RPAR ETF:

```{r risk parity compare, message=FALSE, warning=FALSE}
rpar <- get_data_from_tickers("RPAR")
rp_compare <- merge.xts(bt_rp$returns, rpar$R, join = "right")
rp_compare <- na.omit(rp_compare)
cor(rp_compare)
```

Despite the high correlation, RPAR has about twice the volatility of our strategy. The reason is that the ETF can rely on leverage through futures:

```{r risk parity stats}
table.AnnualizedReturns(rp_compare)
```

We can rescale the clone to have the same ex-post volatility for an apples-to-apples comparison:

```{r risk parity rescale}
rp_rescale_factor <- table.AnnualizedReturns(rp_compare)[2, 2] /
  table.AnnualizedReturns(rp_compare)[2, 1]

rp_compare[, 1] <- rp_compare[, 1] * rp_rescale_factor

charts.PerformanceSummary(rp_compare)
```

# Final thoughts - creating your own strategies

Creating your own static asset allocation strategies is straightforward: just add the tickers and set `portfolio_rule_fn = "constant_weights"`. To create your own dynamic/tactical rebalancing functions, you can look at the provided functions to get some ideas. One important thing to keep in mind is to ensure that the rebalancing function only uses data up to each rebalancing date, in order to avoid look-ahead bias. Take a look at the tactical rebalancing functions in the package to see one way to achieve this.
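As an illustration (this sketch is not part of the package), a custom rule can follow the interface described in the "Basic definitions" section. The argument names below are placeholders and the inverse-volatility logic is just an example; the key point is that only returns up to the rebalancing date are used:

```{r custom rule sketch, eval=FALSE}
# hypothetical sketch of a custom rebalancing function
inverse_vol_weights <- function(strat, reb_date, P, R, risk_free){
  # restrict to information available up to the rebalancing date (no look-ahead)
  R_available <- R[paste0("/", reb_date), strat$tickers]
  # inverse-volatility weights, normalized to sum to one
  vols <- apply(R_available, 2, sd, na.rm = TRUE)
  w <- (1 / vols) / sum(1 / vols)
  return(w)
}
```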
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/inst/doc/AssetAllocation.Rmd
--- title: "Testing Asset Allocation Strategies" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{AssetAllocation} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r include=FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>", fig.width=7, fig.height=5 ) ``` This vignette illustrates some ways to use the `AssetAllocation` package to backtest different asset allocation strategies. # Rationale for the package There are several alternatives to backtesting systematic/quantitative investment strategies in R. The aim of this package is to provide a simplified way to backtest simple asset allocation rules. That is, with a few lines of code, the user can create and backtest simple static or dynamic (tactical) asset allocation strategies. The package comes with a set of pre-loaded static and tactical strategies, which are in the `asset_allocations` object. However, the user can easily create their own strategies, either by choosing specific allocations in a static asset allocation, or by creating their own custom function that implements a dynamic strategy. # Basic definitions Within the context of the package, an asset allocation strategy is an object of the type `list` which contains the following elements: 1. `name`: an object of type `character` with the name of the strategy 2. `tickers`: a vector of type `character` containing the tickers of the assets to be used. These must either correspond to the column names in the user-provided data to be used to backtest the strategy, or to tickers in Yahoo Finance. 3. `default_weights`: a vector of type `numeric` containing the default weights to invest in each asset in decimals. The sum of the weights should be less than or equal to one. Any amount not invested in the assets is automatically assumed to be invested in the risk-free rate. If the rebalance function is `risk_parity`, this field should contain the risk budgets (in decimals, the sum should equal one). 4. `rebalance_frequency`: an object of type `character` which determines the rebalancing frequency. Options are "days", "weeks", "months", "quarters", and "years". 5. `portfolio_rule_fn`: an object of type `character` containing the name of the rebalancing function that determines allocations for the next period. A valid rebalancing function takes as inputs a strategy, a rebalancing date, an `xts` matrix of prices, an `xts` matrix of returns, and an `xts` vector of returns on a risk-free asset. The function returns a vector of type `numeric` with the same number of elements as the object `strat$tickers`. Some specific cases that come with the package are: - static asset allocation strategies: `"constant_weights"` - Ivy Portfolio: `"tactical_ivy"` - Dual Momentum: `"tactical_DualMomentum"` - Robust Asset Allocation: `"tactical_RAA"` - Adaptive Asset Allocation: `"tactical_AAA"` - Minimum variance: `"min_variance"` - Risk parity: `"risk_parity"` A few comments: - The rebalancing function for tactical asset allocation strategies may contain specific choices regarding calculation of covariance matrices, look-back periods and so on. Consult the help for each rebalancing function for details. - Some rebalancing function require additional elements. For example, the "Dual Momentum" strategy requires the asset classes of each ticker. I've tried to make the package generic enough to allow users to backtest allocation rules with other assets, while at the same time maintaining a (hopefully) simple syntax. 
# Basic workflow To use the package, the user follows basically two steps: 1. Create a strategy with the elements described above (or choose one of the pre-loaded strategies) 2. Backtest the strategy by creating a new object using the function `backtest_allocation` and some data. The `backtest_allocation` function expects a strategy with the elements described above, as well as an `xts` matrix of prices, an `xts` matrix of returns, and an optional `xts` vector of returns on a risk-free asset. The user can also provide an optional starting date. Importantly, the tickers in the strategy should correspond to valid columns of the price and return objects. # Pre-loaded strategies As defined above, an asset allocation strategy is a portfolio comprised of a set of assets, with portfolios weights determined by a specific rule, rebalanced at some frequency. The package comes with several pre-loaded asset allocation strategies, which generally come from published sources. These are in the object `asset_allocations`. # Data All of the pre-loaded asset allocation strategies are defined in terms of exchange-traded funds, data for which are available in the `ETFs` data set. Users can type `?ETFs` to obtain more information. The purpose of the pre-loaded strategies and data is to demonstrate how to use the package. Users can test their own strategies using their own data, or they can also specify their own assets and have the package retrieve data automatically from Yahoo Finance. # Testing asset allocations We load the package and inspect the available pre-loaded static (i.e., constant-weight) asset allocations: ```{r setup} library(AssetAllocation) library(PerformanceAnalytics) names(asset_allocations$static) ``` ## Example 1: Ray Dalio's All Weather Portfolio One of the pre-loaded static asset allocations is Ray Dalio's All Weather Portfolio. The strategy invests 30% in U.S. stocks (represented by the SPY ETF), 40% in long-term U.S. Treasuries (TLT), 15% in intermediate-term U.S. Treasuries (IEF), 7.5% in gold (GLD), and 7.5% in commodities (DBC). ```{r all weather 1} asset_allocations$static$all_weather ``` To backtest this strategy with the data in the `ETFs` object, we simply do: ```{r all weather 2} # define strategy all_weather <- asset_allocations$static$all_weather # backtest strategy bt_all_weather <- backtest_allocation(all_weather, ETFs$Prices, ETFs$Returns, ETFs$risk_free) ``` The output from `backtest_allocation` contains the daily returns of the strategy in the `$returns` object. A convenient way to visualize the results is by using the `charts.PerformanceSummary` function from the `PerformanceAnalytics` package: ```{r all weather 3} # plot cumulative returns charts.PerformanceSummary(bt_all_weather$returns, main = all_weather$strat$name) ``` A basic set of performance statistics is provided in `$table_performance`: ```{r all weather 4} # table with performance metrics bt_all_weather$table_performance ``` The allocations over time are stored in `$weights`. Of course, for static, buy-and-hold asset allocations, the portfolio weights always remains the same: ```{r all weather 5} chart.StackedBar(bt_all_weather$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", all_weather$name)) ``` As should be clear from the graph above, the weights that are stored in `$weights` are the weights on the rebalancing dates. Even for a static, buy-and-hold strategy, the actual weights between rebalancing dates will fluctuate. 
The other pre-loaded static asset allocations may be tested analogously. ## Example 2: Creating and testing a custom static asset allocation In this example, we create a custom strategy from scratch. The strategy invests equally in momentum (MTUM), value (VLUE), low volatility (USMV) and quality (QUAL) ETFs. We first set up this custom strategy as follows: ```{r factors EW setup} factors_EW <- list(name = "EW Factors", tickers = c("MTUM", "VLUE", "USMV", "QUAL"), default_weights = c(0.25, 0.25, 0.25, 0.25), rebalance_frequency = "month", portfolio_rule_fn = "constant_weights") ``` Next, we can automatically download data from Yahoo Finance using the `get_data_from_tickers` function: ```{r factors EW data, message=FALSE, warning=FALSE} factor_ETFs_data <- get_data_from_tickers(factors_EW$tickers, starting_date = "2013-08-01") ``` Finally, we backtest the strategy and show the results: ```{r factors EW bt} # backtest the strategy bt_factors_EW <- backtest_allocation(factors_EW,factor_ETFs_data$P, factor_ETFs_data$R) # plot returns charts.PerformanceSummary(bt_factors_EW$returns, main = bt_factors_EW$strat$name, ) # table with performance metrics bt_factors_EW$table_performance ``` ## Example 3: Testing tactical asset allocation strategies In this example, we test and compare four pre-loaded tactical asset allocation strategies: the Ivy Portfolio, the Robust Asset Allocation strategy, the Dual Momentum strategy, and the Adaptive Asset Allocation strategy. A brief description of each strategy (as well as appropriate references) is provided in the corresponding rebalancing functions. ```{r tactical setup} # define strategies ivy <- asset_allocations$tactical$ivy raa <- asset_allocations$tactical$raa dual_mo <- asset_allocations$tactical$dual_mo aaa <- asset_allocations$tactical$aaa # run backtests bt_ivy <- backtest_allocation(ivy, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_raa <- backtest_allocation(raa, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_dual_mo <- backtest_allocation(dual_mo, ETFs$Prices,ETFs$Returns, ETFs$risk_free) bt_aaa <- backtest_allocation(aaa, ETFs$Prices,ETFs$Returns, ETFs$risk_free) ret_strats <- merge.xts(bt_ivy$returns, bt_raa$returns, bt_dual_mo$returns, bt_aaa$returns) # find index from which all strats are available min_ind <- which.max(!is.na(rowSums(ret_strats))) charts.PerformanceSummary(ret_strats[min_ind:nrow(ret_strats)]) cbind(bt_ivy$table_performance, bt_raa$table_performance, bt_dual_mo$table_performance, bt_aaa$table_performance) ``` Visualizing allocations for all strategies: ```{r tactical allocations} chart.StackedBar(bt_ivy$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_ivy$strat$name)) chart.StackedBar(bt_raa$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_raa$strat$name)) chart.StackedBar(bt_dual_mo$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_dual_mo$strat$name)) chart.StackedBar(bt_aaa$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", bt_aaa$strat$name)) ``` ## Example 4: Minimum variance portfolio In this example, we create a strategy that uses the minimum variance portfolio rule using U.S. stocks and bonds. At each rebalancing date, this strategy uses optimization to determine the weights that yield the minimum variance possible. 
```{r mvp, message=FALSE, warning=FALSE} # Minimum variance portfolio us_mvp <- list(name = "US MinVar", tickers = c("VTI", "BND"), default_weights = c(0.5, 0.5), rebalance_frequency = "month", portfolio_rule_fn = min_variance) bt_us_mvp <- backtest_allocation(us_mvp, ETFs$Prices, ETFs$Returns, ETFs$risk_free) charts.PerformanceSummary(bt_us_mvp$returns) bt_us_mvp$table_performance ``` As expected, this strategy would invest heavily in bonds: ```{r mvp plot} chart.StackedBar(bt_us_mvp$rebalance_weights, date.format = "%Y", main = paste0("Allocations, ", us_mvp$name)) ``` ## Example 5: Risk Parity Finally, in this example, we test a risk parity portfolio inspired on the [RPAR ETF](https://www.rparetf.com/rpar). As described in the prospectus, this ETF targets the following risk allocations: ![](RPAR.png) Risk parity is implemented in the `risk_parity` rebalancing function, which considers as risk budgets the values in the default_weights element of the strategy.[^1] Our "clone" RPAR strategy uses the following ETFs: [^1]: The `risk_parity` function itself uses functions from the `riskParityPortfolio` package. - TIPS: TIP - Global Equities - US equities: VTI - Non-U.S. Developed Markets Equities: EFA - Emerging Markets Equities: EEM - Commodities - Commodities: DBC - Gold: GLD - U.S. Treasuries: IEF The risk budgets are set to 25% for each of the four categories above, and equally per asset within each category: ```{r risk parity setup} rp <- list(name = "US Risk Parity", tickers = c("TIP", "VTI", "EFA", "EEM", "DBC", "GLD", "IEF"), default_weights = c(0.25, 0.25/3, 0.25/3, 0.25/3, 0.25/2, 0.25/2, 0.25), rebalance_frequency = "month", portfolio_rule_fn = "risk_parity") bt_rp <- backtest_allocation(rp, ETFs$Prices, ETFs$Returns, ETFs$risk_free) ``` As expected, the volatility is quite low, since 50% of the risk budget is allocated to fixed income: ```{r risk parity perf} charts.PerformanceSummary(bt_rp$returns) bt_rp$table_performance ``` We can check the correlation with the actual RPAR ETF: ```{r risk parity compare, message=FALSE, warning=FALSE} rpar <- get_data_from_tickers("RPAR") rp_compare <- merge.xts(bt_rp$returns, rpar$R, join = "right") rp_compare <- na.omit(rp_compare) cor(rp_compare) ``` Despite the high correlation, RPAR has about twice the volatility of our strategy. The reason is that the ETF can rely on leverage through futures: ```{r risk parity stats} table.AnnualizedReturns(rp_compare) ``` We can rescale the clone to have the same ex-post volatility for an apples-to-apples comparison: ```{r risk parity rescale} rp_rescale_factor <- table.AnnualizedReturns(rp_compare)[2,2]/table.AnnualizedReturns(rp_compare)[2,1] rp_compare[, 1] <- rp_compare[, 1] * rp_rescale_factor charts.PerformanceSummary(rp_compare) ``` # Final thoughts - creating your own strategies Creating your own static asset allocation strategies is straightforward. Just add the tickers and set `portfolio_rule_fn = "constant_weights"`. To create your own dynamic/tactical rebalancing functions, you can look at the provided functions to get some ideas. One important thing to keep in mind is to ensure that the rebalancing function only uses data until each rebalancing date, in order to avoid look-ahead bias. Take a look at the tactical rebalancing functions in the package to see one way to achieve this.
/scratch/gouwar.j/cran-all/cranData/AssetAllocation/vignettes/AssetAllocation.Rmd
analyze_AssetCorr<-function(DTS,N,B=NA,DB=NA,JC=FALSE,CI_Boot=NA,Adjust=0.0001,type="bca", Intra=c("AMM","FMM","CMM","JDP1","JDP2","MLE","AMLE","Beta","Mode"), Inter=c("Copula","Cov","JDP","MLE")){ NS=ncol(DTS) NT=nrow(DTS) sector_names=colnames(DTS) if(length(sector_names)==0){ sector_names=rep("",NS)} #Input checks: if(NS>1){ for(k in 1:(NS)){ d1=DTS[,k] n1=N[,k] if(is.numeric(d1)){d1=d1}else{stop(paste("d in column ", k ," is not numeric", sep = ""))} if(is.numeric(n1)){n1=n1}else{stop(paste("n in column ", k ," is not numeric", sep = ""))} } } #intra correlations will be estimated via MLE (best approach) rho=numeric(NS) for(k in 1:NS) rho[k]=intraMLE(d = DTS[,k],n = N[,k])$Original #Intra-Schaetzer for(k in 1:NS){ temp=intraALL(d = DTS[,k],n = N[,k],B = B,DB = DB,JC = JC,CI_Boot = CI_Boot,type = type,Estimator = Intra,Adjust=Adjust) if(k==1) Estimators_Intra=data.frame(Sektor=k,Sektor_Name=sector_names[k],temp) else Estimators_Intra=rbind(Estimators_Intra,data.frame(Sektor=k,Sektor_Name=sector_names[k],temp)) } #Inter-Schaetzer if(NS>1){ for(k in 1:(NS-1)){ for(l in (k+1):NS){ temp=interALL(d1 = DTS[,k],n1 = N[,k],d2 = DTS[,l],n2 = N[,l],rho1 = rho[k],rho2 = rho[l],B = B,DB = DB,JC = JC,CI_Boot = CI_Boot,Estimator = Inter) if(k==1 && l==2) Estimators_Inter=data.frame(Sektor_1=k,Sektor_Name_1=sector_names[k],Sektor_2=l,Sektor_Name_2=sector_names[l],temp) else Estimators_Inter=rbind(Estimators_Inter,data.frame(Sektor_1=k,Sektor_Name_1=sector_names[k],Sektor_2=l,Sektor_Name_2=sector_names[l],temp)) } } } else{ Estimators_Inter=data.frame()} multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } } #Data preperation Defaults=NULL Sector=NULL for (t in 1:NS){ Defaults=c(Defaults,DTS[,t]) Name=paste0("S",t) Sector=c(Sector,rep(Name,NT)) } Time=rep(1:NT,NS) Plot<-data.frame(Time,Defaults,Sector) Mean=apply(DTS,2,mean,na.rm=TRUE) DTS_plot=ggplot(Plot,aes(x=Time,y=Defaults,col=Sector))+ geom_line()+theme_bw()+ geom_hline(data = data.frame(Sector=unique(Sector),Mean=Mean),mapping = aes(yintercept=Mean,col=Sector))+ylab("Default Time Series") colnames(Estimators_Intra)<-c("Sector","Sector_Name", "Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") colnames(Estimators_Inter)<-c("Sector_1","Sector_Name_1","Sector_2","Sector_Name_2", "Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") if(!is.na(CI_Boot)){ Estimator=Estimators_Intra$Estimator Estimate=Estimators_Intra$Estimate CI=Estimators_Intra$CI correction=Estimators_Intra$correction Intra_plot=ggplot(data = Estimators_Intra,aes(x=Estimator,y=Estimate,shape=CI,col=correction))+theme_bw() +geom_point()+facet_grid(.~Sector)+ylab("Intra Asset Correlation") 
Estimator=Estimators_Inter$Estimator Estimate=Estimators_Inter$Estimate CI=Estimators_Inter$CI correction=Estimators_Inter$correction Inter_plot=ggplot(data = Estimators_Inter,aes(x=Estimator,y=Estimate,col=correction,shape=CI))+theme_bw()+geom_point()+facet_grid(Sector_1~Sector_2)+ylab("Inter Correlation") }else{ Estimator=Estimators_Intra$Estimator Estimate=Estimators_Intra$Estimate correction=Estimators_Intra$correction Intra_plot=ggplot(data = Estimators_Intra,aes(x=Estimator,y=Estimate,col=correction))+theme_bw()+geom_point()+facet_grid(.~Sector)+ylab("Intra Asset Correlation") Estimator=Estimators_Inter$Estimator Estimate=Estimators_Inter$Estimate correction=Estimators_Inter$correction Inter_plot=ggplot(data = Estimators_Inter,aes(x=Estimator,y=Estimate,col=correction))+theme_bw()+geom_point()+facet_grid(Sector_1~Sector_2)+ylab("Inter Correlation") } multiplot(DTS_plot,Intra_plot,Inter_plot,layout = matrix(c(1,2,3,3),ncol = 1)) return(list(Estimators_Intra=Estimators_Intra,Estimators_Inter=Estimators_Inter)) }
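# Illustrative usage sketch (not part of the package source). The inputs below are
# simulated with arbitrary parameter values via defaultTimeseries(); DTS holds one
# default time series per sector and N the corresponding portfolio sizes:
# DTS <- cbind(defaultTimeseries(1000, 0.1, 20, 0.01),
#              defaultTimeseries(1000, 0.2, 20, 0.02))
# N   <- matrix(1000, nrow = 20, ncol = 2)
# analyze_AssetCorr(DTS, N, Intra = c("AMM", "MLE"), Inter = c("Copula", "Cov"))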
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/analyze_AssetCorr.R
defaultTimeseries <- function(N, AC, Years, PD){
  # draw one standard normal systematic factor per year
  Psi <- rnorm(Years)
  # conditional default probability per year (Vasicek one-factor model)
  PDcond1 <- pnorm((qnorm(PD) - sqrt(AC) * Psi) / sqrt(1 - AC))
  # portfolio size in each year
  size_p <- rep(N, Years)
  # simulate the number of defaults per year
  D1 <- rbinom(Years, size_p, PDcond1)
  return(D1)
}
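# Illustrative usage sketch (not part of the package source; values chosen arbitrarily):
# simulate 20 years of default counts for 1000 obligors with PD = 1% and
# asset correlation AC = 0.1
# d1 <- defaultTimeseries(N = 1000, AC = 0.1, Years = 20, PD = 0.01)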
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/defaultTimeseries.R
interALL<-function(d1,n1,d2,n2,rho1,rho2,B=NA,DB=NA,JC=FALSE,CI_Boot=NA,plot=FALSE,type="bca",Estimator=c("Copula","Cov","JDP","MLE"),show_progress=FALSE){ if(is.numeric(d1)){d1=d1}else{stop("d1 is not numeric")} if(is.numeric(n1)){n1=n1}else{stop("n1 is not numeric")} if(is.numeric(d2)){d2=d2}else{stop("d2 is not numeric")} if(is.numeric(n2)){n2=n2}else{stop("n2 is not numeric")} if(is.numeric(rho1)){rho1=rho1}else{stop("rho1 is not numeric")} if(is.numeric(rho2)){rho2=rho2}else{stop("rho1 is not numeric")} if(length(d1)==length(n1) && length(d2)==length(n2) && length(d1)==length(d2)){}else{stop("Input vectors do not have the same length")} NT1=length(d1) NT2=length(d2) NE=length(Estimator) #Punktschaetzer mit Jackknife und Standardschaetzer PEST=data.frame(Estimator=Estimator,value=NA,Type=c("PEST"),correction=rep("none",NE),B=NA,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) JackEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Jackknife",NE),B=NA,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) if(show_progress){ if(JC) cat("original and jackknife corrected estimators\n") else cat("original estimators\n") pb=txtProgressBar(style = 3) } for(i in 1:NE){ try({ temp= switch (JackEST$Estimator[i], "Copula" = interCopula(df1 = d1/n1,df2=d2/n2,JC = JC), "Cov" = interCov(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,JC = JC), "JDP" = interJDP(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,JC = JC), "MLE" = interMLE(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,JC = JC), NA ) PEST$value[i]=temp$Original if(JC) JackEST$value[i]=temp$Jackknife }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=PEST if(JC) Estimators=rbind(Estimators,JackEST) if(!is.na(B)){ #Punkt-/Intervallschaetzer mit Bootstrap BootEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) LIBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("IEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=CI_Boot,lower_upper_ci=rep("lower",NE),stringsAsFactors = FALSE) UIBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("IEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=CI_Boot,lower_upper_ci=rep("upper",NE),stringsAsFactors = FALSE) if(show_progress){ cat("bootstrap corrected estimators\n") pb=txtProgressBar(style = 3) } if(!is.na(CI_Boot)){ for(i in 1:NE){ try({ temp= switch (LIBootEST$Estimator[i], "Copula" = interCopula(df1 = d1/n1,df2=d2/n2,B = B,CI_Boot = CI_Boot), "Cov" = interCov(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B,CI_Boot = CI_Boot), "JDP" = interJDP(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B,CI_Boot = CI_Boot), "MLE" = interMLE(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B,CI = CI_Boot), NA ) BootEST$value[i]=temp$Bootstrap LIBootEST$value[i]=temp$CI_Boot[1] UIBootEST$value[i]=temp$CI_Boot[2] }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,BootEST,LIBootEST,UIBootEST) } else{ for(i in 1:NE){ try({ temp= switch (LIBootEST$Estimator[i], "Copula" = interCopula(df1 = d1/n1,df2=d2/n2,B = B), "Cov" = interCov(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B), "JDP" = interJDP(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B), "MLE" = interMLE(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,B=B), NA ) BootEST$value[i]=temp$Bootstrap }) 
if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,BootEST) } } if(all(!is.na(DB))){ #Punktschaetzer mit Double-Bootstrap DBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Double Bootstrap",NE),B=DB[1],DB=DB[2],CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) if(show_progress){ cat("double bootstrap corrected estimators\n") pb=txtProgressBar(style = 3) } for(i in 1:NE){ try({ DBootEST$value[i]= switch (DBootEST$Estimator[i], "Copula" = interCopula(df1 = d1/n1,df2=d2/n2,DB = DB)$Double_Bootstrap, "Cov" = interCov(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,DB=DB)$Double_Bootstrap, "JDP" = interJDP(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,DB=DB)$Double_Bootstrap, "MLE" = interMLE(d1 = d1,n1 = n1,d2 = d2,n2 = n2,rho1 = rho1,rho2 = rho2,DB=DB)$Double_Bootstrap, NA ) }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,DBootEST) } if(plot==TRUE){ multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } } DTS=cbind(d1,d2) N=cbind(n1,n2) NT=length(d1) Time=rep(1:NT,2) Defaults=c(DTS[,1],DTS[,2]) Sector=c(rep("S1",NT),rep("S2",NT)) Mean=apply(DTS,2,mean,na.rm=TRUE) DTS_plot=ggplot(data.frame(Time=Time,Defaults=Defaults,Sector=Sector),aes(x=Time,y=Defaults,col=Sector))+ geom_line()+ geom_hline(data = data.frame(Sector=c("S1","S2"),Mean=Mean),mapping = aes(yintercept=Mean,col=Sector))+ylab("Default Time Series") if(!is.na(CI_Boot)){ colnames(Estimators)<-c("Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") Estimate=Estimators$Estimate CI=Estimators$CI correction=Estimators$correction EST_plot=ggplot(data = Estimators,aes(x="",y=Estimate,shape=CI,col=correction))+theme_bw() +geom_point()+facet_grid(.~Estimator)+theme(axis.title.x = element_blank())+ylab("Inter Correlation") }else{ colnames(Estimators)<-c("Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") Estimate=Estimators$Estimate correction=Estimators$correction EST_plot=ggplot(data = Estimators,aes(x="",y=Estimate,col=correction))+theme_bw() +geom_point()+facet_grid(.~Estimator)+theme(axis.title.x = element_blank())+ylab("Inter Correlation") } multiplot(DTS_plot,EST_plot,layout = matrix(c(1,2,2,2),ncol = 1)) } return(Estimators) }
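# Illustrative usage sketch (not part of the package source; all inputs simulated
# with arbitrary values). rho1 and rho2 are the intra asset correlations of the two
# sectors, e.g. obtained from intraMLE():
# d1 <- defaultTimeseries(1000, 0.1, 20, 0.01); n1 <- rep(1000, 20)
# d2 <- defaultTimeseries(1000, 0.2, 20, 0.02); n2 <- rep(1000, 20)
# interALL(d1, n1, d2, n2, rho1 = 0.1, rho2 = 0.2, B = 500, CI_Boot = 0.95)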
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interALL.R
interCMM <- function(d1,n1,d2,n2,rho,l=0, B=0, DB=c(0,0), JC=FALSE,CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d1)){d1=d1}else{stop("d1 is not numeric")} if(is.numeric(n1)){n1=n1}else{stop("n1 is not numeric")} if(is.numeric(d2)){d2=d2}else{stop("d2 is not numeric")} if(is.numeric(n2)){n2=n2}else{stop("n2 is not numeric")} if(is.numeric(rho)){rho=rho}else{stop("rho is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d1)==length(n1) && length(d2)==length(n2) && length(d1)==length(d2)){}else{stop("Input vectors do not have the same length")} def1=d1/n1 def2=d2/n2 pd_mean1=mean(def1) pd_mean2=mean(def2) CI=0 estimate=function(def1,def2,CI){ s=qnorm(pd_mean1) t=qnorm(pd_mean2) Term1<- 1/(2*pi*sqrt(1-rho^2)) Term2<- exp(-((0.5*s^2-rho*s*t+0.5*t^2)/(1-rho^2))) ABL1<- Term1*Term2 Term3_N<-s*t+ rho*(1-s^2-t^2) + s*t*rho^2 -rho^3 Term3_D<- 2*pi*(1-rho^2)^(5/2) ABL2<- (Term3_N/Term3_D)*Term2 TS=def1*def2 T<-length(def1) if(l>0){ tryCatch(AC<-(acf(TS, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/T)*AC[z] } AB=sum(Sum)} else{AB=0} var2=var(TS) Res<- rho +(ABL2/(T*ABL1^3))*(var2/2 + AB) if(CI==0){Res2=Res Est<-list(Original =Res2) }else{Est<-list(Original =Res, CI=c(Res-(qt(1-(1-CI)/2,T-1)*abs(ABL1))/sqrt(T)*sqrt(var2+2*(AB)),Res+(qt(1-(1-CI)/2,T-1)*abs(ABL1))/sqrt(T)*sqrt(var2+2*(AB)))) } } Estimate_Standard<- estimate(def1,def2,CI) DEF<-rbind(def1,def2) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(def1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db1<-def1[Ib] Db2<-def2[Ib] try(theta1[i]<-estimate(Db1,Db2,CI)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Db3<-Db1[Ic] Db4<-Db2[Ic] try( theta2[c,i]<-estimate(Db3,Db4,CI)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(def1) N<-length(def1) convert=function(d){ G=length(d) y1=list() for (y in 1:G){ y1[[y]]=as.matrix((c(d[y]))) } return(y1) } d1<-convert(def1) d2<-convert(def2) DEF_JC<-cbind(d1,d2) estimate2=function(X,CI){ def1=NULL N=length(X)/2 for(t in 1:N){ def1[t]<-X[[t]] } N1=2*N def2=NULL for(p in N:N1){ def2[p]<-X[[p]] } def2<-def2[-(1:(N))] pd_mean1=mean(def1) pd_mean2=mean(def2) s=qnorm(pd_mean1) t=qnorm(pd_mean2) Term1<- 1/(2*pi*sqrt(1-rho^2)) Term2<- exp(-((0.5*s^2-rho*s*t+0.5*t^2)/(1-rho^2))) ABL1<- Term1*Term2 Term3_N<-s*t+ rho*(1-s^2-t^2) + s*t*rho^2 -rho^3 Term3_D<- 2*pi*(1-rho^2)^(5/2) ABL2<- (Term3_N/Term3_D)*Term2 TS=def1*def2 T<-length(def1) if(l>0){ tryCatch(AC<-(acf(TS, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/T)*AC[z] } AB=sum(Sum)} else{AB=0} var2=var(TS) Res<- rho +(ABL2/(T*ABL1^3))*(var2/2 + AB) if(missing(CI)){Res2=Res}else{Res2=Res+(qt(1-(1-CI)/2,T-1)/abs(ABL1))/sqrt(T)*sqrt(var2+2*(AB))} return(Res2)} BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate2(d),error=function(e)NA) #try(estimate2(d)) } boot1<- boot(data = DEF_JC, statistic = 
BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} CI=CI_Boot estimate4=function(def1,def2,CI){ s=qnorm(pd_mean1) t=qnorm(pd_mean2) Term1<- 1/(2*pi*sqrt(1-rho^2)) Term2<- exp(-((0.5*s^2-rho*s*t+0.5*t^2)/(1-rho^2))) ABL1<- Term1*Term2 Term3_N<-s*t+ rho*(1-s^2-t^2) + s*t*rho^2 -rho^3 Term3_D<- 2*pi*(1-rho^2)^(5/2) ABL2<- (Term3_N/Term3_D)*Term2 TS=def1*def2 T<-length(def1) if(l>0){ tryCatch(AC<-(acf(TS, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/T)*AC[z] } AB=sum(Sum)} else{AB=0} var2=var(TS) Res<- rho +(ABL2/(T*ABL1^3))*(var2/2 + AB) if(CI==0){Res2=Res Est<-list(Original =Res2) }else{Est<-list(Original =Res, CI=c(Res-(qt(1-(1-CI)/2,T-1)/abs(ABL1))/sqrt(T)*sqrt(var2+2*(AB)),Res+(qt(1-(1-CI)/2,T-1)/abs(ABL1))/sqrt(T)*sqrt(var2+2*(AB)))) } } CI1<- estimate4(def1,def2,CI)$CI Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI=CI1,CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N<-length(def1) Test=NULL for(v in 1:N){ d1<-def1[-v] d2<-def2[-v] try(Test[v]<-estimate(d1,d2,CI)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interCMM.R
interCopula <- function(df1,df2,B=0, DB=c(0,0), JC=FALSE,CI,CI_Boot,type="bca", plot=FALSE){ if(is.numeric(df1)){df1=df1}else{stop("df1 is not numeric")} if(is.numeric(df2)){df2=df2}else{stop("df2 is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(df1)==length(df2)){}else{stop("Input vectors do not have the same length")} estimate=function(df1,df2,CI){ if(missing(CI)){ u1=pobs(as.matrix(df1)) u2=pobs(as.matrix(df2)) InterCor<-BiCopEst(u1,u2,family = 1) Res2<-InterCor$par Est<-list(Original =Res2)}else{ u1=pobs(as.matrix(df1)) u2=pobs(as.matrix(df2)) InterCor<-BiCopEst(u1,u2,family = 1,se=T) a=sum(qnorm(u1)^2+qnorm(u2)^2) b=sum(qnorm(u1)*qnorm(u2)) x=InterCor$par Hessian=(a*(3*x^2+1)-2*b*x^3-6*b*x+length(u1)*x^4-length(u1))/(x^2-1)^3 SD=sqrt(1/(-Hessian)) CI=1-(1-CI)/2 Est<-list(Original =InterCor$par, CI=c(InterCor$par-qnorm(CI)*SD,InterCor$par+qnorm(CI)*SD)) } } Estimate_Standard<-estimate(df1,df2,CI) DEF<-rbind(df1,df2) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(df1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db1<-df1[Ib] Db2<-df2[Ib] try(theta1[i]<-estimate(Db1,Db2,CI)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Db3<-Db1[Ic] Db4<-Db2[Ic] try( theta2[c,i]<-estimate(Db3,Db4,CI)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(df1) convert=function(d){ G=length(d) y1=list() for (y in 1:G){ y1[[y]]=as.matrix((c(d[y]))) } return(y1) } df1<-convert(df1) df2<-convert(df2) DEF_JC<-cbind(df1,df2) estimate2=function(X){ def1=NULL N=length(X)/2 for(t in 1:N){ def1[t]<-X[[t]] } N1=2*N def2=NULL for(p in N:N1){ def2[p]<-X[[p]] } def2<-def2[-(1:(N))] u1=pobs(as.matrix(def1)) u2=pobs(as.matrix(def2)) InterCor<-BiCopEst(u1,u2,family = 1) Res2<-InterCor$par return(Res2) } BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate2(d),error=function(e)NA) #try(estimate2(d)) } boot1<- boot(data = DEF_JC, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} CI=CI_Boot estimate_1=function(X,CI){ def1=NULL N=length(X)/2 for(t in 1:N){ def1[t]<-X[[t]] } N1=2*N def2=NULL for(p in N:N1){ def2[p]<-X[[p]] } def2<-def2[-(1:(N))] Res2=list() u1=pobs(as.matrix(def1)) u2=pobs(as.matrix(def2)) InterCor<-BiCopEst(u1,u2,family = 1,se=TRUE) Res2[[1]]<-InterCor$par CI=1-(1-CI)/2 Res2[[2]]<-c(InterCor$par-qnorm(CI)*InterCor$se,InterCor$par+qnorm(CI)*InterCor$se) return(Res2) } CI1<-estimate_1(DEF_JC,CI) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - 
mean(boot1$t,na.rm = TRUE),CI=CI1[[2]],CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N<-length(df1) Test=NULL for(v in 1:N){ d1<-df1[-v] d2<-df2[-v] try(Test[v]<-estimate(d1,d2,CI)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
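# Illustrative usage sketch (not part of the package source; inputs simulated with
# arbitrary values). interCopula() works on default rates, i.e. defaults divided by
# the number of obligors:
# df1 <- defaultTimeseries(1000, 0.1, 20, 0.01) / 1000
# df2 <- defaultTimeseries(1000, 0.2, 20, 0.02) / 1000
# interCopula(df1, df2, B = 500, CI_Boot = 0.95)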
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interCopula.R
interCov <- function(d1,n1,d2,n2,rho1,rho2,B=0,DB=c(0,0), JC=FALSE, CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d1)){d1=d1}else{stop("d1 is not numeric")} if(is.numeric(n1)){n1=n1}else{stop("n1 is not numeric")} if(is.numeric(d2)){d2=d2}else{stop("d2 is not numeric")} if(is.numeric(n2)){n2=n2}else{stop("n2 is not numeric")} if(is.numeric(rho1)){rho1=rho1}else{stop("rho1 is not numeric")} if(is.numeric(rho2)){rho2=rho2}else{stop("rho1 is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d1)==length(n1) && length(d2)==length(n2) && length(d1)==length(d2)){}else{stop("Input vectors do not have the same length")} def1<- (d1/n1) def2<- (d2/n2) estimate=function(def1,def2){ cov_est<-cov(def1,def2) probOneDefault1<- mean(def1) probOneDefault2<- mean(def2) Inter_Est=function(R2){ corr=matrix(c(1,R2,R2,1),2) integrand=function(u){ pnorm((qnorm(probOneDefault2)-R2*sqrt(rho1* rho2)*u)/sqrt(1-R2^2*rho1* rho2))*dnorm(u) } E_D=integrate(integrand,-Inf,qnorm(probOneDefault1))$value return(abs(E_D-probOneDefault1*probOneDefault2-cov_est)) } InterCor <-optimise(Inter_Est, interval = c(-1, 1), maximum = FALSE)$minimum Est<-list(Original =InterCor)} Estimate_Standard<-estimate(def1,def2) DEF<-rbind(def1,def2) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db1<-def1[Ib] Db2<-def2[Ib] try(theta1[i]<-estimate(Db1,Db2)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Db3<-Db1[Ic] Db4<-Db2[Ic] try( theta2[c,i]<-estimate(Db3,Db4)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(n1) convert=function(d){ G=length(d) y1=list() for (y in 1:G){ y1[[y]]=as.matrix((c(d[y]))) } return(y1) } d1<-convert(def1) d2<-convert(def2) DEF_JC<-cbind(d1,d2) estimate2=function(X){ def1=NULL N=length(X)/2 for(t in 1:N){ def1[t]<-X[[t]] } N1=2*N def2=NULL for(p in N:N1){ def2[p]<-X[[p]] } def2<-def2[-(1:(N))] cov_est<-cov(def1,def2) probOneDefault1<- mean(def1) probOneDefault2<- mean(def2) Inter_Est=function(R2){ corr=matrix(c(1,R2,R2,1),2) integrand=function(u){ pnorm((qnorm(probOneDefault2)-R2*sqrt(rho1* rho2)*u)/sqrt(1-R2^2*rho1* rho2))*dnorm(u) } E_D=integrate(integrand,-Inf,qnorm( probOneDefault1))$value return(abs(E_D-probOneDefault1*probOneDefault2-cov_est)) } InterCor <-optimise(Inter_Est, interval = c(-1, 1), maximum = FALSE)$minimum return(InterCor)} BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate2(d),error=function(e)NA) #try(estimate2(d)) } boot1<- boot(data = DEF_JC, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} 
if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N<-length(n1) Test=NULL for(v in 1:N){ d1<-def1[-v] d2<-def2[-v] try(Test[v]<-estimate(d1,d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interCov.R
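# ---------------------------------------------------------------------------
# Illustrative usage sketch for interCov(): assumes the AssetCorr package and
# its dependencies (e.g. mvtnorm, boot) are loaded, e.g. via library(AssetCorr).
# The "_ex" objects are made-up default counts for two sectors; the intra
# correlations rho1/rho2 would normally be estimated first, e.g. with intraMLE().
d1_ex <- c(13, 9, 21, 30, 14, 9, 11, 25, 8, 12); n1_ex <- rep(1000, 10)
d2_ex <- c(10, 7, 18, 26, 12, 8, 9, 22, 6, 11);  n2_ex <- rep(1000, 10)
interCov(d1_ex, n1_ex, d2_ex, n2_ex, rho1 = 0.10, rho2 = 0.12)$Original
# bootstrap corrected estimate with a 95% percentile confidence interval
interCov(d1_ex, n1_ex, d2_ex, n2_ex, rho1 = 0.10, rho2 = 0.12,
         B = 500, CI_Boot = 0.95, type = "perc")
# ---------------------------------------------------------------------------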
interJDP <- function(d1,n1,d2,n2,rho1,rho2,B=0, DB=c(0,0),JC=FALSE,CI_Boot,type="bca", plot=FALSE){ if(is.numeric(d1)){d1=d1}else{stop("d1 is not numeric")} if(is.numeric(n1)){n1=n1}else{stop("n1 is not numeric")} if(is.numeric(d2)){d2=d2}else{stop("d2 is not numeric")} if(is.numeric(n2)){n2=n2}else{stop("n2 is not numeric")} if(is.numeric(rho1)){rho1=rho1}else{stop("rho1 is not numeric")} if(is.numeric(rho2)){rho2=rho2}else{stop("rho2 is not numeric")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d1)==length(n1) && length(d2)==length(n2) && length(d1)==length(d2)){}else{stop("Input vectors do not have the same length")} def1<- (d1/n1) def2<- (d2/n2) estimate=function(def1,def2){ numPeriods <- length(def1) probOneDefault1<- mean(def1) probOneDefault2<- mean(def2) JDP_matching= function(rho){ Prod_ODF=0 temp=NULL Emp_JDP=NULL for (y in 1:numPeriods){ temp[y]<- ((def1[y]* def2[y])) } Prod_ODF=sum(temp) Emp_JDP<- Prod_ODF/numPeriods integrand=function(u){ pnorm((qnorm(probOneDefault2)-rho*sqrt(rho1*rho2)*u)/sqrt(1-rho^2*rho1*rho2))*dnorm(u) } prob2=integrate(integrand,-Inf,qnorm(probOneDefault1))$value return(abs(prob2-Emp_JDP)) } InterCor<- optimise(JDP_matching, interval = c(-1, 1), maximum = FALSE)$minimum Est<-list(Original =InterCor) } Estimate_Standard<- estimate(def1,def2) DEF<-rbind(def1,def2) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db1<-def1[Ib] Db2<-def2[Ib] try(theta1[i]<-estimate(Db1,Db2)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Db3<-Db1[Ic] Db4<-Db2[Ic] try( theta2[c,i]<-estimate(Db3,Db4)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(def1) N<-length(def1) convert=function(d){ G=length(d) y1=list() for (y in 1:G){ y1[[y]]=as.matrix((c(d[y]))) } return(y1) } d1<-convert(def1) d2<-convert(def2) DEF_JC<-cbind(d1,d2) estimate2=function(X){ def1=NULL N=length(X)/2 for(t in 1:N){ def1[t]<-X[[t]] } N1=2*N def2=NULL for(p in N:N1){ def2[p]<-X[[p]] } def2<-def2[-(1:(N))] numPeriods <- length(def1) probOneDefault1<- mean(def1) probOneDefault2<- mean(def2) JDP_matching= function(rho){ Prod_ODF=0 temp=NULL Emp_JDP=NULL for (y in 1:numPeriods){ temp[y]<- ((def1[y]* def2[y])) } Prod_ODF=sum(temp) Emp_JDP<- Prod_ODF/numPeriods integrand=function(u){ pnorm((qnorm(probOneDefault2)-rho*sqrt(rho1*rho2)*u)/sqrt(1-rho^2*rho1*rho2))*dnorm(u) } prob2=integrate(integrand,-Inf,qnorm(probOneDefault1))$value return(abs(prob2-Emp_JDP)) } InterCor<- optimise(JDP_matching, interval = c(-1, 1), maximum = FALSE)$minimum return(InterCor) } BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate2(d),error=function(e)NA) #try(estimate2(d)) } boot1<- boot(data = DEF_JC, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type =
type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N<-length(n1) Test=NULL for(v in 1:N){ d1<-def1[-v] d2<-def2[-v] try(Test[v]<-estimate(d1,d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interJDP.R
interMLE <- function(d1,n1,d2,n2,rho1,rho2,B=0, DB=c(0,0), JC=FALSE,CI=-1, plot=FALSE){ Estimate_Bootstrap=NULL Estimate_Jackknife=NULL Estimate_Standard=NULL if(is.numeric(d1)){d1=d1}else{stop("d1 is not numeric")} if(is.numeric(n1)){n1=n1}else{stop("n1 is not numeric")} if(is.numeric(d2)){d2=d2}else{stop("d2 is not numeric")} if(is.numeric(n2)){n2=n2}else{stop("n2 is not numeric")} if(is.numeric(rho1)){rho1=rho1}else{stop("rho1 is not numeric")} if(is.numeric(rho2)){rho2=rho2}else{stop("rho2 is not numeric")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d1)==length(n1) && length(d2)==length(n2) && length(d1)==length(d2)){}else{stop("Input vectors do not have the same length")} def1=rbind(d1,n1) def2=rbind(d2,n2) estimate=function(def1,def2,CI){ d1<-def1[1,] n1<-def1[2,] d2<-def2[1,] n2<-def2[2,] integral=NULL nll=function(rho){ ll=0 PD1=mean(d1/n1) PD2=mean(d2/n2) integral=NULL for(i in 1:length(d1)){ d1i=d1[i] n1i=n1[i] d2i=d2[i] n2i=n2[i] integrand=function(x){ PDcond1=pnorm((qnorm(PD1)-sqrt(rho1)*x[,1])/sqrt(1-rho1)) PDcond2=pnorm((qnorm(PD2)-sqrt(rho2)*x[,2])/sqrt(1-rho2)) as.matrix(dbinom(d1i,n1i,PDcond1)*dbinom(d2i,n2i,PDcond2)*dmvnorm(x,sigma=matrix(c(1,rho,rho,1),2))) } myGrid <- createNIGrid(dim=2, type="GHe", level=45) integral[i]=quadrature(integrand, myGrid) if(is.na(integral[i])){integral[i]=1} ll=ll+log(integral[i]) } # print(-ll) -ll } Res2=list() Res1<- optimise(nll, interval = c(-1, 1), maximum = FALSE)$minimum if(CI!=-1){hessian1<-hessian(nll,Res1) SD<- 1/sqrt(hessian1) CI<- 1-(1-CI)/2 Est<-list(Original =Res1, CI=c(Res1-qnorm(CI)*SD,Res1+qnorm(CI)*SD)) }else{Est<-list(Original =Res1)} } Estimate_Standard<-estimate(def1,def2,CI) E_S<-Estimate_Standard$Original DEF<-rbind(def1,def2) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) Db1<-def1[,Ib] Db2<-def2[,Ib] try(theta1[i]<-estimate(Db1,Db2,CI)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) Db3<-Db1[,Ic] Db4<-Db2[,Ic] try( theta2[c,i]<-estimate(Db3,Db4,CI)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(d1) theta=NULL for(i in 1:B){ Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-DEF[,Ib] DEF1<- Db[1:2,] DEF2<- Db[3:4,] theta[i]<-estimate(DEF1,DEF2,CI)$Original } Boot<- mean(theta, na.rm = TRUE) Estimate_Bootstrap<- 2*Estimate_Standard$Original - Boot Estimate_Bootstrap<-list(Original = E_S, Bootstrap=2*Estimate_Standard$Original - Boot,bValues=theta ) if(plot==TRUE){ Dens<-density(theta, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(E_S,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density)
Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N<-length(d1) def1=rbind(d1,n1) def2=rbind(d2,n2) N<-length(n1) Test=NULL for(v in 1:N){ d1<-def1[,-v] d2<-def2[,-v] try(Test[v]<-estimate(d1,d2,CI)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/interMLE.R
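# ---------------------------------------------------------------------------
# Illustrative usage sketch for interMLE(): assumes the AssetCorr package and
# its dependencies (mvtnorm, mvQuad, numDeriv) are loaded. The likelihood
# evaluates a bivariate Gauss-Hermite quadrature for every period, so even this
# small made-up example can take a while; rho1/rho2 are assumed intra
# correlations of the two sectors.
d1_ex <- c(13, 9, 21, 30, 14, 9, 11, 25, 8, 12); n1_ex <- rep(1000, 10)
d2_ex <- c(10, 7, 18, 26, 12, 8, 9, 22, 6, 11);  n2_ex <- rep(1000, 10)
interMLE(d1_ex, n1_ex, d2_ex, n2_ex, rho1 = 0.10, rho2 = 0.12, CI = 0.95)
# returns $Original (ML point estimate) and, because CI was supplied, an
# asymptotic confidence interval $CI based on the observed information
# ---------------------------------------------------------------------------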
intraALL<-function(d,n,B=NA,DB=NA,JC=FALSE,CI_Boot=NA,Adjust=0.0001,plot=FALSE,type="bca",Quantile=0.999,Estimator=c("AMM","FMM","CMM","JDP1","JDP2","MLE","AMLE","Beta","Mode"),show_progress=FALSE){ if(is.numeric(Adjust)){Adjust=Adjust}else{stop("Adjust is not numeric")} if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} NT=length(d) NE=length(Estimator) #Punktschaetzer mit Jackknife und Standardschaetzer PEST=data.frame(Estimator=Estimator,value=NA,Type=c("PEST"),correction=rep("none",NE),B=NA,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) JackEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Jackknife",NE),B=NA,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) if(show_progress){ if(JC) cat("original and jackknife corrected estimators\n") else cat("original estimators\n") pb=txtProgressBar(style = 3) } for(i in 1:NE){ try({ temp= switch (JackEST$Estimator[i], "AMM" = intraAMM(d = d,n = n,JC = JC), "FMM" = intraFMM(d = d,n = n,JC = JC), "CMM" = intraCMM(d = d,n = n,JC = JC), "JDP1" = intraJDP1(d = d,n = n,JC = JC), "JDP2" = intraJDP2(d = d,n = n,JC = JC), "MLE" = intraMLE(d = d,n = n,JC = JC), "AMLE" = intraAMLE(d = d,n = n,JC = JC,Adjust = Adjust), "Beta" = intraBeta(d = d,n = n,JC = JC,Quantile=Quantile), "Mode" = intraMode(d = d,n = n,JC = JC), NA ) PEST$value[i]=temp$Original if(JC) JackEST$value[i]=temp$Jackknife }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=PEST if(JC) Estimators=rbind(Estimators,JackEST) if(!is.na(B)){ #Punkt-/Intervallschaetzer mit Bootstrap BootEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) LIBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("IEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=CI_Boot,lower_upper_ci=rep("lower",NE),stringsAsFactors = FALSE) UIBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("IEST"),correction=rep("Bootstrap",NE),B=B,DB=NA,CI_Boot=CI_Boot,lower_upper_ci=rep("upper",NE),stringsAsFactors = FALSE) if(show_progress){ cat("bootstrap corrected estimators\n") pb=txtProgressBar(style = 3) } if(!is.na(CI_Boot)){ for(i in 1:NE){ try({ temp= switch (LIBootEST$Estimator[i], "AMM" = intraAMM(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "FMM" = intraFMM(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "CMM" = intraCMM(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "JDP1" = intraJDP1(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "JDP2" = intraJDP2(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "MLE" = intraMLE(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), "AMLE" = intraAMLE(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type,Adjust = Adjust), "Beta" = intraBeta(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type,Quantile=Quantile), "Mode" = intraMode(d = d,n = n,B = B,CI_Boot = CI_Boot,type = type), NA ) BootEST$value[i]=temp$Bootstrap LIBootEST$value[i]=temp$CI_Boot[1] UIBootEST$value[i]=temp$CI_Boot[2] }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,BootEST,LIBootEST,UIBootEST) } else{ for(i in 1:NE){ try({ temp= switch (LIBootEST$Estimator[i], "AMM" = intraAMM(d = d,n = n,B = B), "FMM" = intraFMM(d = d,n = n,B = B), "CMM" = intraCMM(d = d,n = n,B = B), "JDP1" = intraJDP1(d = d,n = n,B = B), "JDP2" = 
intraJDP2(d = d,n = n,B = B), "MLE" = intraMLE(d = d,n = n,B = B), "AMLE" = intraAMLE(d = d,n = n,B = B,Adjust = Adjust), "Beta" = intraBeta(d = d,n = n,B = B,Quantile=Quantile), "Mode" = intraMode(d = d,n = n,B = B), NA ) BootEST$value[i]=temp$Bootstrap }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,BootEST) } } if(all(!is.na(DB))){ #Punktschaetzer mit Double-Bootstrap DBootEST=data.frame(Estimator=Estimator,value=NA,Type=c("CEST"),correction=rep("Double Bootstrap",NE),B=DB[1],DB=DB[2],CI_Boot=NA,lower_upper_ci=c(""),stringsAsFactors = FALSE) if(show_progress){ cat("double bootstrap corrected estimators\n") pb=txtProgressBar(style = 3) } for(i in 1:NE){ try({ DBootEST$value[i]= switch (DBootEST$Estimator[i], "AMM" = intraAMM(d = d,n = n,DB = DB)$Double_Bootstrap, "FMM" = intraFMM(d = d,n = n,DB = DB)$Double_Bootstrap, "CMM" = intraCMM(d = d,n = n,DB = DB)$Double_Bootstrap, "JDP1" = intraJDP1(d = d,n = n,DB = DB)$Double_Bootstrap, "JDP2" = intraJDP2(d = d,n = n,DB = DB)$Double_Bootstrap, "MLE" = intraMLE(d = d,n = n,DB = DB)$Double_Bootstrap, "AMLE" = intraAMLE(d = d,n = n,DB = DB,Adjust = Adjust)$Double_Bootstrap, "Beta" = intraBeta(d = d,n = n,DB = DB,Quantile=Quantile)$Double_Bootstrap, "Mode" = intraMode(d = d,n = n,DB = DB)$Double_Bootstrap, NA ) }) if(show_progress) setTxtProgressBar(pb,value = i/NE) } if(show_progress) cat("\n") Estimators=rbind(Estimators,DBootEST) } if(plot==TRUE){ multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) { # Make a list from the ... arguments and plotlist plots <- c(list(...), plotlist) numPlots = length(plots) # If layout is NULL, then use 'cols' to determine layout if (is.null(layout)) { # Make the panel # ncol: Number of columns of plots # nrow: Number of rows needed, calculated from # of cols layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols)) } if (numPlots==1) { print(plots[[1]]) } else { # Set up the page grid.newpage() pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout)))) # Make each plot, in the correct location for (i in 1:numPlots) { # Get the i,j matrix positions of the regions that contain this subplot matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE)) print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row, layout.pos.col = matchidx$col)) } } } Plot=data.frame(1:length(d),d) colnames(Plot)<-c("Time","Defaults") Time<-Plot$Time Defaults<-Plot$Defaults DTS_plot=ggplot(Plot,aes(x=Time,y=Defaults))+theme_bw() +geom_line()+ggtitle("AssetCorr- An Overview" )+theme(plot.title = element_text(hjust = 0.5,size=15, face="bold.italic")) if(!is.na(CI_Boot)){ colnames(Estimators)<-c("Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") Estimate=Estimators$Estimate CI=Estimators$CI correction=Estimators$correction EST_plot=ggplot(data = Estimators,aes(x="",y=Estimate,shape=CI,col=correction))+theme_bw() +geom_point()+facet_grid(.~Estimator)+theme(axis.title.x = element_blank()) }else{ colnames(Estimators)<-c("Estimator","Estimate","Type","correction","B","DB","CI_Boot","CI") Estimate=Estimators$Estimate correction=Estimators$correction EST_plot=ggplot(data = Estimators,aes(x="",y=Estimate,col=correction))+theme_bw() +geom_point()+facet_grid(.~Estimator)+theme(axis.title.x = element_blank()) } multiplot(DTS_plot,EST_plot) } return(Estimators) }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraALL.R
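# ---------------------------------------------------------------------------
# Illustrative usage sketch for intraALL(): assumes the AssetCorr package is
# loaded. intraALL() runs several of the intra-correlation estimators defined
# in this package on one default time series and collects them in a single
# data frame; JC = TRUE adds jackknife corrected values. The series below is
# simulated from a one-factor Vasicek model purely for demonstration.
set.seed(1)
PD_ex <- 0.02; rho_ex <- 0.15; years_ex <- 20; pool_ex <- 5000
Z_ex <- rnorm(years_ex)                                         # systematic factor
condPD_ex <- pnorm((qnorm(PD_ex) - sqrt(rho_ex) * Z_ex) / sqrt(1 - rho_ex))
d_ex <- rbinom(years_ex, pool_ex, condPD_ex); n_ex <- rep(pool_ex, years_ex)
intraALL(d_ex, n_ex, JC = TRUE, Estimator = c("AMM", "FMM", "CMM", "AMLE"),
         show_progress = TRUE)
# ---------------------------------------------------------------------------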
intraAMLE <- function(d,n,B=0,DB=c(0,0),JC=FALSE,Adjust=0,CI_1,CI_2, CI_Boot,VaR=0.99, VaR_CI=0.95, ES=0.975, ES_CI= 0.95,type="bca", plot=FALSE){ if(is.numeric(Adjust)){Adjust=Adjust}else{stop("Adjust is not numeric")} if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} qVasicek=function(p,rho,pd){ pnorm((qnorm(pd)+sqrt(rho)*qnorm(p))/(sqrt(1-rho))) } d1=d/n if(missing(Adjust)){d1=d1}else{d1[d1== 0] <- Adjust} if(missing(Adjust)){d1=d1}else{d1[d1== 1] <- (1-Adjust)} if(missing(CI_1)){CI_1=-1} MEAN=1/length(d1)*sum(qnorm(d1)) p_d1=1/length(d1)*sum(qnorm(d1)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) rho_ml=p_d1/(1+p_d1) alpha_VaR=(1-VaR) n_obs=length(d1) delta=qnorm(d1) s1=sum(delta) s2=sum(delta^2) g1=dnorm((qnorm(PD)+sqrt(rho_ml)*qnorm(1-alpha_VaR))/sqrt(1-rho_ml))*(qnorm(PD)/(2*(1-rho_ml)^(3/2))+qnorm(1-alpha_VaR)/(2*(1-rho_ml)^(3/2)*sqrt(rho_ml))) g2=dnorm((qnorm(PD)+sqrt(rho_ml)*qnorm(1-alpha_VaR))/sqrt(1-rho_ml))/(dnorm(qnorm(PD))*sqrt(1-rho_ml)) grad=c(g1,g2) j11=-(s1^4+2*n_obs^2*(s2+n_obs)^2-s1^2*n_obs*(3*s2+4*n_obs))/(4*n_obs^3)*(1+p_d1)^2/p_d1^2 j12=-n_obs/2*mean(qnorm(d1))/dnorm(qnorm(PD))*(1+p_d1)^(3/2)/p_d1 j21=j12 j22=-n_obs/dnorm(qnorm(PD))^2*(1+p_d1)/(p_d1) FI=-matrix(c(j11,j21,j12,j22),2,2) VarTrafo=t(grad)%*%solve(FI)%*%t(t(grad)) VaR_mle=qVasicek(1-alpha_VaR,rho_ml,PD) alpha_CI= ((1-VaR_CI)/2) ci_lower=VaR_mle-qnorm(1-alpha_CI)*sqrt(VarTrafo) ci_upper=VaR_mle+qnorm(1-alpha_CI)*sqrt(VarTrafo) g1es=dnorm(qnorm(alpha_VaR))/(alpha_VaR*2*sqrt(rho_ml)*sqrt(1-rho_ml))*dnorm((qnorm(PD)-sqrt(rho_ml)*qnorm(alpha_VaR))/(sqrt(1-rho_ml))) g2es=pnorm((qnorm(alpha_VaR)-sqrt(rho_ml)*qnorm(PD))/(sqrt(1-rho_ml)))/alpha_VaR grad_es=c(g1es,g2es) ES_mle=pmvnorm(upper=c(qnorm(PD),-qnorm(1-alpha_VaR)),corr=matrix(c(1,sqrt(rho_ml),sqrt(rho_ml),1),2,2))[1]/(alpha_VaR) VarTrafoES=t(grad_es)%*%solve(FI)%*%t(t(grad_es)) alpha_CI=1-ES_CI ci_ES_lower=ES_mle-qnorm(1-alpha_CI/2)*sqrt(VarTrafoES) ci_ES_upper=ES_mle+qnorm(1-alpha_CI/2)*sqrt(VarTrafoES) if(CI_1>0){ estimate1=function(X,CI_1){ if(missing(CI_1)){Est<-list(Original =p_d1/(1+p_d1))}else{ CI_1=1-(1-CI_1)/2 s1=sum(qnorm(X)) s2=sum(qnorm(X)^2) s3=(s1^4+2*length(X)^2*(s2+length(X))^2-s1^2*length(X)*(3*s2+4*length(X))) z=3/2 SD_PD=sqrt((s3*p_d1)/(2*length(X)^5))*(dnorm(qnorm(PD))/(1+p_d1)^z) Est<-list(PD=PD, PD_CI_1=c(PD-qnorm(CI_1)*SD_PD,PD+qnorm(CI_1)*SD_PD), Original =p_d1/(1+p_d1), CI_1=c(p_d1/(1+p_d1)-qnorm(CI_1)*(sqrt(2/length(X))*((p_d1)/(1+p_d1)^2)),p_d1/(1+p_d1)+qnorm(CI_1)*(sqrt(2/length(X))*((p_d1)/(1+p_d1)^2))) , VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper)) } return(Est) } Estimate_Standard<- estimate1(d1,CI_1) } if(missing(CI_2)){CI_2=-1} if(CI_2>0){ estimate2=function(X,CI_2){ MEAN=1/length(X)*sum(qnorm(X)) p_d1=1/length(X)*sum(qnorm(X)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) Res2=list() Res2[[1]]=p_d1/(1+p_d1) Alpha=1-CI_2 SD1=(length(X)*Res2[[1]])/((length(X)*Res2[[1]]+ qchisq((1-Alpha/2),length(X)-1)*(1-Res2[[1]]))) SD2=(length(X)*Res2[[1]])/((length(X)*Res2[[1]]+ qchisq(Alpha/2,length(X)-1)*(1-Res2[[1]]))) Res2[[2]]=c(SD1,SD2) Res=Res2 Est<-list(PD=PD,Original =Res2[[1]], CI_2=Res2[[2]], VaR=VaR_mle, 
VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper)) return(Est) } Estimate_Standard<- estimate2(d1,CI_2) } if(CI_1==-1&CI_2==-1){ estimate3=function(X){ MEAN=1/length(X)*sum(qnorm(X)) p_d1=1/length(X)*sum(qnorm(X)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) Est<-list(PD=PD,Original =p_d1/(1+p_d1), VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper)) } Estimate_Standard<- estimate3(d1) } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] MEAN=1/length(d1)*sum(qnorm(d1)) p_d1=1/length(d1)*sum(qnorm(d1)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) Db<-d1[Ib] try(theta1[i]<-estimate3(Db)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) Dc<-Db[Ic] try( theta2[c,i]<-estimate3(Dc)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(PD=PD,Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper), oValues=theta1, iValues=theta2) } if(B>0){ estimate4=function(X){ MEAN=1/length(X)*sum(qnorm(X)) p_d1=1/length(X)*sum(qnorm(X)^2-MEAN^2) Res2=p_d1/(1+p_d1) } MEAN=1/length(d1)*sum(qnorm(d1)) p_d1=1/length(d1)*sum(qnorm(d1)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate4(d),error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(PD=PD,Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE), VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} N<-length(d1) MEAN=1/length(d1)*sum(qnorm(d1)) p_d1=1/length(d1)*sum(qnorm(d1)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) CI_1=CI_2=1-(1-CI_Boot)/2 CI1=c(p_d1/(1+p_d1)-qnorm(CI_1)*(sqrt(2/N)*((p_d1)/(1+p_d1)^2)),p_d1/(1+p_d1)+qnorm(CI_1)*(sqrt(2/N)*((p_d1)/(1+p_d1)^2))) s1=sum(qnorm(d1)) s2=sum(qnorm(d1)^2) s3=(s1^4+2*length(d1)^2*(s2+length(d1))^2-s1^2*length(d1)*(3*s2+4*length(d1))) z=3/2 SD_PD=sqrt((s3*p_d1)/(2*length(d1)^5))*(dnorm(qnorm(PD))/(1+p_d1)^z) Alpha=1-CI_2 SD1=(N*boot1$t0)/(N*boot1$t0+ qchisq((1-Alpha/2),N-1)*(1-boot1$t0)) SD2=(N*boot1$t0)/(N*boot1$t0+ qchisq((Alpha/2),N-1)*(1-boot1$t0)) CI2=c(SD1,SD2) CI_1 Estimate_Bootstrap<-list(PD=PD,PD_CI_1=c(PD-qnorm(CI_1)*SD_PD,PD+qnorm(CI_1)*SD_PD),Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_1=CI1, CI_2=CI2,CI_Boot=Conf, VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper),bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") 
BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N=length(d1) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate3(d2)$Original) } MEAN=1/length(d1)*sum(qnorm(d1)) p_d1=1/length(d1)*sum(qnorm(d1)^2-MEAN^2) PD=pnorm(MEAN/sqrt(1+p_d1)) Estimate_Jackknife<-list(PD=PD,Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test)), VaR=VaR_mle, VaR_CI=c(ci_lower,ci_upper),ES=ES_mle, ES_CI=c(ci_ES_lower,ci_ES_upper)) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraAMLE.R
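# ---------------------------------------------------------------------------
# Illustrative usage sketch for intraAMLE(): assumes the AssetCorr package is
# loaded; data are again simulated from a one-factor Vasicek model. The
# estimator works on transformed default rates, so observed rates of exactly
# 0 or 1 must be shifted slightly, which is what the Adjust argument does.
# Besides the asset correlation it also reports the implied PD, VaR and ES of
# the fitted Vasicek distribution.
set.seed(2)
condPD_ex <- pnorm((qnorm(0.01) - sqrt(0.1) * rnorm(25)) / sqrt(1 - 0.1))
d_ex <- rbinom(25, 2000, condPD_ex); n_ex <- rep(2000, 25)
fit_ex <- intraAMLE(d_ex, n_ex, Adjust = 0.0001, CI_1 = 0.95)
fit_ex$Original   # point estimate of the asset correlation
fit_ex$CI_1       # asymptotic confidence interval
fit_ex$VaR        # VaR of the fitted Vasicek distribution (default VaR = 0.99)
# ---------------------------------------------------------------------------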
intraAMM <- function(d,n,B=0,DB=c(0,0),JC=FALSE, CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} d1=d/n estimate=function(X){ pd_mean=mean(X) var_dat=var(X) foo=function(rho){ corr=matrix(c(1,rho,rho,1),2) prob=pmvnorm(lower=c(-Inf,-Inf),upper=c(qnorm(pd_mean),qnorm(pd_mean)),mean=c(0,0),corr=corr) return(prob-pd_mean^2-var_dat) } Est<-list(Original = uniroot(foo,c(0,1))$root ) } Estimate_Standard<- estimate(d1) if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate(d)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-d1[Ib] try(theta1[i]<-estimate(Db)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Dc<-Db[Ic] try( theta2[c,i]<-estimate(Dc)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } 
if(JC==TRUE){ N=length(d1) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate(d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraAMM.R
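# ---------------------------------------------------------------------------
# Illustrative usage sketch for intraAMM(): assumes the AssetCorr package is
# loaded. A simple check is to simulate a default time series from a one-factor
# Vasicek model with known asset correlation and compare the plain moment
# estimate with its bootstrap bias corrected version.
set.seed(3)
rho_true_ex <- 0.12
condPD_ex <- pnorm((qnorm(0.02) - sqrt(rho_true_ex) * rnorm(30)) / sqrt(1 - rho_true_ex))
d_ex <- rbinom(30, 5000, condPD_ex); n_ex <- rep(5000, 30)
intraAMM(d_ex, n_ex)$Original                   # plain moment estimate
intraAMM(d_ex, n_ex, B = 500, CI_Boot = 0.95)   # bias corrected, with bca interval
# ---------------------------------------------------------------------------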
intraBeta=function(d,n,Quantile=0.999,B=0,DB=c(0,0),JC=FALSE,CI_Boot,type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} d1=d/n estimate=function(d1){ PD=mean(d1) alpha=PD*((PD*(1-PD))/var(d1)-1) beta=alpha/PD*(1-PD) foo=function(rho){ Var_Beta=qbeta(Quantile, alpha, beta) Var_Vasicek=pnorm((qnorm(PD)+sqrt(rho)*qnorm(Quantile))/sqrt(1-rho)) return(abs(Var_Beta-Var_Vasicek)) } Est<-list(Original =optimise(foo, interval = c(0, 1), maximum = FALSE)$minimum) } Estimate_Standard<-estimate(d1) if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate(d)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-d1[Ib] try(theta1[i]<-estimate(Db)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Dc<-Db[Ic] try( theta2[c,i]<-estimate(Dc)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, 
Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(JC==TRUE){ N=length(d1) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate(d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraBeta.R
intraCMM <- function(d,n,l=0, B=0, DB=c(0,0), JC=FALSE,CI_Boot,type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} d1=d/n CI=0 estimate=function(X,CI){ if(CI==0){ pd_mean=1/length(X)*sum(X) var_dat= 1/length(X)*sum((X^2-X/n)) foo=function(rho){ corr=matrix(c(1,rho,rho,1),2) prob=pmvnorm(lower=c(-Inf,-Inf),upper=c(qnorm(pd_mean),qnorm(pd_mean)),mean=c(0,0),corr=corr) return(prob-var_dat) } Res<-uniroot(foo,c(0,1))$root s=qnorm(pd_mean) ABL1<- 1/(2*pi*sqrt(1-Res^2))*exp(-(s^2/(1+Res))) ABL2<- ((s^2+ Res*(1-2*s^2) + s^2*Res^2 -Res^3)/(2*pi*(1-Res^2)^(5/2)))*exp(-(s^2/(1+Res))) Time<-length(X) if(l>0){ tryCatch(AC<-(acf(X^2, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/Time)*AC[z] } AB=sum(Sum)} else{AB=0} nX=X^2 nM=1/length(X)*sum(nX) var2=var(nX) Res2=(Res +(ABL2/(Time*ABL1^3))*(var2/2 + AB)) Est<-list(Original =(Res +(ABL2/(Time*ABL1^3))*(var2/2 + AB))) }else{ pd_mean=1/length(X)*sum(X) var_dat= 1/length(X)*sum((X^2-X/n)) foo=function(rho){ corr=matrix(c(1,rho,rho,1),2) prob=pmvnorm(lower=c(-Inf,-Inf),upper=c(qnorm(pd_mean),qnorm(pd_mean)),mean=c(0,0),corr=corr) return(prob-var_dat) } Res<-uniroot(foo,c(0,1))$root s=qnorm(pd_mean) ABL1<- 1/(2*pi*sqrt(1-Res^2))*exp(-(s^2/(1+Res))) ABL2<- ((s^2+ Res*(1-2*s^2) + s^2*Res^2 -Res^3)/(2*pi*(1-Res^2)^(5/2)))*exp(-(s^2/(1+Res))) Time<-length(X) if(l>0){ tryCatch(AC<-(acf(X^2, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/Time)*AC[z] } AB=sum(Sum)} else{AB=0} nX=X^2 nM=1/length(X)*sum(nX) var2=var(nX) Res2=(Res +(ABL2/(Time*ABL1^3))*(var2/2 + AB)) Est<-list(Original =Res2, CI=c(Res2-(qt(1-(1-CI)/2,Time-1)*abs(1/ABL1))/sqrt(Time)*sqrt(var2+2*(AB)),Res2+(qt(1-(1-CI)/2,Time-1)*abs(1/ABL1))/sqrt(Time)*sqrt(var2+2*(AB)))) } } Estimate_Standard<- estimate(d1,CI) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-d1[Ib] try(theta1[i]<-estimate(Db,CI)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Dc<-Db[Ic] try( theta2[c,i]<-estimate(Dc,CI)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate(d,CI)$Original,error=function(e)NA) #try(estimate(d)) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = 
type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} CI=CI_Boot pd_mean=mean(d1) var_dat= 1/length(d1)*sum((d1^2-d1/n)) foo=function(rho){ corr=matrix(c(1,rho,rho,1),2) prob=pmvnorm(lower=c(-Inf,-Inf),upper=c(qnorm(pd_mean),qnorm(pd_mean)),mean=c(0,0),corr=corr) return(prob-var_dat) } Res<-uniroot(foo,c(0,1))$root s=qnorm(pd_mean) ABL1<- 1/(2*pi*sqrt(1-Res^2))*exp(-(s^2/(1+Res))) ABL2<- ((s^2+ Res*(1-2*s^2) + s^2*Res^2 -Res^3)/(2*pi*(1-Res^2)^(5/2)))*exp(-(s^2/(1+Res))) nX=d1^2 nM=1/length(d1^2)*sum(d1^2) var2=1/length(d1)*sum(nX^2-nM^2) if(l>0){ tryCatch(AC<-(acf(d1^2, plot = FALSE, type = "covariance")$acf)[(1:l),1,1], error = function(e) 0) Sum=NULL for (z in 1:l){ Sum[z]<-(1-z/N)*AC[z] } AB=sum(Sum)} else{AB=0} Res2=(Res +(ABL2/(N*ABL1^3))*(var2/2 + AB)) CI=c(Res2-(qt(1-(1-CI)/2,N-1)/abs(ABL1))/sqrt(N)*sqrt(var2+2*(AB)),Res2+(qt(1-(1-CI)/2,N-1)/abs(ABL1))/sqrt(N)*sqrt(var2+2*(AB))) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI=CI,CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N=length(d1) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate(d2,CI)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraCMM.R
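# ---------------------------------------------------------------------------
# Illustrative usage sketch for intraCMM(): assumes the AssetCorr package is
# loaded; data are simulated as in the sketches above. intraCMM() adds an
# analytic bias correction to the moment estimator; the l argument controls
# how many autocovariance terms of the squared default rates enter that
# correction (l = 0 ignores autocorrelation).
set.seed(5)
condPD_ex <- pnorm((qnorm(0.02) - sqrt(0.12) * rnorm(30)) / sqrt(1 - 0.12))
d_ex <- rbinom(30, 5000, condPD_ex); n_ex <- rep(5000, 30)
intraCMM(d_ex, n_ex)$Original            # correction without autocorrelation terms
intraCMM(d_ex, n_ex, l = 3)$Original     # include the first autocovariance lags
# ---------------------------------------------------------------------------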
intraFMM <- function(d,n,B=0,DB=c(0,0), JC=FALSE,CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} d1=d/n estimate=function(X){ var_dat=var(X) pd_mean=mean(X) foo=function(rho){ corr=matrix(c(1,rho,rho,1),2) prob=pmvnorm(lower=c(-Inf,-Inf),upper=c(qnorm(pd_mean),qnorm(pd_mean)),mean=c(0,0),corr=corr) return((prob- pd_mean^2)+1/length(d1)*sum(1/n)*(pd_mean-prob)-var_dat) } Est<-list(Original =uniroot(foo,c(0,1))$root) } Estimate_Standard<- estimate(d1) if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-d1[Ib] try(theta1[i]<-estimate(Db)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Dc<-Db[Ic] try( theta2[c,i]<-estimate(Dc)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate(d)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = 
"bold", size = 12)) print(P) } } if(JC==TRUE){ N=length(d1) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate(d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraFMM.R
intraJDP1 <- function(d, n, B=0,DB=c(0,0), JC=FALSE, CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} estimate=function(d,n){ estimateDefaultIntracorr <- function(pOneDefault, pTwoDefaults)# Equation 7 in Kalkbrenner { intracorr <- (pTwoDefaults - pOneDefault^2) / (pOneDefault - pOneDefault^2); return (intracorr); } estimateAssetCorr <- function(pBothDefaults, PD_1, PD_2) { # if (PD_1 == 0) # { # PD_1 = 10^-9; # } # if (PD_2 == 0) # { # PD_1 = 10^-9; # } # if (is.nan(pBothDefaults)) # { # pBothDefaults = 10^-9; # } c1 <- qnorm(PD_1); c2 <- qnorm(PD_2); fEquation <- function(rho) { return (pmvnorm(lower = c(-Inf, -Inf), upper = c(c1, c2), sigma = matrix(c(1, rho, rho, 1), nrow = 2)) - pBothDefaults); } return (uniroot(fEquation,c(0,1))$root); } numPeriods <- length(n) probOneDefault<- mean(d/n) tempVec <-0 for (t in 1:numPeriods) { tempVec<- (tempVec + (d[t]^2 - d[t]) / (n[t]^2 - n[t])); } probTwoDefaults <- tempVec / numPeriods defaultCorr <- estimateDefaultIntracorr(probOneDefault, probTwoDefaults); Est<-list(Original =estimateAssetCorr(probTwoDefaults, probOneDefault, probOneDefault)) } Estimate_Standard<- estimate(d,n) if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d) BCA=function(data,n, indices){ d <- data[indices,] n<-n[indices] tryCatch(estimate(d,n)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, n=n, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= 
element_text(face = "bold", size = 12)) print(P) } } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement d_o<-d[Ib] n_o<-n[Ib] try(theta1[i]<-estimate(d_o,n_o)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement d_i<-d_o[Ic] n_i<-n_o[Ic] try( theta2[c,i]<-estimate(d_i,n_i)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(JC==TRUE){ N=length(d) Test=NULL for(v in 1:N){ d2<-d[-v] n2<-n[-v] try(Test[v]<-estimate(d2,n2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraJDP1.R
intraJDP2 <- function(d, n, B=0, DB=c(0,0), JC=FALSE, CI_Boot, type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} estimate=function(d,n){ estimateDefaultIntracorr <- function(pOneDefault, pTwoDefaults)# Equation 7 in Kalkbrenner { intracorr <- (pTwoDefaults - pOneDefault^2) / (pOneDefault - pOneDefault^2); return (intracorr); } estimateAssetCorr <- function(pBothDefaults, PD_1, PD_2) { # if (PD_1 == 0) # { # PD_1 = 10^-9; # } # if (PD_2 == 0) # { # PD_1 = 10^-9; # } # if (is.nan(pBothDefaults)) # { # pBothDefaults = 10^-9; # } c1 <- qnorm(PD_1); c2 <- qnorm(PD_2); fEquation <- function(rho) { return (pmvnorm(lower = c(-Inf, -Inf), upper = c(c1, c2), sigma = matrix(c(1, rho, rho, 1), nrow = 2)) - pBothDefaults); } return (uniroot(fEquation,c(0,1))$root); } numPeriods <- length(n) probOneDefault<- mean(d/n) tempVec <-0 for (t in 1:numPeriods) { tempVec<- (tempVec + (d[t]^2 ) / (n[t]^2 )); } probTwoDefaults <- tempVec / numPeriods defaultCorr <- estimateDefaultIntracorr(probOneDefault, probTwoDefaults); Est<-list(Original =estimateAssetCorr(probTwoDefaults, probOneDefault, probOneDefault)) } Estimate_Standard<- estimate(d,n) if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d) BCA=function(data,n, indices){ d <- data[indices,] n<-n[indices] tryCatch(estimate(d,n)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, n=n, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= 
element_text(face = "bold", size = 12)) print(P) } } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement d_o<-d[Ib] n_o<-n[Ib] try(theta1[i]<-estimate(d_o,n_o)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement d_i<-d_o[Ic] n_i<-n_o[Ic] try( theta2[c,i]<-estimate(d_i,n_i)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(JC==TRUE){ N=length(d) Test=NULL for(v in 1:N){ d2<-d[-v] n2<-n[-v] try(Test[v]<-estimate(d2,n2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraJDP2.R
intraMLE <- function(d,n,B=0,DB=c(0,0),JC=FALSE,CI=0,CI_Boot,type="bca", plot=FALSE){ Res2=list() if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} estimate=function(d,n,CI){ if(CI==0){nll=function(rho){ integral=NULL simpson <- function(fun, a, b, n=700) { h <- (b-a)/n x <- seq(a, b, by=h) if (n == 2) { s <- fun(x[1]) + 4*fun(x[2]) +fun(x[3]) } else { s <- fun(x[1]) + fun(x[n+1]) + 2*sum(fun(x[seq(2,n,by=2)])) + 4 *sum(fun(x[seq(3,n-1, by=2)])) } s <- s*h/3 return(s) } ll=0 d1=d/n PD1=mean(d1) for(i in 1:length(d)){ d1i=d[i] n1i=n[i] integrand=function(x){ condPD <- pnorm((qnorm(PD1) - sqrt(rho) * x) / sqrt(1 - rho)); return (choose(n1i, d1i) * (condPD^d1i) * ((1 - condPD)^(n1i - d1i)) * dnorm(x)); } integral[i]=simpson(integrand,-10,10,n=10000) if(is.na(integral[i])){integral[i]=1} ll=ll+log(integral[i]) } return(-ll) } Est<-list(Original =optimise(nll, interval = c(0, 1), maximum = FALSE)$minimum) }else{ nll=function(rho){ integral=NULL simpson <- function(fun, a, b, n=700) { h <- (b-a)/n x <- seq(a, b, by=h) if (n == 2) { s <- fun(x[1]) + 4*fun(x[2]) +fun(x[3]) } else { s <- fun(x[1]) + fun(x[n+1]) + 2*sum(fun(x[seq(2,n,by=2)])) + 4 *sum(fun(x[seq(3,n-1, by=2)])) } s <- s*h/3 return(s) } ll=0 d1=d/n PD1=mean(d1) for(i in 1:length(d)){ d1i=d[i] n1i=n[i] integrand=function(x){ condPD <- pnorm((qnorm(PD1) - sqrt(rho) * x) / sqrt(1 - rho)); return (choose(n1i, d1i) * (condPD^d1i) * ((1 - condPD)^(n1i - d1i)) * dnorm(x)); } integral[i]=simpson(integrand,-10,10,n=10000) if(is.na(integral[i])){integral[i]=1} ll=ll+log(integral[i]) } return(-ll) } Res1<- optimise(nll, interval = c(0, 1), maximum = FALSE)$minimum hessian1<-hessian(nll,Res1) SD<- 1/sqrt(hessian1) CI<- 1-(1-CI)/2 Est<-list(Original =Res1, CI=c(Res1-qnorm(CI)*SD,Res1+qnorm(CI)*SD)) } } Estimate_Standard<- estimate(d,n,CI) ###### if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement d_o<-d[Ib] n_o<-n[Ib] try(theta1[i]<-estimate(d_o,n_o,CI)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement d_i<-d_o[Ic] n_i<-n_o[Ic] try( theta2[c,i]<-estimate(d_i,n_i,CI)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d) BCA=function(data,n, indices){ d <- data[indices,] n<-n[indices] tryCatch(estimate(d,n,CI)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, n=n, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} 
if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} CI=CI_Boot nll=function(rho){ integral=NULL simpson <- function(fun, a, b, n=700) { h <- (b-a)/n x <- seq(a, b, by=h) if (n == 2) { s <- fun(x[1]) + 4*fun(x[2]) +fun(x[3]) } else { s <- fun(x[1]) + fun(x[n+1]) + 2*sum(fun(x[seq(2,n,by=2)])) + 4 *sum(fun(x[seq(3,n-1, by=2)])) } s <- s*h/3 return(s) } ll=0 d1=d/n PD1=mean(d1) for(i in 1:length(d)){ d1i=d[i] n1i=n[i] integrand=function(x){ condPD <- pnorm((qnorm(PD1) - sqrt(rho) * x) / sqrt(1 - rho)); return (choose(n1i, d1i) * (condPD^d1i) * ((1 - condPD)^(n1i - d1i)) * dnorm(x)); } integral[i]=simpson(integrand,-10,10,n=10000) if(is.na(integral[i])){integral[i]=1} ll=ll+log(integral[i]) } return(-ll) } Res1<- optimise(nll, interval = c(0, 1), maximum = FALSE)$minimum hessian1<-hessian(nll,Res1) SD<- 1/sqrt(hessian1) CI<- 1-(1-CI)/2 CI1=c(Res1-qnorm(CI)*SD,Res1+qnorm(CI)*SD) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI=CI1,CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(JC==TRUE){ N=length(d) Test=NULL for(v in 1:N){ d2<-d[-v] n2<-n[-v] try(Test[v]<-estimate(d2,n2,CI)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraMLE.R
intraMode=function(d,n,B=0,DB=c(0,0),JC=FALSE,CI_Boot,type="bca", plot=FALSE){ if(is.numeric(d)){d=d}else{stop("d is not numeric")} if(is.numeric(n)){n=n}else{stop("n is not numeric")} if(B==0&& plot==TRUE){stop("please select a number of bootstrap repititions for the plot")} if(B%%1==0){B=B}else{stop("B is not an integer")} if(DB[1]%%1==0 && DB[2]%%1==0 ){DB=DB}else{stop("At least one entry in DB is not an integer")} if(length(d)==length(n)){}else{stop("Input vectors do not have the same length")} d1=d/n estimate=function(X){ mode_empirical <- function(x) { uniqv <- unique(x) uniqv[which.max(tabulate(match(x, uniqv)))] } mode=mode_empirical(X) if(mode==0){warning("Mode is equal to 0. The estimate may be unstalbe")} PD=mean(X) foo=function(rho){ mode_theoretical=pnorm(sqrt(1-rho)/(1-2*rho)*qnorm(PD)) return(abs(mode_theoretical-mode)) } Est<-list(Original =optimise(foo, interval = c(0, 1), maximum = FALSE)$minimum) } Estimate_Standard<-estimate(d1) if(B>0){ N<-length(n) D<- matrix(ncol=1, nrow=N,d1) BCA=function(data, indices){ d <- data[indices,] tryCatch(estimate(d)$Original,error=function(e)NA) } boot1<- boot(data = D, statistic = BCA, R=B) Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),bValues=boot1$t ) if(missing(CI_Boot)){Estimate_Bootstrap=Estimate_Bootstrap}else{ if(type=="norm"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$normal[2:3])} if(type=="basic"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type)$basic[4:5])} if(type=="perc"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$percent[4:5]} if(type=="bca"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))$bca[4:5]} if(type=="all"){Conf=(boot.ci(boot1,conf=CI_Boot,type = type))} Estimate_Bootstrap<-list(Original = boot1$t0, Bootstrap=2*boot1$t0 - mean(boot1$t,na.rm = TRUE),CI_Boot=Conf,bValues=boot1$t ) } if(plot==TRUE){ Dens<-density(boot1$t, na.rm = TRUE) XY<-cbind(Dens$x,Dens$y) label<-data.frame(rep("Bootstrap density",times=length(Dens$x))) Plot<-cbind(XY,label) colnames(Plot)<-c("Estimate","Density","Label") SD<-cbind(rep(boot1$t0,times=length(Dens$x)), Dens$y,rep("Standard estimate",times=length(Dens$x))) colnames(SD)<-c("Estimate","Density","Label") BC<-cbind(rep(Estimate_Bootstrap$Bootstrap,times=length(Dens$x)), Dens$y,rep("Bootstrap corrected estimate",times=length(Dens$x))) colnames(BC)<-c("Estimate","Density","Label") Plot<-rbind(Plot,SD, BC) Plot$Estimate<-as.numeric(Plot$Estimate) Plot$Density<- as.numeric(Plot$Density) Estimate<-Plot$Estimate Density<-Plot$Density Label<-Plot$Label P<-ggplot() P<-P+with(Plot, aes(x=Estimate, y=Density, colour=Label)) + geom_line()+ scale_colour_manual(values = c("black", "red", "orange"))+ theme_minimal(base_size = 15) + ggtitle("Bootstrap Density" )+ theme(plot.title = element_text(hjust = 0.5),legend.position="bottom",legend.text = element_text(size = 12),legend.title = element_text( size = 12), legend.justification = "center",axis.text.x= element_text(face = "bold", size = 12)) print(P) } } if(DB[1]!=0){ IN=DB[1] OUT=DB[2] theta1=NULL theta2=matrix(ncol = OUT, nrow=IN) for(i in 1:OUT){ N<-length(d1) Ib<-sample(N,N,replace=TRUE) ## sampling with replacement Db<-d1[Ib] try(theta1[i]<-estimate(Db)$Original, silent = TRUE) for(c in 1:IN){ Ic<-sample(N,N,replace=TRUE) ## sampling with replacement Dc<-Db[Ic] try( theta2[c,i]<-estimate(Dc)$Original, silent = TRUE) } } Boot1<- mean(theta1, na.rm = TRUE) Boot2<- mean(theta2, na.rm = TRUE) BC<- 2*Estimate_Standard$Original -Boot1 DBC<- (3*Estimate_Standard$Original-3*Boot1+Boot2) 
Estimate_DoubleBootstrap<-list(Original = Estimate_Standard$Original, Bootstrap=BC, Double_Bootstrap=DBC, oValues=theta1, iValues=theta2) } if(JC==TRUE){ N<-length(n) Test=NULL for(v in 1:N){ d2<-d1[-v] try(Test[v]<-estimate(d2)$Original) } Estimate_Jackknife<-list(Original = Estimate_Standard$Original, Jackknife=(N*Estimate_Standard$Original-(N-1)*mean(Test))) } if(B>0){return(Estimate_Bootstrap)} if(JC==TRUE){return(Estimate_Jackknife)} if(DB[1]!=0){return(Estimate_DoubleBootstrap)} if(B==0 && JC==FALSE && DB[1]==0){return(Estimate_Standard)} }
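## Illustrative usage sketch (added for clarity; not part of the original
## package source), commented out to avoid side effects on sourcing.  The
## inputs are the same default count / portfolio size vectors used by the
## other intra-correlation estimators.
#
#   D <- defaultTimeseries(1000, 0.3, 20, 0.01)
#   N <- rep(1000, 20)
#   intraMode(D, N)              # mode-matching point estimate
#   intraMode(D, N, JC = TRUE)   # with jackknife correction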
/scratch/gouwar.j/cran-all/cranData/AssetCorr/R/intraMode.R
## ----setup, include=FALSE----------------------------------------------------- library(knitr) ## ----------------------------------------------------------------------------- library(AssetCorr) set.seed(111) #number of obligors: 1000 #intra asset correlation: 0.3 #length of the default time series: 20 #probability of default: 0.01 D1=defaultTimeseries(1000,0.3,20,0.01) N1=rep(1000,20) ## ----------------------------------------------------------------------------- intraAMM(D1,N1) ## ----------------------------------------------------------------------------- Output<-intraAMM(D1,N1, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot ## ----------------------------------------------------------------------------- Output<-intraAMM(D1,N1, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap ## ----------------------------------------------------------------------------- intraAMM(D1,N1, JC=TRUE) ## ----------------------------------------------------------------------------- Output<-intraALL(D1,N1, B=500, plot=TRUE,Adjust = 0.0001, Estimator = c("AMM","FMM","CMM","JDP1","JDP2","AMLE") ) Output ## ----------------------------------------------------------------------------- library(mvtnorm) set.seed(2) #number of obligors: 1000 #intra asset correlation 1: 0.3 #intra asset correlation 2: 0.1 #inter correlation of the systematic factors: 0.5 #length of the default time series: 20 #probability of default: 0.01 Psi=rmvnorm(20,sigma=matrix(c(1,0.5,0.5,1),2)) PDcond1=pnorm((qnorm(0.01)-sqrt(0.3)*Psi[,1])/sqrt(1-0.3)) PDcond2=pnorm((qnorm(0.01)-sqrt(0.1)*Psi[,2])/sqrt(1-0.1)) D1=rbinom(20,1000,PDcond1) D2=rbinom(20,1000,PDcond2) N1=N2=rep(1000,20) ## ----------------------------------------------------------------------------- rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original interCov(D1,N1,D2,N2,rho1,rho2) ## ----------------------------------------------------------------------------- #Single bootstrap Correction rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original Output<- interCov(D1,N1,D2,N2,rho1,rho2, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot #Double bootstrap correction Output<- interCov(D1,N1,D2,N2,rho1,rho2, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap #Furthermore, a Jackknife correction would be possible ## ----------------------------------------------------------------------------- #A general overview Output<-interALL(D1,N1,D2,N2,rho1,rho2 ,B=100, plot=TRUE) Output ## ----------------------------------------------------------------------------- #A general overview library(mvtnorm) set.seed(111) NoO=1000 #Number of obligors in each sector Years=20 AC=0.3 PD=0.01 #Calculate the conditional PDs: Psi=rmvnorm(Years,sigma=matrix(c(1,0.5,0.5,0.5,1,0.5,0.5,0.5,1),3)) PDcond1=pnorm((qnorm(PD)-sqrt(AC)*Psi[,1])/sqrt(1-AC)) PDcond2=pnorm((qnorm(PD)-sqrt(AC/2)*Psi[,2])/sqrt(1-AC/2)) PDcond3=pnorm((qnorm(PD)-sqrt(AC*2)*Psi[,3])/sqrt(1-AC*2)) #Draw the default time series, depending on the conditional PDs DTS=cbind(rbinom(Years,NoO,PDcond1),rbinom(Years,NoO,PDcond2),rbinom(Years,NoO,PDcond3)) N=matrix(NoO,nrow = Years,ncol = 3) Output<-analyze_AssetCorr(DTS,N, B=100, Intra = c("AMM","FMM","CMM","JDP1"), Inter=c("Copula","Cov","JDP")) #Furthermore, the analyze_AssetCorr function also proves single/double bootstrap #and Jackknife corrections. Additionally, the confidence intervals #can be estimated and visualized.
/scratch/gouwar.j/cran-all/cranData/AssetCorr/inst/doc/AssetCorr_vignette.R
%\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{An AssetCorr Guide} %\VignetteEncoding{UTF-8} ```{r setup, include=FALSE} library(knitr) ``` # An AssetCorr Guide ## Overview AssetCorr is a package for the estimation of intra and inter asset correlations within the Vasicek Portfolio Model. The implemented methods are: * intra correlation + Asymptotic Method of Moments ("intraAMM", Gordy (2000)) + Finite Sample Method of Moments ("intraFMM", Gordy (2000)) + Joint Default Probability Matching 1 ("intraJDP1", Lucas (1995)) + Joint Default Probability Matching 2 ("intraJDP2", De Servigny and Renault (2002)) + Corrected Method of Moments ("intraCMM", Frei and Wunsch (2018)) + Parametric Apporach- Mode Matching ("intraMode", Botha and van Vuuren (2010)) + Parametric Apporach- Beta Distribution ("intraBeta", Botha and van Vuuren (2010)) + Binomial Maximum Likelihood Estimator ("intraMLE", Gordy and Heitfield (2010)) + Asymptotic Maximum Likelihood Estimator ("intraAMLE", Duellmann and Gehde-Trapp (2004)) * inter correlation + Covariance Matching Estimator ("interCov", Bluhm and Overbeck (2003)) + Joint Default Probability Matching ("interJDP", De Servigny and Renault (2002)) + Corrected Method of Moments ("interCMM", Frei and Wunsch (2018) ) + Binomial Maximum Likelihood Estimator ("interMLE", Gordy and Heitfield (2010)) + Copula Based Maximum Likelihood Estimator (interCopula, Pfeuffer et al. (2020)) ## Bias of the estimation methodologies It is commonly known that these methods are exposed to an estimation bias, especially if the default time series is short, see for example Meyer (2009) or Duellmann et al.(2010). Furthermore, Loeffler (2003) has shown that wrongly estimated correlations have a huge impact on the resulting risk figures. To tackle this issue Pfeuffer et al. (2018) outlined the use of single and double bootstrap correction and additionally jackknife correction approaches to reduce the bias. The suggested correction methods and methods to infer confidence intervals are provided by the package AssetCorr. To illustrate the bias of intra correlation methods, consider the following example: Suppose we have a default time series with the following properties: * Length: 20 observations(years) * Number of obligors: 1000 * Probability of default: 0.01 The following plot displays on the x-axis the true assumed value of the intra asset correlation and on the y-axis the estimated intra asset correlation as the mean of 1000 simulations: ![Bias of the estimation methodologies](RMarkdown2.jpeg) As we can see, the bias of method of moments methodologies can be tremendous, especially as the level of intra correlation increases. Furthermore, the MLE approach shows the best performance. Due to this large bias, Pfeuffer et al. (2020) suggested to use bootstrap and jackknife correction to reduce the bias. The following plot illustrates the combination of MLE and single bootstrap correction: ![Bias of the estimation methodologies](BCMarkdown.jpeg) As one can see, the bootstrap correction moves the original line towards the true value (red line), which implies that the estimation efficiency was improved. This is especially true for the range from 0 to 0.6, which covers the empirical estimated range. In the following, the functionality of AssetCorr will be briefly illustrated using a hypothetical default data time series. 
Load the package and generate a hypothetical default time series with the implemented function: ## Intra correlation ```{r} library(AssetCorr) set.seed(111) #number of obligors: 1000 #intra asset correlation: 0.3 #length of the default time series: 20 #probability of default: 0.01 D1=defaultTimeseries(1000,0.3,20,0.01) N1=rep(1000,20) ``` All methods for estimating the intra correlation have a very similar structure. Hence only one method will be used in the following. At the end of this section, another function will be illustrated which can be used to get an in depth analysis of the underlying default time series. First, the point estimate for the intra correlation will be estimated: ```{r} intraAMM(D1,N1) ``` As one can see, the Asymptotic Method of Moments underestimates the true value of 30% for the intra asset correlation. Hence Pfeuffer et al. (2020) suggested to use resampling methods to reduce this downward bias. Therefore, the single bootstrap correction will be illustrated in the following. This correction comes with the additional advantage that one can infer confidence intervals. Additionally, a plot of the bootstrap density can be generated: ```{r} Output<-intraAMM(D1,N1, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot ``` Furthermore, the double bootstrap correction achieves a higher order of bias reduction, but comes with a substantial increase in computational time. Hence the number of repetitions in the inner and outer loop is reduced to 100, which implies that the single bootstrap corrected estimate is now calculated with 100 repetitions (instead of 1000 as in the previous setting): ```{r} Output<-intraAMM(D1,N1, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap ``` As one can see, the double bootstrap correction produces the closest estimate, compared to the true value of 0.3. Additionally, a jackknife correction is implemented: ```{r} intraAMM(D1,N1, JC=TRUE) ``` As an additional feature, the package AssetCorr provides the possibility to evaluate and visualize a default time series using a list of estimators simultaneously to obtain first insights. The default is set to use all intra correlation estimators: ```{r} Output<-intraALL(D1,N1, B=500, plot=TRUE,Adjust = 0.0001, Estimator = c("AMM","FMM","CMM","JDP1","JDP2","AMLE") ) Output ``` ## Inter correlation Also the inter correlation estimates have a substantial impact on the portfolio's risk, various methods are implemented. First, two correlated default time series are generated. ```{r} library(mvtnorm) set.seed(2) #number of obligors: 1000 #intra asset correlation 1: 0.3 #intra asset correlation 2: 0.1 #inter correlation of the systematic factors: 0.5 #length of the default time series: 20 #probability of default: 0.01 Psi=rmvnorm(20,sigma=matrix(c(1,0.5,0.5,1),2)) PDcond1=pnorm((qnorm(0.01)-sqrt(0.3)*Psi[,1])/sqrt(1-0.3)) PDcond2=pnorm((qnorm(0.01)-sqrt(0.1)*Psi[,2])/sqrt(1-0.1)) D1=rbinom(20,1000,PDcond1) D2=rbinom(20,1000,PDcond2) N1=N2=rep(1000,20) ``` All methods for estimating the intra correlation have a very similar structure. Hence only one method will be used in the following. First, the point estimate for the inter correlation will be calculated. 
For this purpose, the intra correlation estimates of the default time series serve as input: ```{r} rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original interCov(D1,N1,D2,N2,rho1,rho2) ``` Also for inter correlation methods the resampling corrections are implemented: ```{r} #Single bootstrap Correction rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original Output<- interCov(D1,N1,D2,N2,rho1,rho2, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot #Double bootstrap correction Output<- interCov(D1,N1,D2,N2,rho1,rho2, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap #Furthermore, a Jackknife correction would be possible ``` Similar to the estimation of the intra correlations, the AssetCorr package also provides a function to combine several estimation techniques simultaneously. This function gives the user an in depth review of the dependencies of two default time series. ```{r} #A general overview Output<-interALL(D1,N1,D2,N2,rho1,rho2 ,B=100, plot=TRUE) Output ``` ## Evaluating a credit portfolio In practical applications, there are several default time series corresponding to several different sectors. To give the user a first indication of the sectors and their dependencies to other sectors, we provide a detailed analyze function. It estimates the intra and inter correlations for a portfolio using all or selected estimation methods. Suppose, we have three sectors within the portfolio of interest. All sectors have the same intra correlation of 30% and are correlated with an inter correlation of 50%. Similar to the previous review functions, the default is set to use all available estimators: ```{r} #A general overview library(mvtnorm) set.seed(111) NoO=1000 #Number of obligors in each sector Years=20 AC=0.3 PD=0.01 #Calculate the conditional PDs: Psi=rmvnorm(Years,sigma=matrix(c(1,0.5,0.5,0.5,1,0.5,0.5,0.5,1),3)) PDcond1=pnorm((qnorm(PD)-sqrt(AC)*Psi[,1])/sqrt(1-AC)) PDcond2=pnorm((qnorm(PD)-sqrt(AC/2)*Psi[,2])/sqrt(1-AC/2)) PDcond3=pnorm((qnorm(PD)-sqrt(AC*2)*Psi[,3])/sqrt(1-AC*2)) #Draw the default time series, depending on the conditional PDs DTS=cbind(rbinom(Years,NoO,PDcond1),rbinom(Years,NoO,PDcond2),rbinom(Years,NoO,PDcond3)) N=matrix(NoO,nrow = Years,ncol = 3) Output<-analyze_AssetCorr(DTS,N, B=100, Intra = c("AMM","FMM","CMM","JDP1"), Inter=c("Copula","Cov","JDP")) #Furthermore, the analyze_AssetCorr function also proves single/double bootstrap #and Jackknife corrections. Additionally, the confidence intervals #can be estimated and visualized. ``` ## References De Servigny, A. and O. Renault: _Default correlation: empirical evidence._ Working Paper, Standard and Poor's: 90-94, 2003. Available at: https://www.semanticscholar.org/paper/Default-correlation\%3A-empirical-evidence-Servigny-Renault/aae251436d0e3b489951c0d38463d71106755675. Accessed: 05.05.2020 Duellmann, K. and M. Gehde-Trapp: _Systematic risk in recovery rates: an empirical analysis of US corporate credit exposures._ Bundesbank Series 2, Discussion Paper (2): 2004. Available at: http://hdl.handle.net/10419/19729. Accessed: 04.06.2018 Duellmann, K., J. Kuell and M. Kunisch: _Estimating asset correlations from stock prices or default rates- Which method is superior?_ Journal of Economic Dynamics and Control 34(11): 2341-2357, 2010 Efron, B. and R. J. Tibshirani: _An introduction to the bootstrap._ CRC press, 1994 Frei, C. and M. 
Wunsch: _Moment Estimators for Autocorrelated Time Series and their Application to Default Correlations._ Journal of Credit Risk 14: 1-29, 2018 Gordy, M. B.: _A comparative anatomy of credit risk models._ Journal of Banking & Finance 24(1): 119-149, 2000 Gordy, M. B. and E. Heitfield: _Small-sample estimation of models of portfolio credit risk._ In Recent Advances in Financial Engineering: Proceedings of the KIER-TMU International Workshop on Financial Engineering, 2009: Otemachi, Sankei Plaza, Tokyo, 3-4 August 2009: 43-63, World Scientific, 2010 Hoese, S. and S. Huschens: _Confidence intervals for asset correlations in the asymptotic single risk factor model._ In Operations Research Proceedings 2010: 111-116, 2010 Kalkbrener, M. and A. Onwunta: _Validating structural credit portfolio models._ Model risk-identification, measurement and management. Risk Books, London: 233-261, 2010 Loeffler, G. _The effects of estimation error on measures of portfolio credit risk._ Journal of Banking & Finance 27(8): 1427-1453, 2003 Lucas, D. J.: _Default correlation and credit analysis._ The Journal of Fixed Income 4(4): 76-87, 1995 Meyer, C.: _Estimation of intra-sector asset correlations._ The Journal of Risk Model Validation 3(3): 47-79, 2009 Pfeuffer M., M. Nagl , M. Fischer, and D. Roesch: _Parameter Estimation, Bias Correction and Uncertainty Quantification in the Vasicek Credit Portfolio Model._ Journal of Risk (22)1, 2020 Vasicek, O. A: _The distribution of loan portfolio value._ Risk 15(12): 160-162, 2002
/scratch/gouwar.j/cran-all/cranData/AssetCorr/inst/doc/AssetCorr_vignette.Rmd
%\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{An AssetCorr Guide} %\VignetteEncoding{UTF-8} ```{r setup, include=FALSE} library(knitr) ``` # An AssetCorr Guide ## Overview AssetCorr is a package for the estimation of intra and inter asset correlations within the Vasicek Portfolio Model. The implemented methods are: * intra correlation + Asymptotic Method of Moments ("intraAMM", Gordy (2000)) + Finite Sample Method of Moments ("intraFMM", Gordy (2000)) + Joint Default Probability Matching 1 ("intraJDP1", Lucas (1995)) + Joint Default Probability Matching 2 ("intraJDP2", De Servigny and Renault (2002)) + Corrected Method of Moments ("intraCMM", Frei and Wunsch (2018)) + Parametric Apporach- Mode Matching ("intraMode", Botha and van Vuuren (2010)) + Parametric Apporach- Beta Distribution ("intraBeta", Botha and van Vuuren (2010)) + Binomial Maximum Likelihood Estimator ("intraMLE", Gordy and Heitfield (2010)) + Asymptotic Maximum Likelihood Estimator ("intraAMLE", Duellmann and Gehde-Trapp (2004)) * inter correlation + Covariance Matching Estimator ("interCov", Bluhm and Overbeck (2003)) + Joint Default Probability Matching ("interJDP", De Servigny and Renault (2002)) + Corrected Method of Moments ("interCMM", Frei and Wunsch (2018) ) + Binomial Maximum Likelihood Estimator ("interMLE", Gordy and Heitfield (2010)) + Copula Based Maximum Likelihood Estimator (interCopula, Pfeuffer et al. (2020)) ## Bias of the estimation methodologies It is commonly known that these methods are exposed to an estimation bias, especially if the default time series is short, see for example Meyer (2009) or Duellmann et al.(2010). Furthermore, Loeffler (2003) has shown that wrongly estimated correlations have a huge impact on the resulting risk figures. To tackle this issue Pfeuffer et al. (2018) outlined the use of single and double bootstrap correction and additionally jackknife correction approaches to reduce the bias. The suggested correction methods and methods to infer confidence intervals are provided by the package AssetCorr. To illustrate the bias of intra correlation methods, consider the following example: Suppose we have a default time series with the following properties: * Length: 20 observations(years) * Number of obligors: 1000 * Probability of default: 0.01 The following plot displays on the x-axis the true assumed value of the intra asset correlation and on the y-axis the estimated intra asset correlation as the mean of 1000 simulations: ![Bias of the estimation methodologies](RMarkdown2.jpeg) As we can see, the bias of method of moments methodologies can be tremendous, especially as the level of intra correlation increases. Furthermore, the MLE approach shows the best performance. Due to this large bias, Pfeuffer et al. (2020) suggested to use bootstrap and jackknife correction to reduce the bias. The following plot illustrates the combination of MLE and single bootstrap correction: ![Bias of the estimation methodologies](BCMarkdown.jpeg) As one can see, the bootstrap correction moves the original line towards the true value (red line), which implies that the estimation efficiency was improved. This is especially true for the range from 0 to 0.6, which covers the empirical estimated range. In the following, the functionality of AssetCorr will be briefly illustrated using a hypothetical default data time series. 
Load the package and generate a hypothetical default time series with the implemented function: ## Intra correlation ```{r} library(AssetCorr) set.seed(111) #number of obligors: 1000 #intra asset correlation: 0.3 #length of the default time series: 20 #probability of default: 0.01 D1=defaultTimeseries(1000,0.3,20,0.01) N1=rep(1000,20) ``` All methods for estimating the intra correlation have a very similar structure. Hence only one method will be used in the following. At the end of this section, another function will be illustrated which can be used to get an in depth analysis of the underlying default time series. First, the point estimate for the intra correlation will be estimated: ```{r} intraAMM(D1,N1) ``` As one can see, the Asymptotic Method of Moments underestimates the true value of 30% for the intra asset correlation. Hence Pfeuffer et al. (2020) suggested to use resampling methods to reduce this downward bias. Therefore, the single bootstrap correction will be illustrated in the following. This correction comes with the additional advantage that one can infer confidence intervals. Additionally, a plot of the bootstrap density can be generated: ```{r} Output<-intraAMM(D1,N1, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot ``` Furthermore, the double bootstrap correction achieves a higher order of bias reduction, but comes with a substantial increase in computational time. Hence the number of repetitions in the inner and outer loop is reduced to 100, which implies that the single bootstrap corrected estimate is now calculated with 100 repetitions (instead of 1000 as in the previous setting): ```{r} Output<-intraAMM(D1,N1, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap ``` As one can see, the double bootstrap correction produces the closest estimate, compared to the true value of 0.3. Additionally, a jackknife correction is implemented: ```{r} intraAMM(D1,N1, JC=TRUE) ``` As an additional feature, the package AssetCorr provides the possibility to evaluate and visualize a default time series using a list of estimators simultaneously to obtain first insights. The default is set to use all intra correlation estimators: ```{r} Output<-intraALL(D1,N1, B=500, plot=TRUE,Adjust = 0.0001, Estimator = c("AMM","FMM","CMM","JDP1","JDP2","AMLE") ) Output ``` ## Inter correlation Also the inter correlation estimates have a substantial impact on the portfolio's risk, various methods are implemented. First, two correlated default time series are generated. ```{r} library(mvtnorm) set.seed(2) #number of obligors: 1000 #intra asset correlation 1: 0.3 #intra asset correlation 2: 0.1 #inter correlation of the systematic factors: 0.5 #length of the default time series: 20 #probability of default: 0.01 Psi=rmvnorm(20,sigma=matrix(c(1,0.5,0.5,1),2)) PDcond1=pnorm((qnorm(0.01)-sqrt(0.3)*Psi[,1])/sqrt(1-0.3)) PDcond2=pnorm((qnorm(0.01)-sqrt(0.1)*Psi[,2])/sqrt(1-0.1)) D1=rbinom(20,1000,PDcond1) D2=rbinom(20,1000,PDcond2) N1=N2=rep(1000,20) ``` All methods for estimating the intra correlation have a very similar structure. Hence only one method will be used in the following. First, the point estimate for the inter correlation will be calculated. 
For this purpose, the intra correlation estimates of the default time series serve as input: ```{r} rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original interCov(D1,N1,D2,N2,rho1,rho2) ``` Also for inter correlation methods the resampling corrections are implemented: ```{r} #Single bootstrap Correction rho1=intraAMM(D1,N1)$Original rho2=intraAMM(D2,N2)$Original Output<- interCov(D1,N1,D2,N2,rho1,rho2, B=1000, CI_Boot = 0.95, plot=TRUE) Output$Original Output$Bootstrap Output$CI_Boot #Double bootstrap correction Output<- interCov(D1,N1,D2,N2,rho1,rho2, DB=c(100,100)) Output$Original Output$Bootstrap Output$Double_Bootstrap #Furthermore, a Jackknife correction would be possible ``` Similar to the estimation of the intra correlations, the AssetCorr package also provides a function to combine several estimation techniques simultaneously. This function gives the user an in depth review of the dependencies of two default time series. ```{r} #A general overview Output<-interALL(D1,N1,D2,N2,rho1,rho2 ,B=100, plot=TRUE) Output ``` ## Evaluating a credit portfolio In practical applications, there are several default time series corresponding to several different sectors. To give the user a first indication of the sectors and their dependencies to other sectors, we provide a detailed analyze function. It estimates the intra and inter correlations for a portfolio using all or selected estimation methods. Suppose, we have three sectors within the portfolio of interest. All sectors have the same intra correlation of 30% and are correlated with an inter correlation of 50%. Similar to the previous review functions, the default is set to use all available estimators: ```{r} #A general overview library(mvtnorm) set.seed(111) NoO=1000 #Number of obligors in each sector Years=20 AC=0.3 PD=0.01 #Calculate the conditional PDs: Psi=rmvnorm(Years,sigma=matrix(c(1,0.5,0.5,0.5,1,0.5,0.5,0.5,1),3)) PDcond1=pnorm((qnorm(PD)-sqrt(AC)*Psi[,1])/sqrt(1-AC)) PDcond2=pnorm((qnorm(PD)-sqrt(AC/2)*Psi[,2])/sqrt(1-AC/2)) PDcond3=pnorm((qnorm(PD)-sqrt(AC*2)*Psi[,3])/sqrt(1-AC*2)) #Draw the default time series, depending on the conditional PDs DTS=cbind(rbinom(Years,NoO,PDcond1),rbinom(Years,NoO,PDcond2),rbinom(Years,NoO,PDcond3)) N=matrix(NoO,nrow = Years,ncol = 3) Output<-analyze_AssetCorr(DTS,N, B=100, Intra = c("AMM","FMM","CMM","JDP1"), Inter=c("Copula","Cov","JDP")) #Furthermore, the analyze_AssetCorr function also proves single/double bootstrap #and Jackknife corrections. Additionally, the confidence intervals #can be estimated and visualized. ``` ## References De Servigny, A. and O. Renault: _Default correlation: empirical evidence._ Working Paper, Standard and Poor's: 90-94, 2003. Available at: https://www.semanticscholar.org/paper/Default-correlation\%3A-empirical-evidence-Servigny-Renault/aae251436d0e3b489951c0d38463d71106755675. Accessed: 05.05.2020 Duellmann, K. and M. Gehde-Trapp: _Systematic risk in recovery rates: an empirical analysis of US corporate credit exposures._ Bundesbank Series 2, Discussion Paper (2): 2004. Available at: http://hdl.handle.net/10419/19729. Accessed: 04.06.2018 Duellmann, K., J. Kuell and M. Kunisch: _Estimating asset correlations from stock prices or default rates- Which method is superior?_ Journal of Economic Dynamics and Control 34(11): 2341-2357, 2010 Efron, B. and R. J. Tibshirani: _An introduction to the bootstrap._ CRC press, 1994 Frei, C. and M. 
Wunsch: _Moment Estimators for Autocorrelated Time Series and their Application to Default Correlations._ Journal of Credit Risk 14: 1-29, 2018 Gordy, M. B.: _A comparative anatomy of credit risk models._ Journal of Banking & Finance 24(1): 119-149, 2000 Gordy, M. B. and E. Heitfield: _Small-sample estimation of models of portfolio credit risk._ In Recent Advances in Financial Engineering: Proceedings of the KIER-TMU International Workshop on Financial Engineering, 2009: Otemachi, Sankei Plaza, Tokyo, 3-4 August 2009: 43-63, World Scientific, 2010 Hoese, S. and S. Huschens: _Confidence intervals for asset correlations in the asymptotic single risk factor model._ In Operations Research Proceedings 2010: 111-116, 2010 Kalkbrener, M. and A. Onwunta: _Validating structural credit portfolio models._ Model risk-identification, measurement and management. Risk Books, London: 233-261, 2010 Loeffler, G. _The effects of estimation error on measures of portfolio credit risk._ Journal of Banking & Finance 27(8): 1427-1453, 2003 Lucas, D. J.: _Default correlation and credit analysis._ The Journal of Fixed Income 4(4): 76-87, 1995 Meyer, C.: _Estimation of intra-sector asset correlations._ The Journal of Risk Model Validation 3(3): 47-79, 2009 Pfeuffer M., M. Nagl , M. Fischer, and D. Roesch: _Parameter Estimation, Bias Correction and Uncertainty Quantification in the Vasicek Credit Portfolio Model._ Journal of Risk (22)1, 2020 Vasicek, O. A: _The distribution of loan portfolio value._ Risk 15(12): 160-162, 2002
/scratch/gouwar.j/cran-all/cranData/AssetCorr/vignettes/AssetCorr_vignette.Rmd
.onAttach <- function(lib, pkg) {
    ver <- read.dcf(file.path(lib, pkg, "DESCRIPTION"), "Version")
    packageStartupMessage(paste(pkg, ver))
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/First.R
a2sf <- function(x,tt,xlim,npts=1000) {
#
# Convert a function built by approx() to a step function.
#
    if(missing(tt)) {
        if(missing(xlim)) stop("One of tt and xlim must be specified.\n")
        tt <- seq(xlim[1],xlim[2],length=npts)
    }
    y   <- x(tt)
    dy  <- diff(y)
    iii <- which(dy!=0)
    if(length(iii)==0) {
        knots  <- range(tt)
        values <- c(y[1],y[1],y[1])
    } else {
        knots  <- tt[iii]
        values <- c(y[1],y[1+iii])
    }
    stepfun(x=knots,y=values)
}
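## Illustrative sketch (added; not part of the original source): converting a
## piecewise-linear interpolant into a step function on a regular grid.
## Commented out to avoid side effects when the file is sourced.
#
#   f  <- approxfun(x = c(0, 1, 2), y = c(0, 1, 0))
#   sf <- a2sf(f, xlim = c(0, 2), npts = 11)
#   plot(sf)   # step function tracking f() on an 11-point grid over [0, 2]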
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/a2sf.R
buildS <- function(alpha,beta,kn,tmax) {
    K <- length(kn)
# Check that the resulting S function is continuous in x for all
# t in c(0,tmax).
    if(K > 1) {
        OK <- rep(TRUE,K-1)
        for(k in 1:(K-1)) {
            foo <- function(t) {
                (alpha[[k]](t) - alpha[[k+1]](t) +
                 (beta[[k]](t) - beta[[k+1]](t))*kn[k])^2
            }
            chk <- optimize(foo,c(0,tmax),maximum=TRUE,tol=1e-12)
            if(chk$objective > .Machine$double.eps) OK[k] <- FALSE
        }
        if(any(!OK)) {
            NOK  <- !OK
            nbad <- sum(NOK)
            kbad <- paste((1:(K-1))[NOK],collapse=" ")
            stop(paste("S is discontinuous in x at",
                       ngettext(nbad,"knot","each of the knots"),
                       kbad,"for some t.\n"))
        }
    }
# Check that S(x,t) is *non-increasing* in x for all t,
    OK <- rep(TRUE,K)
    for(k in 1:K) {
        chk <- optimize(beta[[k]],c(0,tmax),maximum=TRUE,tol=1e-12)
        if(chk$objective > .Machine$double.eps) OK[k] <- FALSE
    }
    if(any(!OK)) {
        NOK  <- !OK
        nbad <- sum(NOK)
        kbad <- paste((1:(K-1))[NOK],collapse=" ")
        stop(paste("The \"beta\"",ngettext(nbad,"function","functions"),
                   "numbered",kbad,ngettext(nbad,"is","are"),
                   "positive for some values of \"t\" \n",
                   "whence S(x,t) would fail to be non-increasing in \"x\".\n"))
    }
# Check that S(x,t) is *non-negative* for all t.
    foo <- function(t) {
        alpha[[K]](t) + beta[[K]](t)*kn[K]
    }
    chk <- optimize(foo,c(0,tmax),tol=1e-12)
    if(chk$objective < -(.Machine$double.eps))
        stop(paste("S(x,t) fails to be always non-negative and hence\n",
                   "does not define a probability.\n"))
# Check that S(0,t) = 1 for all t,
    foo <- function(t) {
        (alpha[[1]](t) - 1)^2
    }
    chk <- optimize(foo,c(0,tmax),maximum=TRUE,tol=1e-12)
    if(chk$objective > .Machine$double.eps)
        stop("S(0,t) is not equal to 1 for some \"t\".\n")
# OK, we're good to go.
    S <- function(x,t) {
        K   <- length(kn)
        eps <- sqrt(.Machine$double.eps)
        if(any(x < -eps | x > kn[K]+eps))
            stop("At least one price value out of range.\n")
        #if(any(t < -eps | t > tmax+eps))
        #    stop("At least one time value out of range.\n")
        k <- cut(x,c(0,kn),include.lowest=TRUE,labels=1:K)
        k <- as.numeric(levels(k)[k])
        a <- lapply(k,function(i,alpha,t){alpha[[i]](t)},alpha=alpha,t=t)
        b <- lapply(k,function(i,beta,t){beta[[i]](t)},beta=beta,t=t)
        a <- matrix(unlist(a),nrow=length(x),byrow=TRUE)
        b <- matrix(unlist(b),nrow=length(x),byrow=TRUE)
        m <- a + b*x
        if(any(dim(m)==1)) as.vector(m) else m
    }
    environment(S) <- new.env()
    assign("alpha",alpha,envir=environment(S))
    assign("beta",beta,envir=environment(S))
    assign("kn",kn,envir=environment(S))
    assign("tmax",tmax,envir=environment(S))
    attr(S,"tmax")    <- tmax
    attr(S,"funtype") <- "pwl" # Why? Can't remember ....
    parent.env(environment(S)) <- globalenv()
    return(S)
}
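## Illustrative sketch (added; not part of the original source): the simplest
## valid input is a single linear segment S(x,t) = 1 - 0.5*x on [0, 1], which
## passes all of the checks above (S(0,t) = 1, non-increasing and non-negative
## in x).  Commented out to avoid side effects when the file is sourced.
#
#   S <- buildS(alpha = list(function(t) 1),
#               beta  = list(function(t) -0.5),
#               kn    = 1, tmax = 1)
#   S(c(0, 0.5, 1), 0)   # 1.00 0.75 0.50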
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/buildS.R
cev <- function(x,t,v,type,maximize=FALSE) { # # Conditional expected value (given an arrival at # the time in question). Note that objects dS, gpr, alpha # and epsilon are assigned in the environment of cev. # if(!(type%in%c("sip","dip"))) stop(paste("Type",type,"not recognized.\n")) qmax <- length(v) jmax <- length(gpr) R <- numeric(qmax) if(maximize) { # Since we are maximizing we are using either xsolve.disc() or # xsolve.pwl(). If we are using xsolve.disc() then x is a vector # whose entries are the possible discrete prices. If we are using # xsolve.pwl() then x is a list, created by getPossPrices(), with # one entry for each value of q. E <- parent.env(environment()) xback <- E$xback usexb <- !is.null(xback) lopt <- if(type=="sip") qmax else jmax*(qmax+0.5-jmax/2) xopt <- numeric(lopt) for(q in 1:qmax) { vq <- v[q] jtop <- min(q,jmax) rv <- (c(v[q:1],0)[-1])[1:jtop] K <- gpr[1:jtop] if(jtop < jmax) K[jtop] <- K[jtop] + alpha*(1-sum(K)) xc <- if(is.list(x)) x[[q]] else x if(type=="sip") { if(usexb) { xb <- xback[q] xc <- sort(unique(c(xc,xb))) } nx <- length(xc) tmp <- numeric(nx) S <- getS(dS,xc,t,1:jtop) for(i in 1:nx) { tmp[i] <- xc[i]*sum((1:jtop)*S[i,1:jtop]*K) + sum((rv - vq)*S[i,1:jtop]*K) } # New stuff. If there is "no discernable improvement" by changing # to a new price, i.e. if the maximum of tmp is no more than its # value at "the previous price" minus epsilon, then set the new # price equal to the previous value. If there is "discernable # improvement" then look at those values of price where the value # of tmp is greater than its maximum value minus epsilon and pick # the one which is closest to the previous price. This prevents # there being too many jumps, and where there are jumps, prevents # them from being unnecessarily large. Before, we just took the # first of the possible maxima of tmp. If a number of values of # tmp differ only by "numerical noise" then the first maximum can # occur at a price substantially different from the previous price # when there is no meaningful improvement from the change. if(usexb) { mt <- max(tmp) ib <- which(xc==xb) if(mt - tmp[ib] <= epsilon) im <- ib else { ic <- which(tmp > mt - epsilon) im <- ic[which.min(abs(xc[ic]-xb))] } } else im <- which.max(tmp)[1] # End new stuff. R[q] <- tmp[im] + vq xopt[q] <- xc[im] } else { # type == "dip" # Here xc may be a vector of candidate prices --- discrete pricing --- # or a list whose j-th entry is the vector of candidate prices for # groups of size j. iqj <- qj2i(q,1:jtop,qmax) if(usexb) xb <- xback[iqj] Rtmp <- numeric(jtop) xtmp <- numeric(jtop) for(j in 1:jtop) { xcj <- if(is.list(xc)) xc[[j]] else xc if(usexb) xcj <- sort(unique(c(xcj,xb[j]))) nxj <- length(xcj) S <- getS(dS,xcj,t,j) for(i in 1:nxj) { tmp[i] <- (j*xcj[i] + rv[j] - vq)*S[i,1]*K[j] } # New stuff for the "dip" case. if(usexb) { mt <- max(tmp) ib <- which(xcj==xb[j]) if(mt - tmp[ib] <= epsilon) im <- ib else { ic <- which(tmp > mt - epsilon) im <- ic[which.min(abs(xcj[ic]-xb))] } } else im <- which.max(tmp)[1] # End new stuff. 
Rtmp[j] <- tmp[im] xtmp[j] <- xc[im] } R[q] <- sum(Rtmp) + vq iv <- qj2i(q,1:jtop,qmax) xopt[iv] <- xtmp } } attr(R,"xopt") <- xopt } else { S <- getS(dS,x,t,1:jmax) for(q in 1:qmax) { vq <- v[q] jtop <- min(q,jmax) rv <- (c(v[q:1],0)[-1])[1:jtop] K <- gpr[1:jtop] if(jtop < jmax) K[jtop] <- K[jtop] + alpha*(1-sum(K)) if(type=="sip") { R[q] <- x[q]*sum((1:jtop)*S[q,1:jtop]*K) + sum((rv - vq)*S[q,1:jtop]*K) + vq } else { i <- qj2i(q,1:jtop,qmax) R[q] <- sum(((1:jtop)*x[i] + rv - vq)* S[cbind(i,1:jtop)]*K) + vq } } } R }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/cev.R
if(getRversion() >= "2.15.1") utils::globalVariables(c("gpr","dS","alpha","epsilon", "stabilize","type","lambda","x"))
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/declareGlobals.R
findSolType <- function(S,prices) {
#
# Examine the nature of the price sensitivity function and check
# for the existence of "prices", and accordingly return the resulting
# "solution type" ("cont", "disc", or "pwl").
    if(is.function(S)) {
        if(!is.null(prices)) {
            return("disc")
        } else {
            funtype <- attr(S,"funtype")
            if(!is.null(funtype) && funtype == "pwl") return("pwl")
            stop(paste("Argument \"S\" is a function but not of type \"pwl\"\n",
                       "and \"prices\" is not specified. Something is wrong.\n"))
        }
    }
# Expression.
    if(is.expression(S)) {
        return("cont")
    }
# List.
    if(is.list(S)) {
# List of functions --- not allowed in the "pwl" setting.
        if(all(sapply(S,is.function))) {
            if(!is.null(prices)) {
                return("disc")
            } else {
                stop(paste("All entries of \"S\" are functions but \"prices\"\n",
                           "is not specified. Something is wrong.\n"))
            }
        }
# List of expressions.
        if(all(sapply(S,is.expression))) {
            return("cont")
        } else {
            stop(paste("At least one entry of \"S\" is neither a function\n",
                       "nor an expression.\n"))
        }
    }
    stop(paste("Argument \"S\" must be a function, an expression\n",
               "or a list of such.\n"))
}
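## Illustrative sketch (added; not part of the original source), commented out:
## how the three solution types are determined from "S" and "prices".
#
#   findSolType(S = expression(exp(-x*t)), prices = NULL)   # "cont"
#   findSolType(S = function(x, t) 1 - x,  prices = 1:10)   # "disc"
#   # a function carrying attr(S, "funtype") == "pwl" (e.g. built by buildS())
#   # with prices = NULL returns "pwl".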
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/findSolType.R
getPossPrices <- function(v,t,alpha,beta,kn,Kpa,type) {
    K  <- length(kn)
    kn <- c(0,kn)
    rslt <- vector("list",K)
    for(k in 1:K) {
        a   <- alpha[[k]](t)
        b   <- beta[[k]](t)
        xlo <- kn[k]
        xhi <- kn[k+1]
        rslt[[k]] <- turnPts(a,b,v,Kpa,xlo,xhi,type)
    }
    if(type=="sip") {
        newres <- sort(unique(c(unlist(rslt),kn)))
    } else {
        q <- length(v)
        newres <- vector("list",q)
        for(j in 1:q) {
            newres[[j]] <- lapply(1:K,function(i,x,j){x[[i]][[j]]},x=rslt,j=j)
        }
        newres <- lapply(newres,function(x,knots){unique(c(unlist(x),knots))},
                         knots=kn)
    }
    newres
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/getPossPrices.R
getS <- function(dS,xc,t,jays) {
    if(is.list(dS)) {
        S <- lapply(dS[jays],function(f,x,t){f(x,t)},x=xc,t=t)
    } else {
        S <- lapply(jays,function(j,dS,x,t){dS(x,t,j)},dS=dS,x=xc,t=t)
    }
    matrix(unlist(S),nrow=length(xc),ncol=length(jays))
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/getS.R
i2qj <- function(i,qmax,jmax){
    if(jmax > qmax)
        stop(paste("Argument \"jmax\" must be less than",
                   "or equal to \"qmax\".\n"))
    M <- matrix(NA,nrow=qmax,ncol=jmax)
    itop <- jmax*(qmax - (jmax-1)/2)
    M[row(M) >= col(M)] <- 1:itop
    q_all <- row(M)[!is.na(M)]
    j_all <- col(M)[!is.na(M)]
    if(missing(i)) {
        i <- 1:itop
    } else if(any(i > itop)) {
        stop("At least one value of \"i\" out of range.\n")
    }
    list(q=q_all[i],j=j_all[i])
}
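## Illustrative sketch (added; not part of the original source), commented out:
## with qmax = 3 and jmax = 2 there are 5 admissible (q, j) pairs, indexed
## column by column.
#
#   i2qj(qmax = 3, jmax = 2)
#   # $q: 1 2 3 2 3
#   # $j: 1 1 1 2 2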
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/i2qj.R
initx <- function (v,type) {
#
# Note that objects dS, gpr and alpha are assigned in the
# environment of initx.
#
# Set up objective function according to type:
    if(type=="sip") {
        sip <- function(x, vi, gpr) {
            q  <- length(vi)
            vq <- vi[q]
            q  <- min(q,length(gpr))
            rv <- c(vi[q:1],0)[-1]
            if(is.list(dS)) {
                S      <- lapply(dS[1:q],function(f,x){f(x,0)},x=x)
                dSdx   <- sapply(S,function(f){ attr(f,"gradient")[,"x"] })
                d2Sdx2 <- sapply(S,function(f){ attr(f,"hessian")[,"x","x"] })
                S      <- unlist(S)
            } else {
                S      <- dS(x, 0, 1:q)
                dSdx   <- attr(S, "gradient")[, "x"]
                d2Sdx2 <- attr(S, "hessian")[, "x", "x"]
            }
            kappa    <- gpr[1:q]
            kappa[q] <- kappa[q] + alpha*(1-sum(kappa))
            fval     <- x * (sum((1:q) * dSdx * kappa)) +
                        sum((rv * dSdx + (1:q)*S)*kappa) - vq*sum(dSdx*kappa)
            jacobian <- x * (sum((1:q) * d2Sdx2*kappa)) +
                        sum((rv * d2Sdx2 + 2*(1:q)*dSdx)*kappa) -
                        vq*sum(d2Sdx2*kappa)
            list(fval = fval, jacobian = jacobian)
        }
    } else if(type=="dip") {
        dip <- function(x,vqmj,vq,j) {
            S      <- if(is.list(dS)) dS[[j]](x,0) else dS(x,0,j)
            dSdx   <- attr(S,"gradient")[,"x"]
            d2Sdx2 <- attr(S,"hessian")[,"x","x"]
            fval     <- (j*x + vqmj - vq)*dSdx + j*S
            jacobian <- (x*j + vqmj + vq)*d2Sdx2 + 2*j*dSdx
            list(fval=fval,jacobian=jacobian)
        }
    } else {
        stop(paste("Type",type,"not recognized.\n"))
    }
    qmax <- length(v)
    jmax <- length(gpr)
# Solve according to type:
    if(type=="sip") {
        x <- numeric(qmax)
        for (i in 1:qmax) {
            x[i] <- .newt(sip, v[1], vi = v[1:i], gpr=gpr)
        }
    } else {
        N <- jmax*(qmax - jmax/2 + 0.5)
        x <- numeric(N)
        for(q in 1:qmax) {
            vq   <- v[q]
            jtop <- min(q,jmax)
            for(j in 1:jtop) {
                vqmj <- if(j<q) v[q-j] else 0
                i    <- qj2i(q,j,qmax)
                x[i] <- .newt(dip,v[1],vqmj=vqmj,vq=vq,j=j)
            }
        }
    }
    x
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/initx.R
.newt <- function(fn,start,...,eps.p = 1e-08,eps.v = NULL,
                  maxit = 50,verb = FALSE) {
    p.o  <- start
    itno <- 1
    repeat {
        fj <- fn(p.o,...)
        v  <- fj$fval
        t1 <- if(is.null(eps.v)) NULL else sum(abs(v))
        J  <- as.matrix(fj$jacobian)
        if(qr(J)$rank < ncol(J)) {
            cat("Singular Jacobian.\n")
            rslt <- if(is.null(eps.v)) NA else if(t1 < eps.v) p.o else NA
            break
        } else {
            p.n <- p.o - solve(J) %*% v
            t2  <- max(abs(p.n - p.o))
            if(verb) {
                tmp <- format(round(c(p.o,p.n,v,t2,t1),6))
                np  <- length(v)
                v1  <- paste(tmp[1:np],collapse = " ")
                v2  <- paste(tmp[(np + 1):(2 * np)],collapse = " ")
                v3  <- paste(tmp[(2 * np + 1):(3 * np)],collapse = " ")
                v4  <- tmp[3 * np + 1]
                v5  <- tmp[3 * np + 2]
                cat("\nIteration : ",itno,"\n",sep = "")
                cat("Old par : ",v1,"\n",sep = "")
                cat("New par : ",v2,"\n",sep = "")
                cat("Test ch.par: ",v4,"\n",sep = "")
                cat("Fn. vals. : ",v3,"\n",sep = "")
                if(!is.null(t1)) cat("Test f.val: ",v5,"\n",sep = "")
            }
            if((!is.null(t1) && t1 < eps.v) | t2 < eps.p) {
                rslt <- p.n
                break
            }
            itno <- itno + 1
            if(itno > maxit) {
                cat("Newton's method failed to converge in\n")
                cat(maxit,"iterations.\n")
                rslt <- NA
                break
            }
            p.o <- p.n
        }
    }
    as.vector(rslt)
}
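## Illustrative sketch (added; not part of the original source), commented out:
## Newton's method applied to f(p) = p^2 - 2, whose Jacobian is 2p; "froot"
## is a hypothetical name introduced only for this example.
#
#   froot <- function(p) list(fval = p^2 - 2, jacobian = 2*p)
#   .newt(froot, start = 1)   # approximately sqrt(2) = 1.414214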
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/newt.R
plot.AssetPricing <- function(x,witch=c("price","expVal","vdot"),xlim=NULL,
                              ylim=NULL,lty=NULL,cols=NULL,xlab=NULL,ylab=NULL,
                              main=NULL,main.panel=NULL,groups=NULL,add=FALSE,
                              gloss=FALSE,glind=NULL,extend=0.3,col.gloss=1,
                              cex.gloss=0.8,mfrow=NULL,...) {
#
    witch <- match.arg(witch)
    xxx   <- switch(EXPR=witch,price=x[["x"]], expVal=x[["v"]], vdot=x[["vdot"]])
    plot(xxx,xlim=xlim,ylim=ylim,lty=lty,cols=cols,xlab=xlab,ylab=ylab,
         main=main,main.panel=main.panel,groups=groups,add=add,
         gloss=gloss,glind=glind,extend=extend,col.gloss=col.gloss,
         cex.gloss=cex.gloss,mfrow=mfrow,...)
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/plot.AssetPricing.R
plot <- function(x,y,...) { UseMethod("plot") }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/plot.R
plot.default <- function(x,y,...) { graphics::plot(x,y,...) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/plot.default.R
plot.flap <- function(x,xlim=NULL,ylim=NULL,lty=NULL,cols=NULL, xlab=NULL,ylab=NULL,main=NULL,main.panel=NULL, groups=NULL,add=FALSE,gloss=FALSE,glind=NULL, extend=0.3,col.gloss=1,cex.gloss=0.8,mfrow=NULL,...) { # qmax <- attr(x,"qmax") jmax <- attr(x,"jmax") if(is.null(jmax)) jmax <- 1 # If groups is NULL, build it --- with the groups column # running from 1 to length(x). Otherwise make sure its entries # are sensible. if(is.null(groups)) { groups <- cbind(group=1,as.data.frame(i2qj(1:length(x),qmax,jmax))) } else { if(!is.data.frame(groups) || ncol(groups) > 3) stop("Argument \"groups\" must be a data frame with 1, 2 or 3 columns.\n") if(!all(names(groups)%in%c("group","q","j"))) stop("Names of \"groups\" argument are not right.\n") if(ncol(groups)==1) { if(names(groups) != "q") stop("The column name of a 1-column \"groups\" should be \"q\".\n") } if(!("j" %in% names(groups))) { if(jmax > 1) stop(paste("Column \"j\" of \"groups\" must be specified\n", "when \"jmax\" is greater than 1.\n")) groups$j <- 1 } if(!("group" %in% names(groups))) groups$group <- 1:nrow(groups) } # If jmax is 1 make sure that all j-entries of "groups" are equal to 1. if(jmax==1 & !all(groups$j == 1)) stop(paste("Not all of the entries of \"groups$j\" are\n", "equal to 1, but \"jmax\" is 1.\n")) # Make sure all entries of "groups" are in the right range. if(any(groups$q != round(groups$q)) | any(groups$j != round(groups$j)) | any(groups$q <= 0) | any(groups$j <= 0)) stop(paste("Entries of \"groups$q\" and \"groups$j\"\n", "must be postive integers.\n")) if(any(groups$q > qmax) | any(groups$j > pmin(jmax,groups$q))) stop(paste("Some entries of the \"q\" or \"j\" columns\n", "of \"groups\" are out of range.\n")) ng <- length(unique(groups$group)) if(add & ng > 1) stop(paste("Cannot add to an existing plot when there\n", "is more than one group of traces.\n")) # Check on whether "gloss" should be done, and if so, should it be made # (or is it given). do.gloss <- if(is.logical(gloss)) gloss[1] else TRUE make.gloss <- do.gloss && is.logical(gloss) # Make sure "gloss" and "glind", if given, are lists of vectors # whose lengths match up appropriately with the groups in "groups". if(do.gloss) { if(is.null(glind)) { # Add gloss for all traces. glind <- rep(TRUE,nrow(groups)) } else { if(length(glind) != nrow(groups)) stop("Mismatch in length of \"glind\" and nrow of \"groups\".\n") if(!is.logical(glind)) stop("Argument \"glind\" must be a logical vector.\n") } } if(make.gloss) { gloss <- if(jmax==1) { paste("q =",groups$q) } else { paste("q =",groups$q,"j = ",groups$j) } } else if(do.gloss) { if(length(gloss) != nrow(groups)) stop("Mismatch in length of \"gloss\" and nrow of \"groups\".\n") ok <- sapply(1:ng,function(k,groups,gloss){ length(gloss[[k]]) == length(groups[[k]]$q)}, groups=groups,gloss=gloss) } # Blank out those entries of "gloss" where the values # of "glind" are FALSE. if(do.gloss) gloss[!glind] <- "" # Set up multiway array of plots. 
adjMfrow <- !add & (is.null(mfrow) || !is.na(mfrow)) if(adjMfrow) { if(is.null(mfrow)) { if(ng==1) mfrow <- c(1,1) else if(2 <= ng & ng <= 4) mfrow <- c(2,2) else mfrow <- c(3,2) } np <- prod(mfrow) oma <- if(ng > 1 & !is.null(main)) c(0,0,2,0) else rep(0,4) opar <- par(mfrow=mfrow, oma=oma) on.exit(par(opar)) } else np <- 1 # Set up xlab, ylab, main, main.panel and ylim, if(!add) { if(is.null(xlab)) xlab <- "" if(is.null(ylab)) ylab <- "" if(is.null(main)) main <- "" if(ng > 1) { if(is.null(main.panel)) { main.panel <- paste("group",1:ng) } else if(length(main.panel) == 1) { main.panel <- rep(main.panel,ng) } else if(length(main.panel) != ng) { stop("Mismatch in lengths of \"main.panel\" and \"groups\".\n") } } if(is.null(ylim)) ylim <- attr(x,'ylim') if(is.null(ylim)) stop(paste("Argument ylim is missing and x has no", "ylim attribute.\n")) } # Set up xlim. Needed even, if add is FALSE, since we're using # plot.function/plot.stepfun. if(is.null(xlim)) xlim <- attr(x,'tlim') if(is.null(xlim)) stop(paste("Argument xlim is missing and x has no", "\"tlim\" attribute.\n")) xlime <- xlim if(do.gloss) { if(extend < 0 | extend > 1) stop(paste("Crazy value",extend,"for \"extend\".\n")) xlime[2] <- xlime[2] + extend*diff(xlime) } # Set up line types and colours. if(is.null(lty)) lty <- 1 if(is.null(cols)) cols <- 1 lty <- rep(lty,length.out=nrow(groups)) cols <- rep(cols,length.out=nrow(groups)) # A couple of auxiliary constructs ... startPlot <- function(xlim,xlime,ylim,xlab,ylab,main) { plot(0,0,type="n",xlim=xlime,ylim=ylim, xlab=xlab,ylab=ylab,main=main,axes=FALSE) axis(side=2) axis(side=1,at=pretty(xlim)) } if(do.gloss) x0 <- xlim[2] + 0.05*diff(xlim) stride <- inherits(x,"pwc.flap") # Are you ready boots? Start plotting! if(ng==1 & !add) { startPlot(xlim=xlim,xlime=xlime,ylim=ylim, xlab=xlab,ylab=ylab,main=main) } for(kg in 1:ng) { if(ng > 1) { startPlot(xlim=xlim,xlime=xlime,ylim=ylim, xlab=xlab,ylab=ylab,main=main.panel[kg]) } ikg <- groups$group==kg qkg <- groups$q[ikg] jkg <- groups$j[ikg] lkg <- lty[ikg] ckg <- cols[ikg] K <- length(qkg) for(k in 1:K) { i <- qj2i(qkg[k],jkg[k],qmax) if(stride) { plot(x[[i]], xlim=xlim,lty=lkg[k], col=ckg[k], add=TRUE, do.points=FALSE, ...) } else { plot(x[[i]], xlim=xlim,lty=lkg[k], col=ckg[k], add=TRUE,...) } # Marginal gloss. if(do.gloss) { lbl <- gloss[ikg][k] xi <- x[[i]](xlim[2]) text(x0,xi,labels=lbl,adj=0,cex=cex.gloss,col=col.gloss) } } if(ng > 1) { if(kg %% np == 0 | kg == ng) { mtext(outer=TRUE,side=3,line=0,text=main,cex=1.2,font=2) } if(dev.interactive() & kg < ng & kg%%np == 0) readline('Go? ') } } invisible() }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/plot.flap.R
plot.stepfun <- function (x, xval, xlim, ylim = range(c(y, Fn.kn)), xlab = "x", ylab = "f(x)", main = NULL, add = FALSE, verticals = TRUE, do.points = TRUE, pch = par("pch"), col = par("col"), col.points = col, cex.points = par("cex"), col.hor = col, col.vert = col, lty = par("lty"), lwd = par("lwd"), ...) { if (!is.stepfun(x)) { if (is.numeric(x)) { sarg <- substitute(x) x <- ecdf(x) attr(x, "call") <- call("ecdf", sarg) } else stop("'plot.stepfun' called with wrong type of argument 'x'") } if (missing(main)) main <- { cl <- attr(x, "call") deparse(if (!is.null(cl)) cl else sys.call()) } knF <- knots(x) extend <- missing(xlim) xval <- if (missing(xval)) knF else sort(xval) if (extend) { rx <- range(xval) dr <- if (length(xval) > 1) max(0.08 * diff(rx), median(diff(xval))) else abs(xval)/16 xlim <- rx + dr * c(-1, 1) } else { dr <- 0 eps <- sqrt(.Machine$double.eps) xtra <- c(x(min(knF)-eps),x(max(knF)+eps)) } knF <- knF[xlim[1L] - dr <= knF & knF <= xlim[2L] + dr] ti <- c(xlim[1L] - dr, knF, xlim[2L] + dr) ti.l <- ti[-length(ti)] ti.r <- ti[-1L] y <- x(0.5 * (ti.l + ti.r)) n <- length(y) Fn.kn <- x(knF) if (add) segments(ti.l, y, ti.r, y, col = col.hor, lty = lty, lwd = lwd, ...) else { if (missing(ylim)) ylim <- if(extend) range(y, Fn.kn) else range(y,Fn.kn,xtra) plot(0, 0, type = "n", xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, main = main, ...) segments(ti.l, y, ti.r, y, col = col.hor, lty = lty, lwd = lwd) } if (do.points) points(knF, Fn.kn, pch = pch, col = col.points, cex = cex.points) if (verticals) segments(knF, y[-n], knF, y[-1L], col = col.vert, lty = lty, lwd = lwd) invisible(list(t = ti, y = y)) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/plot.stepfun.R
progRep <- function(info,verbInt,tt,tmax) {
    eps    <- .Machine$double.eps
    st.now <- Sys.time()
    elt    <- st.now - info$st.last
    units(elt) <- "secs"
    elt <- as.vector(elt)
    if(elt > verbInt-eps) {
        pct <- max(0,round(100*(tmax - tt)/tmax,1))
        pct <- sprintf("%4.1f",pct)
        cat("Approximately",pct,"percent of [0,tmax] left to cover.\n")
        telt    <- st.now - info$st.first
        overmin <- telt > as.difftime(1,units="mins")
        if(overmin) {
            units(telt) <- "mins"
        } else {
            units(telt) <- "secs"
        }
        telt <- sprintf("%5.2f",round(as.vector(telt),2))
        cat("Total *elapsed* (wall) time approximately ",telt,
            if(overmin) " minutes.\n" else " seconds.\n",sep="")
        info$st.last <- st.now
    }
    invisible()
}
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/progRep.R
putAway <- function(odeRslt,type,jmax,qmax,soltype,x=NULL,prices=NULL) { # # Equation solved; now do a bunch of housekeeping to store # the results in a convenient manner. Note than odeRslt is # a matrix of dimension # (a) nout x (1 + np + 2*qmax) --- for continuous pricing, # and a smooth price sensitivity function, or # (b) nout x (1 + qmax + np + qmax) --- for discrete pricing or # a piecewise linear price sensitivity function, or # (c) nout x (1 + 2*qmax) --- for a given pricing policy # where we are solving only for the expected value v. # In the foregoing np is equal to the total number of price functions. # This is given by np = jmax*(qmax - (jmax-1)/2) # # The first column of odeRslt consists of the times at which the # solution to the differential equation is evaluated. In case (a) # columns 2 through np+1 contain the corresponding values of the # optimal price x, and the last qmax columns contain the corresponding # expected values. In case (b), columns 2 through qmax+1 contain # the optimal expected values, and the last np columns contain the # corresponding optimal prices. In case (c) the last qmax columns # contain expected values (corresponding to the prices in the given # pricing policy). # # If type == "sip" then when putAway() is called, jmax is effectively # 1. In the process of solving the differential equation, jmax is # the length of the vector of group size probabilities. If type is # "dip" then there is a price for each (q,j) combination for j = 1, # ..., min{jmax,q} and hence jmax has an impact on the number of # price functions. However when type is "sip" then there is only # one price for each q value and so the number of price functions # is qmax, the same as it would be if jmax were equal to 1. if(type=="sip") jmax <- 1 tvec <- odeRslt[,1] tmax <- max(tvec) np <- jmax*(qmax - (jmax-1)/2) nc <- ncol(odeRslt) # Put away the prices. # # If putAway() is being called by vsolve() then these prices are # *not* necessarily optimal. In this case "x" is passed as an argument # and nothing needs to be done to it. # Note that if type == "dip" then the entry x[[i]] is equal to # the function x_qj(t) where i = (j-1)*(qmax - j/2) + q. if(is.null(x)) { if(soltype=="cont") xx <- as.data.frame(odeRslt[,2:(np+1)]) else xx <- as.data.frame(odeRslt[,(2+qmax):(1+qmax+np)]) if(soltype=="disc") { x <- lapply(xx,function(x,t){approxfun(x=t,y=x,method="constant", yleft=NA,yright=NA)},t=tvec) x <- lapply(x,a2sf,tt=tvec) } else x <- lapply(xx,function(x,t){splinefun(x=t,y=x)},t=tvec) ylim <- range(xx) attr(x,'tlim') <- c(0,tmax) attr(x,'ylim') <- ylim attr(x,'qmax') <- qmax attr(x,'jmax') <- jmax if(!is.null(prices)) attr(x,'prices') <- prices comment(x) <- "Optimal prices." class(x) <- "flap" if(soltype=="disc") class(x) <- c(class(x),"pwc.flap") # pwc <--> piecewise constant. if(type=="dip") class(x) <- c(class(x),"di.flap") # di <--> doubly indexed. } # Put away the optimal expected values. if(soltype=="cont") vv <- as.data.frame(odeRslt[,(2+np):(1+np+qmax)]) else vv <- as.data.frame(odeRslt[,2:(1+qmax)]) v <- lapply(vv,function(x,t){splinefun(x=t,y=x)},t=tvec) ylim <- range(vv) attr(v,'tlim') <- c(0,tmax) attr(v,'ylim') <- ylim attr(v,'qmax') <- qmax attr(v,'jmax') <- 1 class(v) <- "flap" # Put away the derivatives of the optimal expected values. 
vd <- as.data.frame(odeRslt[,(nc-qmax+1):nc]) vdot <- lapply(vd,function(x,t){splinefun(x=t,y=x)},t=tvec) ylim <- range(vd) attr(vdot,'tlim') <- c(0,tmax) attr(vdot,'ylim') <- ylim attr(vdot,'qmax') <- qmax attr(vdot,'jmax') <- 1 class(vdot) <- "flap" rslt <- list(x=x,v=v,vdot=vdot) class(rslt) <- "AssetPricing" rslt }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/putAway.R
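## Illustrative sketch (not part of the package sources): the column
## layout of odeRslt that putAway() unpacks in the continuous-pricing
## case ("cont"), using np = jmax*(qmax - (jmax-1)/2) price functions
## as described in the comments above. The qmax and jmax values below
## are arbitrary.
qmax <- 5; jmax <- 2
np <- jmax*(qmax - (jmax-1)/2)        # number of price functions
nc <- 1 + np + 2*qmax                 # total columns, case (a)
priceCols <- 2:(np + 1)               # optimal prices x
valueCols <- (2 + np):(1 + np + qmax) # expected values v
derivCols <- (nc - qmax + 1):nc       # derivatives vdot
c(np = np, nc = nc)
priceCols; valueCols; derivCols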
qj2i <- function(q,j,qmax){ # # The index "i" is in position (q,j) of an array where the # first column of the array is 1 to qmax, the second column is # NA, (qmax + 1) to (2*qmax - 1), the third column is # NA, NA, (2*qmax):(3*qmax - 3), and so on. # if(any(j>q)) stop("Index \"j\" must be less than or equal to \"q\".\n") (j-1)*(qmax - j/2) + q }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/qj2i.R
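## Worked example (not part of the package sources): the (q,j) -> i
## mapping computed by qj2i(), defined above, for qmax = 4. Rows index
## q, columns index j; combinations with j > q are undefined and shown
## as NA.
qmax <- 4
sapply(1:qmax, function(j)
    sapply(1:qmax, function(q)
        if (j <= q) qj2i(q, j, qmax) else NA))
##        j=1  j=2  j=3  j=4
## q=1      1   NA   NA   NA
## q=2      2    5   NA   NA
## q=3      3    6    8   NA
## q=4      4    7    9   10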
scrF <- function(tt,v,parms,verbInt,tmax,info,...) { # # The argument "parms" is a dummy, required by ode(). The "..." # argument is not used. The objects lambda, type, gpr and stabilize are # (always) assigned in the environment of scrF. When scrF is called # by xsolve.disc(), the vector "x" of *possible* prices is assigned # in the environment of scrF. When scrF is called by vsolve() # the pricing policy "x" is assigned in the environment of scrF. # When scrF is called by xsolve.pwl() the lists "alpha" and "beta" of # coefficient functions and knot vector "kn" for the piecewise linear # representation of S(x,t) are assigned in the environment of scrF. # # The value of this function is a list whose entries are: # # (a) When this function is called by vsolve (whence the prices # are given): just vdot = ``script F''(v,t), the (unstated :-( )) # vectorized version of equation (2) of Banerjee and Turner (2012) # (b) When this function is called by xsolve.disc (discrete prices) # or by xsolve.pwl() (piecewise linear price sensitivity function): # vdot, x = the vector of optimal prices and vdlit=vdot (again). # The latter two list entries are the "global values that are required # at each point". See the description of the argument "func" to ode() # in the help for ode(). E <- parent.env(environment()) if(is.null(E$x)) { # Here scrF is being called by xsolve.pwl() and the price elasticity # functions are piecewise linear (rather than discrete). We want # to maximize over the possible price values, but first we need to # determine what the possible prices are. Note that "x" is a list # here, with the q-th entry of the list being the vector of possible # optimal prices for stock size "q". qmax <- length(v) jmax <- length(gpr) x <- vector("list",qmax) for(q in 1:qmax) { jtop <- min(q,jmax) Kpa <- gpr[1:jtop] if(jtop < jmax) Kpa[jtop] <- Kpa[jtop] + environment(cev)$alpha*(1-sum(Kpa)) x[[q]] <- getPossPrices(v[1:q],tt,E$alpha,E$beta, E$kn,Kpa,type=type) } } else if(inherits(x,"flap")) { # Here scrF is being called by vsolve() --- pricing policy is given # so x supplies the actual prices. No optimization to be done, # so just return the derivatives of the expected values. xx <- sapply(x,function(f,t){f(t)},t=tt) if(verbInt > 0) progRep(info,verbInt,tt,tmax) return(list(vdot=lambda(tt)*(-v + cev(xx,tt,v,type)))) } # At this point either scrF was called by xsolve.pwl() and the # vector of possible prices x has been constructed, or scrF is being # called by xsolve.disc() and the set of possible (discrete) prices # was provided in the vector x which was assigned in the environment # of this function. In either case the vector of possible prices # is now available and we can maximize over this vector. R <- try(cev(x,tt,v,type,maximize=TRUE)) # The object R consists of the expected values at the optimum prices # (both discrete and pwl settings). It has an attrbute consisting # of the actual optimum prices; Return the vdot values *at* the # optimum prices, and the optimum prices. vdot <- lambda(tt)*(R - v) xopt <- attr(R,"xopt") if(stabilize) assign("xback",xopt,envir=environment(cev)) if(verbInt > 0) progRep(info,verbInt,tt,tmax) list(vdot=vdot,x=xopt,vdlit=vdot) # "lit" for literal. }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/scrF.R
scrG <- function(tt,x,parms,verbInt,tmax,info,...) { # Note that the objects dS, gpr, alpha, and type are # assigned in the environment of scrG. # # This is the value of xdot = {script G}(x,t) (equation (6) of paper. vvdot <- vupdate(x,tt,type) v <- vvdot$v vdot <- vvdot$vdot qmax <- length(v) if(type=="sip") { xdot <- numeric(qmax) for(q in 1:qmax) { vq <- v[q] vdotq <- vdot[q] jtop <- min(q,length(gpr)) jv <- 1:jtop rv <- (c(v[q:1],0)[-1])[1:jtop] rvdot <- (c(vdot[q:1],0)[-1])[1:jtop] K <- gpr[1:jtop] K[jtop] <- K[jtop] + alpha*(1-sum(K)) if(is.list(dS)) { S <- lapply(dS[jv],function(f,xx,tt){f(xx,tt)}, xx=x[q],tt=tt) dSdx <- sapply(S,function(f){ attr(f,"gradient")[,"x"] }) dSdt <- sapply(S,function(f){ attr(f,"gradient")[,"t"] }) d2Sdx2 <- sapply(S,function(f){ attr(f,"hessian")[,"x","x"] }) d2Sdxdt <- sapply(S,function(f){ attr(f,"hessian")[,"x","t"] }) S <- unlist(S) } else { S <- dS(x[q], tt, jv) dSdx <- attr(S, "gradient")[, "x"] dSdt <- attr(S, "gradient")[, "t"] d2Sdx2 <- attr(S, "hessian")[, "x", "x"] d2Sdxdt <- attr(S, "hessian")[, "x", "t"] } num <- -(x[q]*sum(jv*d2Sdxdt*K) + sum((rv-vq)*d2Sdxdt*K) + sum((rvdot-vdotq)*dSdx*K) + sum(jv*dSdt*K)) den <- x[q]*sum(jv*d2Sdx2*K) + sum((rv-vq)*d2Sdx2*K) + 2*sum(jv*dSdx*K) xdot[q] <- num/den } } else if(type=="dip") { N <- length(x) xdot <- numeric(N) jmax <- length(gpr) for(q in 1:qmax) { jtop <- min(q,jmax) for(j in 1:jtop) { i <- qj2i(q,j,qmax) S <- if(is.list(dS)) { dS[[j]](x[i],tt) } else dS(x[i],tt,j) dSdx <- attr(S,"gradient")[,"x"] dSdt <- attr(S,"gradient")[,"t"] d2Sdx2 <- attr(S,"hessian")[,"x","x"] d2Sdxdt <- attr(S,"hessian")[,"x","t"] vdqmj <- if(j<q) vdot[q-j] else 0 num <- j*(S*d2Sdxdt - dSdx*dSdt) - (vdqmj - vdot[q])*dSdx^2 den <- j*(2*dSdx^2 - S*d2Sdx2) xdot[i] <- num/den } } } else stop(paste("Type",type,"unrecognized.\n")) if(verbInt > 0) progRep(info,verbInt,tt,tmax) list(xdot=xdot,v=v,vdot=vdot) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/scrG.R
turnPts <- local({ goodZeroes <- function(dp,xlo,xhi){ zzz <- polyroot(dp) rrr <- sapply(zzz,function(z){isTRUE(all.equal(Im(z),0))}) if(!any(rrr)) return(numeric(0)) zzz <- unique(Re(zzz[rrr])) ok <- xlo <= zzz & zzz <= xhi zzz[ok] } function(a,b,v,Kpa,xlo,xhi,type) { # # Construct the polynomial. q <- length(v) rmax <- suppressWarnings(max(which(Kpa > 0))) if(rmax < 1) return(if(type=="sip") NULL else rep(list(NULL),q)) Kpa <- Kpa[1:rmax] vq <- v[q] if(type=="sip") ply <- 0 else rslt <- vector("list",q) for(r in 1:rmax) { vqmr <- if(r < q) v[q-r] else 0 c1 <- vqmr - vq if(type=="dip") { d1 <- if(r>1) polynomial(c(a,b)) else 1 dply <- d1*polynomial(c(a+b*c1,b*(r+1))) rslt[[r]] <- goodZeroes(dply,xlo,xhi) } else { p1 <- polynomial(c(c1,r)) p2 <- polynomial(c(a,b)) ply <- ply + Kpa[r]*p1*p2^r } } if(type=="dip") return(rslt) # # Take the derivative of ply. dply <- deriv(ply) # # Return the "good" zeroes. goodZeroes(dply,xlo,xhi) }})
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/turnPts.R
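## Illustrative sketch (not part of the package sources): the
## root-filtering step performed by goodZeroes() above, shown with a
## plain coefficient vector and base R's polyroot(). The package code
## applies the same steps to "polynomial" objects from the polynom
## package. The cubic below is an arbitrary example.
dp <- c(-6, 11, -6, 1)            # coefficients of (x-1)(x-2)(x-3)
xlo <- 1.5; xhi <- 10             # price bounds for the bin
zzz <- polyroot(dp)               # all (complex) roots
rrr <- sapply(zzz, function(z) isTRUE(all.equal(Im(z), 0)))
zzz <- unique(Re(zzz[rrr]))       # keep the numerically real roots
zzz[xlo <= zzz & zzz <= xhi]      # roots inside the bounds: 2 and 3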
vsolve <- function(S,lambda,gprob=NULL,tmax=NULL,x,nout=300, alpha=NULL,salval=0,method="lsoda",verbInt=0) { # # Function vsolve to solve numerically the system of d.e.'s for the # value v_q(t) of a stock of q items at time t given a ***general*** # (not necessarily optimal; either discrete or continuous) pricing # policy ``x'', specified as an object of class ``flap''. Uses the # method of Runge-Kutta. (Note that time is ***residual*** time, # decreasing toward the ``departure time'' of 0.) But we solve # ``forward in time'', starting from 0. # # Check on the given pricing policy. if(!inherits(x,"flap")) stop("Argument x must be an object of class \"flap\".\n") qmax <- attr(x,"qmax") type <- if(inherits(x,"di.flap")) "dip" else "sip" # If S is a piecewise linear price sensitivity function, obtain # tmax, if NULL, as the corresponding attribute of that function # otherwise make sure that it is smaller than that attribute. funtype <- attr(S,"funtype") if(!is.null(funtype) && funtype=="pwl") { if(is.null(tmax)) { tmax <- attr(S,"tmax") } else if(tmax > attr(S,"tmax")) { stop(paste("Argument \"tmax\" is greater than the \"tmax\" attribute\n", "of the pwl price sensitivity function specified as\n", "argument \"S\".\n")) } } # Check that the functions in x were built on an interval at least # as large as [0,tmax]. (Since they are step functions, their # values are not really meaningful for arguments outside of the # tlim attribute of x.) if(is.null(tmax)) { tmax <- attr(x,"tlim")[2] } else if(tmax > attr(x,"tlim")[2]) stop("Argument \"x\" has attribute tlim[2] < tmax.\n") # Make sure the group size probabilities are OK. if(is.null(gprob)) gprob <- 1 gpr <- if(is.function(gprob)) gprob(1:qmax) else gprob if(!is.numeric(gpr)) stop("Group size probabilities are not numeric.\n") if(any(gpr<0) | sum(gpr) > 1) stop("Group size probabilities are not probabilities!\n") # Find the value of jmax as determined by gprob. If there is # doubly indexed pricing, check that this value of jmax is less than # or equal to the jmax attribute of x. jmax <- max(which(gpr > sqrt(.Machine$double.eps))) jmax <- min(jmax,qmax) if(type=="dip" & jmax > attr(x,"jmax")) stop(paste("Pricing is group-size dependent and the maximum\n", "customer group size is larger than the\n", "\"jmax\" attribute of \"x\".\n")) # Clip the group size probability vector to be of length jmax. gpr <- gpr[1:jmax] # Check up on alpha. if(is.null(alpha)) { if(jmax > 1) { stop(paste("Argument \"alpha\" must be specified if there is\n", "non-zero probability of a group of size greater than 1.\n")) } alpha <- 1 } N <- length(x) nc <- if(type=="dip") jmax*(qmax - (jmax-1)/2) else qmax if(N != nc) stop("Length of \"x\" incommensurate with \"qmax\" and \"jmax\".\n") # If S is a list make sure that it is of the right length. Then # check that all entries are either expressions (smooth functions # for continuous prices) or functions (for discrete prices). If S # consists of only one function or expression, raise it to the power # "n" making "n" a function argument. In the expressions case # differentiate the expression(s). 
if(is.list(S)) { if(length(S) != jmax) stop(paste("Length of \"S\" as a list must equal ", "\"jmax\" = ",jmax,".\n",sep="")) if(all(sapply(S,is.expression))) { dS <- list() for(i in 1:jmax) { xxx <- deriv3(S[[i]],c("x","t"),function.arg=c("x","t")) environment(xxx) <- new.env() pars <- attr(S[[i]],"parvec") for(nm in names(pars)) { assign(nm,pars[nm],envir=environment(xxx)) } dS[[i]] <- xxx } } else if(!all(sapply(S,is.function))) { stop("At least one entry of \"S\" is neither an expression nor a function.\n") } } else if(is.expression(S)) { # Note that here S is an expression but even so the (counter-intuitive) # syntax ``S[[1]]'' (a) makes ``sense'' and (b) is needed. pars <- attr(S,"parvec") S <- substitute(a^b,list(a=S[[1]],b=quote(n))) dS <- deriv3(S,c("x","t"),function.arg=c("x","t","n")) environment(dS) <- new.env() for(nm in names(pars)) { assign(nm,pars[nm],envir=environment(dS)) } } else if(is.function(S)) { dS <- with(list(S=S),function(x,t,n) {S(x,t)^n}) } else { stop(paste("Argument \"S\" must be either an expression or a list of such,\n", "or a function or a list of such.\n")) } if(is.numeric(lambda)) { if(length(lambda) != 1 || lambda <=0) stop("When \"lambda\" is numeric it must be a positive scalar.\n") lambda <- with(list(lambda=lambda),function(t){rep(lambda,length(t))}) } # Renew the environment of cev() and scrF() to prevent old remnants hanging # around and thereby instigating spurious results. environment(cev) <- new.env() environment(scrF) <- new.env() # assign("dS",dS,envir=environment(cev)) assign("gpr",gpr,envir=environment(cev)) assign("alpha",alpha,envir=environment(cev)) # assign("x",x,envir=environment(scrF)) assign("lambda",lambda,envir=environment(scrF)) assign("type",type,envir=environment(scrF)) # Do some setting up/initializing: tvec <- seq(0,tmax,length=nout) v <- (1:qmax)*salval info <- new.env() info$st.first <- info$st.last <- Sys.time() # Solve the differential equation. odeRslt <- ode(v,tvec,scrF,parms=NULL,method=method, verbInt=verbInt,tmax=tmax,info=info) # The functions in the object x are ``non-parametric'' functions; # they could have been defined over a larger interval than [0,tmax] # which is currently being investigated. If so we need to reset # the tlim and ylim values based on the current value of tmax. # if(tmax < attr(x,"tlim")[2]) { ttt <- seq(0,tmax,length=nout) foo <- function(f,tt) { return(range(f(tt))) } tstor <- lapply(x,foo,tt=ttt) attr(x,'tlim') <- c(0,tmax) attr(x,'ylim') <- range(unlist(tstor)) } comment(x) <- c(comment(x),"Prices not necessarily optimal.") putAway(odeRslt,type,jmax,qmax,soltype="vsolve",x=x,prices=NULL) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/vsolve.R
vupdate <- function(x,tt,type) { # Note that objects lambda, dS, gpr and alpha are assigned in the # environment of vupdate. N <- length(x) if(type=="sip") { qmax <- N v <- numeric(qmax+1) for(q in 1:qmax) { jtop <- min(q,length(gpr)) rv <- (v[q:1])[1:jtop] K <- gpr[1:jtop] K[jtop] <- K[jtop] + alpha*(1-sum(K)) if(is.list(dS)) { S <- lapply(dS[1:jtop],function(f,x,t){ f(x,t)},x=x[q],t=tt) dSdx <- sapply(S,function(f){ attr(f,"gradient")[,"x"] }) S <- unlist(S) } else { S <- dS(x[q], tt, 1:jtop) dSdx <- attr(S, "gradient")[, "x"] } num <- (x[q]*sum((1:jtop)*dSdx*K) + sum(rv*dSdx*K) + sum((1:jtop)*S*K)) den <- sum(dSdx*K) v[q+1] <- num/den } v <- v[-1] } else if(type=="dip") { jmax <- length(gpr) qmax <- N/jmax + (jmax-1)/2 x1 <- x[1:qmax] S <- if(is.list(dS)) dS[1](x1,tt) else dS(x1,tt,1) dSdx <- attr(S,"gradient")[,"x"] v <- cumsum(x1+S/dSdx) } else stop(paste("Type",type,"unrecognized.\n")) vdot <- lambda(tt)*(-v + cev(x,tt,v,type)) list(v=v,vdot=vdot) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/vupdate.R
xsolve <- function(S,lambda,gprob=1,tmax=NULL,qmax,prices=NULL,nout=300, type="sip",alpha=NULL,salval=0,epsilon=NULL, method="lsoda",verbInt=0) { # # Dispatch the problem to one of xsolve.cont(), xsolve.disc(), # xsolve.pwl(), depending on the nature of the price sensitivity # function "S". # soltype <- findSolType(S,prices) if(is.numeric(lambda)) { if(length(lambda) != 1 || lambda <=0) stop("When \"lambda\" is numeric it must be a positive scalar.\n") lambda <- with(list(lambda=lambda),function(t){rep(lambda,length(t))}) } if(is.null(epsilon)) { epsilon <- switch(EXPR=soltype,cont=NULL, disc = (.Machine$double.eps)^0.25, pwl = (.Machine$double.eps)^0.5) } switch(EXPR = soltype, cont = xsolve.cont(S,lambda,gprob,tmax,qmax,nout,type, alpha,salval,method=method,verbInt=verbInt), disc = xsolve.disc(S,lambda,gprob,tmax,qmax,prices,nout,type, alpha,salval,epsilon,method=method,verbInt=verbInt), pwl = xsolve.pwl(S,lambda,gprob,tmax,qmax,nout,type, alpha,salval,epsilon,method=method,verbInt=verbInt) ) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/xsolve.R
xsolve.cont <- function(S,lambda,gprob,tmax,qmax,nout,type, alpha,salval,method,verbInt) { # # Function xsolve.cont to solve numerically the system of d.e.'s for # the optimum price x_q(t) of an item of stock when there are q # items remaining at time t. Of necessity the price x_q(t) must be # ***continuous*** for this function to make sense. Note that time # is residual time, i.e. the time remaining until the expiry date, # and hence decreases as the expiry date approaches. However we # solve forward in time, starting from the expiry date, i.e. t=0. # # Note that # - S is an *expression* or *call*, or a *list* of such, # specifying the time-varying price sensitivity. # - lambda is the intensity of the Poisson arrival process # - gprob specifies the probability function for the size # of the arriving group of customers, either as a function # or as a vector of probabilities. # - tmax is the length of the time interval over which # we are solving the differential equation # - qmax is the initial number of items for sale, tmax # time units from the expiry date # - nout is the number of time points at which the values of # the solution are required. These points will be equispaced # on [0,tmax] # - type is the specification of the model type, either ``sip'' # (singly indexed prices) or ``dip'' (doubly indexed prices). # - salval is the ``salvage value'' of an item of stock when # the expiry date or deadline is reached; used to determine # the initial conditions. # # Note that ``scrG'' means ``script G function''; the (vector) d.e. # that we are solving is # ``x.dot = {script G}(x,t)'' # # Make sure the group size probabilities are OK. gpr <- if(is.function(gprob)) gprob(1:qmax) else gprob if(!is.numeric(gpr)) stop("Group size probabilities are not numeric.\n") if(any(gpr<0) | sum(gpr) > 1) stop("Group size probabilities are not probabilities!\n") jmax <- max(which(gpr > sqrt(.Machine$double.eps))) jmax <- min(jmax,qmax) gpr <- gpr[1:jmax] if(is.null(alpha)) { if(jmax > 1) stop(paste("When the maximum group size is great than 1,\n", "\"alpha\" must be specified.\n")) alpha <- 1 } # If jmax = 1 we might as well set type equal to "sip" --- since # indexing according to group size is "degenerate" in this case. if(jmax==1) type <- "sip" # Make sure tmax is specified. if(is.null(tmax)) stop("Argument \"tmax\" was not specified.\n") # Differentiate each price sensitivity function. If there # is only one such, raise it to the power "n" and differentiate # the result, making "n" a function argument. if(is.list(S)) { if(length(S) < jmax) stop(paste("Length of \"S\" as a list must be at least ", "\"jmax\" = ",jmax,".\n",sep="")) dS <- list() for(i in 1:jmax) { xxx <- deriv3(S[[i]],c("x","t"),function.arg=c("x","t")) environment(xxx) <- new.env() pars <- attr(S[[i]],"parvec") for(nm in names(pars)) { assign(nm,pars[nm],envir=environment(xxx)) } dS[[i]] <- xxx } } else { pars <- attr(S,"parvec") S <- substitute(a^b,list(a=S[[1]],b=quote(n))) dS <- deriv3(S,c("x","t"),function.arg=c("x","t","n")) environment(dS) <- new.env() for(nm in names(pars)) { assign(nm,pars[nm],envir=environment(dS)) } } # Renew the environments of the functions into which objects # are assigned to prevent old remnants hanging around and thereby # instigating spurious results. 
environment(vupdate) <- new.env() environment(scrG) <- new.env() environment(initx) <- new.env() environment(cev) <- new.env() # assign("dS",dS,envir=environment(vupdate)) assign("dS",dS,envir=environment(scrG)) assign("dS",dS,envir=environment(initx)) assign("dS",dS,envir=environment(cev)) # assign("gpr",gpr,envir=environment(vupdate)) assign("gpr",gpr,envir=environment(scrG)) assign("gpr",gpr,envir=environment(initx)) assign("gpr",gpr,envir=environment(cev)) # assign("alpha",alpha,envir=environment(vupdate)) assign("alpha",alpha,envir=environment(scrG)) assign("alpha",alpha,envir=environment(initx)) assign("alpha",alpha,envir=environment(cev)) # assign("lambda",lambda,envir=environment(vupdate)) # assign("type",type,envir=environment(scrG)) # Do some setting up/initializing: tvec <- seq(0,tmax,length=nout) v <- (1:qmax)*salval x <- initx(v,type) info <- new.env() info$st.first <- info$st.last <- Sys.time() # Solve the differential equation. odeRslt <- ode(x,tvec,scrG,parms=NULL,method=method,verbInt=verbInt, tmax=tmax,info=info) putAway(odeRslt,type,jmax,qmax,soltype="cont",x=NULL,prices=NULL) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/xsolve.cont.R
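## Illustrative sketch (not part of the package sources): the deriv3()
## construction used above to turn a single price-sensitivity
## expression into a function of (x, t, n) that carries gradient and
## Hessian attributes. The expression S below is an arbitrary example,
## not a package default.
S <- expression(exp(-2*x/(1 + t)))
Sn <- substitute(a^b, list(a = S[[1]], b = quote(n)))
dS <- deriv3(Sn, c("x", "t"), function.arg = c("x", "t", "n"))
val <- dS(x = 1, t = 0.5, n = 2)  # S(1, 0.5)^2 plus derivatives
c(val)                            # the value itself
attr(val, "gradient")             # partial derivatives in x and t
attr(val, "hessian")              # second order partial derivatives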
xsolve.disc <- function(S,lambda,gprob,tmax,qmax,prices,nout,type, alpha,salval,epsilon,method,verbInt) { # # Function xsolve.disc to solve numerically the system of d.e.'s # for the value v_q(t) of a stock of q items at time t, using the # method of Runge-Kutta, in the setting in which prices vary over a # ***discrete*** set. The optimal prices are then determined by # maximizing over this discrete set. (Note that time is thought # of as ***decreasing*** toward the ``departure time'' of 0.) # But we solve ``forward in time'', starting from 0. # # We are solving the vector system of differential equations # # vdot(t) = {script F}(v,t) # # In what follows, ``script F'' is denoted by ``scrF()''. # In addition to "v" and "t", scrF() has an additional auxilliary # argument "op". This is a logical scalar which defaults to FALSE; # if "op" is TRUE then the value returned by scrF() has an attribute # "xopt" giving the vector of values of the optimal prices (chosen # from amongst the finite set of possible prices. # # Make sure the group size probabilities are OK. gpr <- if(is.function(gprob)) gprob(1:qmax) else gprob if(!is.numeric(gpr)) stop("Group size probabilities are not numeric.\n") if(any(gpr<0) | sum(gpr) > 1) stop("Group size probabilities are not probabilities!\n") jmax <- max(which(gpr > sqrt(.Machine$double.eps))) jmax <- min(jmax,qmax) gpr <- gpr[1:jmax] if(is.null(alpha)) { if(jmax > 1) stop(paste("When the maximum group size is great than 1,\n", "\"alpha\" must be specified.\n")) alpha <- 1 } # If jmax = 1 we might as well set type equal to "sip" --- since # indexing according to group size is "degenerate" in this case. if(jmax==1) type <- "sip" # Make sure tmax is specified. if(is.null(tmax)) stop("Argument \"tmax\" was not specified.\n") # If there is only one price sensitivity function, create a new # function equal to the original function raised to the power "n", with # "n" a function argument. if(is.list(S)) { if(length(S) < jmax) stop(paste("Length of \"S\" as a list must be at least ", "\"jmax\" = ",jmax,".\n",sep="")) if(!all(sapply(S,is.function))) stop("At least one entry of \"S\" is NOT a function.\n") } else if(is.function(S)) { oldS <- S S <- with(list(oldS=S),function(x,t,n) {oldS(x,t)^n}) } else { stop("Argument \"S\" must be either a function or a list of functions.\n") } # Renew the environments of the functions into which objects # are assigned to prevent old remnants hanging around and thereby # instigating spurious results. environment(scrF) <- new.env() environment(cev) <- new.env() # assign("dS",S,envir=environment(cev)) # "dS" to make notation compatible with # the "smooth" case. assign("gpr",gpr,envir=environment(cev)) assign("alpha",alpha,envir=environment(cev)) assign("epsilon",epsilon,envir=environment(cev)) # assign("x",prices,envir=environment(scrF)) assign("lambda",lambda,envir=environment(scrF)) assign("type",type,envir=environment(scrF)) assign("gpr",gpr,envir=environment(scrF)) assign("stabilize",epsilon>0,envir=environment(scrF)) # Do some setting up/initializing. tvec <- seq(0,tmax,length=nout) v <- (1:qmax)*salval info <- new.env() info$st.first <- info$st.last <- Sys.time() # Solve the differental equation: odeRslt <- ode(v,tvec,scrF,parms=NULL,method=method,verbInt=verbInt, tmax=tmax,info=info) putAway(odeRslt,type,jmax,qmax,soltype="disc",x=NULL,prices=prices) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/xsolve.disc.R
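## Illustrative sketch (not part of the package sources): the
## with(list(...), ...) idiom used above fixes the current value of S
## inside the closure, so later rebinding of S cannot change the
## behaviour of the power function.
S <- function(x, t) exp(-x*(1 + t))
Sn <- with(list(oldS = S), function(x, t, n) oldS(x, t)^n)
S <- function(x, t) 0             # rebinding S has no effect on Sn
isTRUE(all.equal(Sn(1, 0.5, 2), exp(-1.5)^2))  # TRUE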
xsolve.pwl <- function(S,lambda,gprob,tmax,qmax,nout,type, alpha,salval,epsilon,method,verbInt) { # # Function xsolve.pwl to solve numerically the system of d.e.'s # for the value v_q(t) of a stock of q items at time t, using # the method of Runge-Kutta, in the setting in which prices vary # continuously but the price sensitivity functions are powers of a # function (the "group size = 1" function) which is piecewise linear # in x. The optimal prices are determined by maximizing over the # discrete set consisting of: # # (1) The ``knots'' x_1, ..., x_K of the price sensitivity function. # (2) The zeros of the derivative of the conditional (given an arrival # at time "t") expected value of the stock. Note that this # conditional expected value is a piecewise polynomial in the # price "x" whence so is its derivative. # # We are solving the vector system of differential equations # # vdot(t) = {script F}(v,t) # # The function "script F" is represented as scrF() in the code. # Make sure the group size probabilities are OK. gpr <- if(is.function(gprob)) gprob(1:qmax) else gprob if(!is.numeric(gpr)) stop("Group size probabilities are not numeric.\n") if(any(gpr<0) | sum(gpr) > 1) stop("Group size probabilities are not probabilities!\n") jmax <- max(which(gpr > sqrt(.Machine$double.eps))) jmax <- min(jmax,qmax) gpr <- gpr[1:jmax] if(is.null(alpha)) { if(jmax > 1) stop(paste("When the maximum group size is great than 1,\n", "\"alpha\" must be specified.\n")) alpha <- 1 } # If jmax = 1 we might as well set type equal to "sip" --- since # indexing according to group size is "degenerate" in this case. if(jmax==1) type <- "sip" # Dig out the upper bound for the values of residual time. if(is.null(tmax)) { tmax <- attr(S,"tmax") } else if(tmax > attr(S,"tmax")) { stop(paste("Argument \"tmax\" is greater than the \"tmax\" attribute\n", "of the pwl price sensitivity function specified as\n", "argument \"S\".\n")) } # Renew the environment of scrF() to prevent old remnants hanging # around and thereby instigating spurious results. environment(scrF) <- new.env() # Stow necessary objects in the environment of scrF. assign("stabilize",epsilon>0,envir=environment(scrF)) assign("type",type,envir=environment(scrF)) assign("lambda",lambda,envir=environment(scrF)) assign("alpha",get("alpha",envir=environment(S)),envir=environment(scrF)) assign("beta",get("beta",envir=environment(S)),envir=environment(scrF)) assign("kn",get("kn",envir=environment(S)),envir=environment(scrF)) assign("gpr",gpr,envir=environment(scrF)) # # There should be only one price sensitivity function. Create a new # function equal to the original function raised to the power "n", with # "n" a function argument; call it "dS" to make notation compatible # with the "smooth" case. dS <- with(list(S=S),function(x,t,n) {S(x,t)^n}) # Renew the environment of cev() to prevent old remnants hanging # around and thereby instigating spurious results. environment(cev) <- new.env() # Stow necessary objects in the environment of cev. assign("dS",dS,envir=environment(cev)) assign("gpr",gpr,envir=environment(cev)) assign("alpha",alpha,envir=environment(cev)) assign("epsilon",epsilon,envir=environment(cev)) # Do some setting up/initializing: tvec <- seq(0,tmax,length=nout) v <- (1:qmax)*salval info <- new.env() info$st.first <- info$st.last <- Sys.time() # Solve the differential equation. odeRslt <- ode(v,tvec,scrF,parms=NULL,method=method,verbInt=verbInt, tmax=tmax,info=info) putAway(odeRslt,type,jmax,qmax,soltype="pwl",x=NULL,prices=NULL) }
/scratch/gouwar.j/cran-all/cranData/AssetPricing/R/xsolve.pwl.R
###### Wcorrected # X being Chi # MAF: matrix (#Snps * 2): First column contains Minor Allele Frequency (MAF) in cases; Second column contains MAF in controls # Pheno: matrix (#subjects * 1): this one-column matrix contains 0's amd 1's: 1 for cases and 0 for controls. No missing values are allowed. # Kin: The kinship matrix (#subjects * #subjects): the subjects must be ordered as the Pheno variable. # Correlation: Correlation matrix between SNPs (#Snps * #Snps). The user should calculate this matrix beforehand. Either based on own genotype data (in cases, controls, or both) or based on public databases (e.g., 1000 Genomes Projects, ESP, etc.). NA values are not allowed. They have to be replaced by zeros. ###### #Correlation <- cor(IND[pheno>=0,7:ncol(IND)]) #Correlation[is.na(Correlation)] <- 0 Wcorrected <- function(MAF, Pheno, Kin, Correlation, Weights) { Na <- length(Pheno[Pheno[, 1] == 1,]) Nu <- length(Pheno[Pheno[, 1] == 0,]) N <- Na + Nu # The three following lines: prepare the phenotype variables OneN <- matrix(1, ncol = 1, nrow = N) Y <- Pheno OneHat <- matrix(Na / N, ncol = 1 , nrow = N) # Estimate MAF in all subjects P <- (MAF[, 1] * Na + MAF[, 2] * Nu) / N if (is.null(Weights)) { # Variance of SNPs (2p(1-p)) VarSnps <- sqrt(P * (1 - P)) } else { # Variance of SNPs (2p(1-p)) accounting for the prespecified Snp weights VarSnps <- Weights * sqrt(P * (1 - P)) } VarSnps <- matrix(VarSnps, ncol = 1) # This value will account for the correlation between Snps. cs <- 2 * t(VarSnps) %*% Correlation %*% VarSnps if (is.null(Weights)) { # Numerator of the Xcorrec test statistic num <- 4 * (sum (Na * MAF[, 1] - Na * P)) ^ 2 } else{ # Numerator of the Xcorrec test statistic num <- 4 * (sum (Na * Weights * MAF[, 1] - Na * Weights * P)) ^ 2 } # Denominator of the Xcorrec test statistic denom <- 2 * as.numeric(cs) * t(Y - OneHat) %*% Kin %*% (Y - OneHat) # Xcorrec test statistic W <- num / denom # Pvalue from a chi-square proba distribution Pvalue <- 1 - pchisq(W, 1) out <- t(data.frame(c(sum(MAF[,1]), sum(MAF[,2]), sum(P), num, denom, W, Pvalue))) colnames(out) <- c("Sum MAF Cases", "Sum MAF Controls", "Sum MAF All Weighted", "Numerator", "Denominator", "Wcorrected", "Pvalue") rownames(out) <- "Statistics" return(out) }
/scratch/gouwar.j/cran-all/cranData/AssocAFC/R/Wcorrected.R
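## Toy example (not part of the package sources): a call to
## Wcorrected() with small simulated inputs for 10 unrelated subjects
## and 3 variants, assuming the AssocAFC package is available. The MAF
## values are arbitrary, the SNPs are treated as uncorrelated, and the
## kinship matrix is assumed to hold kinship coefficients (0.5 on the
## diagonal for unrelated subjects).
library(AssocAFC)
set.seed(1)
nCase <- 5; nCtrl <- 5; nSnp <- 3
Pheno <- matrix(c(rep(1, nCase), rep(0, nCtrl)), ncol = 1)
MAF <- cbind(runif(nSnp, 0.02, 0.10),  # MAF in cases
             runif(nSnp, 0.02, 0.10))  # MAF in controls
Kin <- diag(0.5, nCase + nCtrl)        # unrelated subjects
Correlation <- diag(nSnp)              # independent SNPs
Wcorrected(MAF, Pheno, Kin, Correlation, Weights = NULL)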
###### Wqls # Genotypes: matrix(#Subjects * #Snps). The genotypes are coded as 0, 1, or 2 copies of the minor allele. # MAF: matrix (#Snps * 2): First column contains Minor Allele Frequency (MAF) in cases; Second column contains MAF in controls # Pheno: matrix (#subjects * 1): this one-column matrix contains 0's amd 1's: 1 for cases and 0 for controls. No missing values are allowed. # Kin: The kinship matrix (#subjects * #subjects): the subjects must be ordered as the Pheno variable. # Correlation: Correlation matrix between SNPs (#Snps * #Snps). The user should calculate this matrix beforehand. Either based on own genotype data (in cases, controls, or both) or based on public databases (e.g., 1000 Genomes Projects, ESP, etc.). NA values are not allowed. They have to be replaced by zeros. ###### Wqls <- function(Genotypes, MAF, Pheno, Kin, Correlation, Weights) { Na <- length(Pheno[Pheno[,1]==1,]) Nu <- length(Pheno[Pheno[,1]==0,]) N <- Na + Nu # The three following lines: prepare the phenotype variables OneN <- matrix(1, ncol=1, nrow = N) Y <- Pheno OneHat <- matrix( Na/N, ncol=1 , nrow=N) temp <- Y - OneHat # Estimate MAF in all subjects P <- (MAF[,1]*Na + MAF[,2]*Nu)/N # Number of Snps Nsnps <- nrow(MAF) if (is.null(Weights)) { # Variance of SNPs (2p(1-p)) VarSnps <- sqrt(P*(1-P)) } else { # Variance of SNPs (2p(1-p)) accounting for the prespecified Snp weights VarSnps <- Weights*sqrt(P*(1-P)) } VarSnps <- matrix(VarSnps,ncol=1) # This value will account for the correlation between Snps. cs <- 2*t(VarSnps) %*% Correlation %*% VarSnps if (Nsnps==1) { S<-Genotypes }else { if (is.null(Weights)) { # Rare variant score: sum of minor alleles accross all Snps. S <- apply(Genotypes , 1 , sum) }else { # Rare variant score: sum of minor alleles accross all Snps. S <- Genotypes %*% Weights } } S <- matrix(S,ncol=1,nrow=length(S)) Kin <- 2*Kin KinInv <- solve(Kin) A <- as.numeric(t(Y) %*% KinInv %*% OneN %*% solve(t(OneN) %*% KinInv %*% OneN)) V <- KinInv %*% Y - A * KinInv %*% OneN if (is.null(Weights)) { num <- (sum((S[Y==1]) * rowSums(KinInv[Y==1,Y==1]))*(1-A) -2*sum(MAF[,2])*(A)*Nu)^2; }else { num <- (sum((S[Y==1]) * rowSums(KinInv[Y==1,Y==1]))*(1-A) -2*sum(Weights*MAF[,2])*(A)*Nu)^2; } denom <- as.numeric(cs) * (t(V)%*% Kin %*% V) ; W <- num/denom ; Pvalue <- 1-pchisq(W,1) ; out <- t(data.frame(c(sum(MAF[,1]), sum(MAF[,2]), sum(P), num, denom, W, Pvalue))) colnames(out) <- c("Sum MAF Cases", "Sum MAF Controls", "Sum MAF All Weighted", "Numerator", "Denominator", "Wqls", "Pvalue") rownames(out) <- "Statistics" return(out) }
/scratch/gouwar.j/cran-all/cranData/AssocAFC/R/Wqls.R
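## Toy example (not part of the package sources): a call to Wqls()
## with genotypes simulated for 20 unrelated subjects at 3 variants;
## the MAF matrix is derived from the simulated genotypes so the
## inputs are mutually consistent. As above, the kinship matrix is
## assumed to hold kinship coefficients (0.5 on the diagonal).
library(AssocAFC)
set.seed(2)
nCase <- 10; nCtrl <- 10; nSnp <- 3
Pheno <- matrix(c(rep(1, nCase), rep(0, nCtrl)), ncol = 1)
Genotypes <- matrix(rbinom((nCase + nCtrl)*nSnp, 2, 0.2), ncol = nSnp)
cases <- Pheno[, 1] == 1
MAF <- cbind(colMeans(Genotypes[cases, ])/2,   # MAF in cases
             colMeans(Genotypes[!cases, ])/2)  # MAF in controls
Kin <- diag(0.5, nCase + nCtrl)
Correlation <- diag(nSnp)
Wqls(Genotypes, MAF, Pheno, Kin, Correlation, Weights = NULL)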
###### afcSKAT # MAF: matrix (#Snps * 2): First column contains Minor Allele Frequency (MAF) in cases; Second column contains MAF in controls # Pheno: matrix (#subjects * 1): this one-column matrix contains 0's amd 1's: 1 for cases and 0 for controls. No missing values are allowed. # Kin: The kinship matrix (#subjects * #subjects): the subjects must be ordered as the Pheno variable. # Correlation: Correlation matrix between SNPs (#Snps * #Snps). The user should calculate this matrix beforehand. Either based on own genotype data (in cases, controls, or both) or based on public databases (e.g., 1000 Genomes Projects, ESP, etc.). NA values are not allowed. They have to be replaced by zeros. ###### afcSKAT <- function(MAF, Pheno, Kin, Correlation, Weights) { Na <- length(Pheno[Pheno[,1]==1,]) Nu <- length(Pheno[Pheno[,1]==0,]) N <- Na + Nu # The three following lines: prepare the phenotype variables OneN <- matrix(1, ncol=1, nrow = N) Y <- Pheno OneHat <- matrix( Na/N, ncol=1 , nrow=N) # Estimate MAF in all subjects P <- (MAF[,1]*Na + MAF[,2]*Nu)/N if (is.null(Weights)) { # Variance of SNPs (2p(1-p)) VarSnps <- sqrt(P*(1-P)) } else { # Variance of SNPs (2p(1-p)) accounting for the prespecified Snp weights VarSnps <- Weights*sqrt(P*(1-P)) } VarSnps <- matrix(VarSnps,ncol=1) cz <- 2* sum((Y - OneHat) %*% t(Y - OneHat) * Kin ) Vz <- cz * VarSnps %*% t(VarSnps)*Correlation if (is.null(Weights)) { # Quadratic form without Weights Q <- 4*Na^2*(sum ( (MAF[,1] - P)^2 )) } else { # Quadratic form with Weights Q <- 4*Na^2*(sum ( Weights*(MAF[,1] - P)^2 )) } # Satterwaite approximation (1) E_Q <- sum(diag(Vz)) # Satterwaite approximation (2) V_Q <- 2* sum( diag (Vz %*% Vz)) # Satterwaite approximation (3) Delta <- V_Q/(2*E_Q) # Satterwaite approximation (4) df<- 2*E_Q^2/V_Q # Satterwaite approximation (5) Qscaled <- Q / Delta # Satterwaite approximation (6) Pvalue_Sat <- 1-pchisq(Qscaled, df) # Davies approximation (1) eig <- eigen(Vz, symmetric = T, only.values = T) # Davies approximation (2) evals <- eig$values[eig$values > 1e-06 * eig$values[1]] # Davies approximation (3) Pvalue_Dav <-davies(Q, evals, acc = 1e-5)$Qq out <- t(data.frame(c(Pvalue_Sat,Pvalue_Dav))) colnames(out)<- c("Satterwaite","Davies") rownames(out) <- "Pvalue" return(out) }
/scratch/gouwar.j/cran-all/cranData/AssocAFC/R/afcSKAT.R
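## Toy example (not part of the package sources): a call to afcSKAT()
## on inputs of the same form. The Davies p-value relies on davies()
## from the CompQuadForm package, which afcSKAT() is assumed to have
## available through the package namespace.
library(AssocAFC)
set.seed(3)
nCase <- 10; nCtrl <- 10; nSnp <- 3
Pheno <- matrix(c(rep(1, nCase), rep(0, nCtrl)), ncol = 1)
MAF <- cbind(runif(nSnp, 0.02, 0.10),
             runif(nSnp, 0.02, 0.10))
Kin <- diag(0.5, nCase + nCtrl)
Correlation <- diag(nSnp)
afcSKAT(MAF, Pheno, Kin, Correlation, Weights = NULL)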
##' @title Wrapper for recursive binning ##' @description `binner` is an iterative implementation of a ##' recursive binary partitioning algorithm which accepts the ##' splitting and stopping functions that guide partitioning as ##' arguments. ##' @details `binner` creates a two-dimensional histogram of the ##' sample space of `x` and `y` by recursively splitting partitions of ##' the data using `splitter` until `stopper` indicates that all ##' partitions are not to be split. An optional argument `init` gives ##' the function applied to the first bin containing all points to ##' initialize the binning algorithm. ##' @param x numeric vector of the first variable to be binned ##' @param y numeric vector of the second variable to be binned ##' @param stopper function which accepts a list with elements ##' `x`, `y`, `bnds`, `expn`, and `n` and returns a logical indicating ##' whether a split should occur for the bin defined by that list ##' @param splitter function which accepts a list of lists with ##' elements `x`, `y`, `bnds`, `expn`, and `n` and returns a list ##' where each element is a list of two corresponding to a split of ##' the bin at that position in the original list ##' @param init function like `splitter` applied to the sole first ##' bin ##' @return A list of lists each with elements `x`, `y`, `bnds`, ##' `expn`, and `n`. ##' @examples ##' ## necessary set up ##' crits <- makeCriteria(depth >= 4, n < 10, expn <= 5) ##' stopFn <- function(bns) stopper(bns, crits) ##' spltFn <- function(bn) maxScoreSplit(bn, chiScores) ##' ## generate data ##' x <- sample(1:100) ##' y <- sample(1:100) ##' ## run binner ##' bins <- binner(x, y, stopper = stopFn, splitter = spltFn) ##' @author Chris Salahub binner <- function(x, y, stopper, splitter, init = halfSplit) { ## initialize bin with all the data contained bin <- list(x = x, y = y, # x and y points bnds = list(x = c(0, max(x, na.rm = TRUE)), y = c(0, max(y, na.rm = TRUE))), expn = length(x), # default expectation is n n = length(x), depth = 0) # size, depth binList <- init(bin) # first split, otherwise score max fails stopStatus <- stopper(binList) # initialize logical vector while (any(!stopStatus)) { # check the stop criteria oldBins <- binList[stopStatus] # stopped bins oldStop <- stopStatus[stopStatus] # all TRUE newBins <- lapply(binList[!stopStatus], splitter) # split bins newBins <- unlist(newBins, recursive = FALSE) # simplify newStop <- stopper(newBins) # get stop values binList <- c(oldBins, newBins) # update list of bins stopStatus <- c(oldStop, newStop) # update stop status } binList # return the final list of bins }
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/binner.R
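## Extended usage sketch (not part of the package sources), assembled
## from the @examples documented above: bin two independent random
## permutations, summarize the binning with binChi(), and plot it
## shaded by residual.
library(AssocBin)
set.seed(4)
crits <- makeCriteria(depth >= 4, n < 10, expn <= 5)
stopFn <- function(bns) stopper(bns, crits)
spltFn <- function(bn) maxScoreSplit(bn, chiScores)
x <- sample(1:100)
y <- sample(1:100)
bins <- binner(x, y, stopper = stopFn, splitter = spltFn)
binChi(bins)$stat                  # chi-squared style statistic
plotBinning(bins, fill = residualFill(bins), pch = 19, cex = 0.4)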
##' De-Garched S&P 500 returns
##'
##' This data uses code from the 'zenplots' package to process S&P 500
##' constituent stock returns into uniform pseudo-observations for
##' measuring association.
##'
##' @format
##' A matrix with 755 rows and 461 columns; the rows correspond to
##' dates between 2007 and 2009 and the columns correspond to the
##' different S&P 500 constituent stocks.
##'
##' @usage data(sp500pseudo)
"sp500pseudo"
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/data.R
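## Usage sketch (not part of the package sources): loading and
## inspecting the documented data set.
library(AssocBin)
data(sp500pseudo)
dim(sp500pseudo)    # 755 x 461
range(sp500pseudo)  # pseudo-observations lie in (0, 1)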
##' @title Plot a binning using shaded rectangles ##' @description Use a binning and vector of fill ##' colours to visualize the sample space of pairwise data. ##' @details `plotBinning` plots each bin within a list of bins with ##' custom shading to communicate large residuals, the depth of bins, ##' or highlight particular bins ##' @param bins list of lists each with a named elements `x`, `y`, and ##' `bnds`, the last of which is a list having named elements `x` and ##' `y` ##' @param fill vector of values which can be interpreted as colours ##' of the same length as `bins` ##' @param add logical, should the plot of bins be added to the ##' current plot area? ##' @param xlab string, the label to be placed on the x axis ##' @param ylab string, the label to be placed on the y axis ##' @param ... optional additional arguments to be passed to `plot`, ##' `points` ##' @return A list of lists each with elements `x`, `y`, `bnds`, ##' `expn`, and `n`. ##' @examples ##' bin <- list(x = 1:10, y = sample(1:10), ##' bnds = list(x = c(0, 10), y = c(0, 10)), ##' expn = 10, n = 10, depth = 0) ##' bin2 <- halfSplit(bin, "x") ##' bin3 <- unlist(lapply(bin2, maxScoreSplit, scorer = chiScores), ##' recursive = FALSE) ##' plotBinning(bin3) ##' @author Chris Salahub plotBinning <- function(bins, fill, add = FALSE, xlab = "x", ylab = "y", ...) { if (missing(fill)) fill <- rep(NA, length(bins)) # custom fill option nbins <- length(bins) xbnds <- sapply(bins, function(bn) bn$bnds$x) ybnds <- sapply(bins, function(bn) bn$bnds$y) if (!add) { plot(NA, xlim = range(xbnds), ylim = range(ybnds), xlab = xlab, ylab = ylab, ...) } for (ii in seq_along(bins)) { rect(xbnds[1,ii], ybnds[1,ii], xbnds[2,ii], ybnds[2,ii], col = fill[ii]) points(bins[[ii]]$x, bins[[ii]]$y, ...) # disable with pch = "" } } ##' Shadings ##' @title Generate fills encoding bin features ##' @description These functions all accept a list of bins and return ##' a vector of colours of the same length that encode some feature of ##' the bins. ##' @details Two functions are provided by default: one which ##' generates a fill based on bin depth and the other based on a ##' residual function applied to each bin. ##' @param bins list of bins to be visualized ##' @param colrng hue range to be passed to `colorRampPalette` to ##' generate the final hue scale ##' @param resFun function which returns a result with a name element ##' `residuals` that is a numeric vector of the same length as `bins` ##' @param maxRes numeric maximum value of the residuals to maintain ##' the correct origin, taken to be the maximum observed residual if ##' not provided ##' @param breaks numeric vector of breakpoints to control hues ##' @param nbr number of breakpoints for automatic breakpoint ##' generation if `breaks` is not provided ##' @return A vector of colours the same length as `bins`. 
##' @examples ##' bin <- list(x = 1:10, y = sample(1:10), ##' bnds = list(x = c(0, 10), y = c(0, 10)), ##' expn = 10, n = 10, depth = 0) ##' bin2 <- halfSplit(bin, "x") ##' bin3 <- unlist(lapply(bin2, maxScoreSplit, ##' scorer = chiScores), ##' recursive = FALSE) ##' plotBinning(bin3, fill = depthFill(bin3)) # all the same depth ##' plotBinning(bin3, fill = residualFill(bin3)) # diff resids ##' @author Chris Salahub ##' @describeIn shadings Fill by depth depthFill <- function(bins, colrng = c("floralwhite", "firebrick")) { depths <- sapply(bins, function(bn) bn$depth) colorRampPalette(colrng)(max(depths))[depths] } ##' @describeIn shadings Fill by residual values residualFill <- function(bins, resFun = binChi, maxRes, colrng = c("steelblue", "floralwhite", "firebrick"), breaks = NA, nbr = 50) { residuals <- resFun(bins)$residuals # get residuals if (missing(maxRes)) maxRes <- 1.01*max(abs(residuals)) if (is.na(breaks)) { breaks <- seq(-maxRes, maxRes, length.out = nbr) } residCols <- cut(residuals, breaks) # distribute colors colorRampPalette(colrng)(nbr)[as.numeric(residCols)] }
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/plotBinning.R
##' Scorings ##' @title Scoring functions to choose splits ##' @description These functions define scores to evaluate candidate ##' splits along a single margin within partition. ##' @details Each of these functions accepts `vals`, an ordered ##' numeric vector containing the candidate splits within a bin and ##' the bin bounds all in increasing order. To restrict splitting, ##' they also accept `expn` and `minExp`, which provide the expected ##' count within the split and minimum value of this count, ##' respectively. Any split which produces an expected value less ##' than `minExp` (assuming a uniform density within the bin) is given ##' a score of zero. ##' @param vals numeric vector candidate splits and bounds ##' @param expn the expected number of points in the bin ##' @param minExp the minimum number of points allowed in a bin ##' @return A vector of scores. ##' @examples ##' vals <- c(2, 5, 12, 16, 19) ##' ## restricting the minExp changes output ##' chiScores(vals, 4, minExp = 0) ##' chiScores(vals, 4, minExp = 2) ##' ## same for the miScores ##' miScores(vals, 4, minExp = 0) ##' miScores(vals, 4, minExp = 2) ##' ## random scoring produces different output every time ##' randScores(vals, 4, minExp = 0) ##' randScores(vals, 4, minExp = 0) ##' @author Chris Salahub ##' @describeIn scorings A chi-squared statistic score chiScores <- function(vals, expn, minExp = 0) { diffs <- diff(vals) n <- length(vals) - 2 total <- c(diffs[1]-1, cumsum(diffs)) h1 <- total[1:(n+1)] # length below h2 <- total[n+2] - h1 # length above d <- n/total[n+2] # density i <- 0:n # number below split i ni <- n - i # number above i scr <- (i - d*h1)^2/(h1*d) + (ni - d*h2)^2/(h2*d) scr[is.na(scr)] <- 0 scr[pmin(expn*h1/total[n+2], expn*h2/total[n+2]) < minExp] <- 0 # minimum size limit scr } ##' @describeIn scorings A mutual information score miScores <- function(vals, expn, minExp = 0) { diffs <- diff(vals) n <- length(vals) - 2 total <- c(diffs[1] - 1, cumsum(diffs)) h1 <- total[1:(n+1)] # length below h2 <- total[n+2] - h1 # length above d <- n/total[n+2] # density i <- 0:n # number below point i ni <- n - i # number above i below <- (i/n)*log(i/(d*h1)) above <- (ni/n)*log(ni/(d*h2)) # split expectation below[1] <- 0 above[n+1] <- 0 # handle known zeros scr <- below + above scr[pmin(expn*h1/total[n+2], expn*h2/total[n+2]) < minExp] <- 0 # minimum size limit scr } ##' @describeIn scorings A random score for random splitting randScores <- function(vals, expn, minExp = 0) { diffs <- diff(vals) n <- length(vals) - 2 scores <- runif(length(diffs)) ## compute the expected counts at each split total <- c(diffs[1]-1, cumsum(diffs)) h1 <- total[1:(n+1)] # length below h2 <- total[n+2] - h1 # length above ## if the difference is one, splitting creates bin with area 0 scores[1] <- min(diffs[1]-1, scores[1]) ## difference of zero here does the same scores[length(scores)] <- min(diffs[length(diffs)], scores[length(scores)]) scores[pmin(expn*h1/total[n+2], expn*h2/total[n+2]) < minExp] <- 0 # minimum size limit scores }
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/scoring.R
##` Marginalsplitters ##' @title Helper functions for marginal splitting ##' @description These functions are helpers to safely split bins ##' along X or Y. ##' @details These unexported functions have been defined primarily ##' to clean up other code, but could be changed to obtain different ##' core functionality. ##' @param bin a bin to be split with elements `x`, `y`, `depth`, ##' `bnds` (list with elements `x` and `y`), `expn`, `n` ##' @param bd numeric split point within the bin bounds ##' @param above indices of `x` and `y` points in the bin above `bd` ##' @param below indices of `x` and `y` points in the bin below `bd` ##' @return A list of two bins resulting from the split of `bin` at ##' `bds`. ##' @author Chris Salahub ##' @describeIn marginalsplitters Splitting on x splitX <- function(bin, bd, above, below) { belowfac <- (bd - bin$bnds$x[1])/diff(bin$bnds$x) abovefac <- (bin$bnds$x[2] - bd)/diff(bin$bnds$x) list(list(x = bin$x[below], y = bin$y[below], bnds = list(x = c(bin$bnds$x[1], bd), y = bin$bnds$y), expn = bin$expn*belowfac, n = bin$n-length(above), depth = bin$depth + 1), list(x = bin$x[above], y = bin$y[above], bnds = list(x = c(bd, bin$bnds$x[2]), y = bin$bnds$y), expn = bin$expn*abovefac, n = length(above), depth = bin$depth + 1)) } ##' @describeIn marginalsplitters Splitting on y splitY <- function(bin, bd, above, below) { belowfac <- (bd - bin$bnds$y[1])/diff(bin$bnds$y) abovefac <- (bin$bnds$y[2] - bd)/diff(bin$bnds$y) list(list(x = bin$x[below], y = bin$y[below], bnds = list(x = bin$bnds$x, y = c(bin$bnds$y[1], bd)), expn = bin$expn*belowfac, n = bin$n-length(above), depth = bin$depth + 1), list(x = bin$x[above], y = bin$y[above], bnds = list(x = bin$bnds$x, y = c(bd, bin$bnds$y[2])), expn = bin$expn*abovefac, n = length(above), depth = bin$depth + 1)) } ##' @title Halve at an observed point ##' @description This function halves a bin under the restriction that ##' splits can only occur at observation coordinates. ##' @details Given a bin and a margin, this function splits the bin so ##' half the points are above the new split point and half are below. ##' @param bin a bin to be split with elements `x`, `y`, `depth`, ##' `bnds` (list with elements `x` and `y`), `expn`, `n` ##' @param margin string, one of `x` or `y` ##' @return A list of two bins resulting from the split of `bin` in ##' half along the specified margin ##' @examples ##' bin <- list(x = 1:10, y = sample(1:10), ##' bnds = list(x = c(0, 10), y = c(0, 10)), ##' expn = 10, n = 10, depth = 0) ##' halfSplit(bin) ##' halfSplit(bin, margin = "y") ##' @author Chris Salahub halfSplit <- function(bin, margin = "x") { if (margin == "x") { xsort <- order(bin$x) hind <- floor(bin$n/2) # middle index newbnd <- bin$x[xsort][hind] # middle value above <- xsort[(hind+1):(bin$n)] # points above below <- xsort[1:hind] # and below splitX(bin, bd = newbnd, above = above, below = below) } else if (margin == "y") { ysort <- order(bin$y) hind <- floor(bin$n/2) # middle index newbnd <- bin$y[ysort][hind] # middle value above <- ysort[(hind+1):(bin$n)] # points above below <- ysort[1:hind] splitY(bin, bd = newbnd, above = above, below = below) } else stop("Margin must be one of x or y") } ##' @title Halve continuously to break ties ##' @description This function halves a bin based on the midpoint of ##' the bounds along whichever margin produces the larger score. 
##' @details The goal of this function is to break ties within bin ##' splitting in a way which prevents very small or lopsided bins from ##' forming, a common problem with the `halfSplit` function ##' @param bin a bin to be split with elements `x`, `y`, `depth`, ##' `bnds` (list with elements `x` and `y`), `expn`, `n` ##' @param xscore numeric value giving the score for all splits along ##' x ##' @param yscore numeric value giving the score for all splits along ##' y ##' @return A list of two bins resulting from the split of `bin` in ##' half along the margin corresponding to the larger score. ##' @examples ##' bin <- list(x = 1:10, y = sample(1:10), ##' bnds = list(x = c(0, 10), y = c(0, 10)), ##' expn = 10, n = 10, depth = 0) ##' halfCutTie(bin, 1, 2) # splits on y ##' halfCutTie(bin, 2, 1) # splits on x ##' halfCutTie(bin, 1, 1) # ties are random ##' @author Chris Salahub halfCutTie <- function(bin, xscore, yscore) { u <- as.numeric(yscore > xscore) # prefer to split on max score if (yscore == xscore) u <- runif(1) if (u < 0.5) { # y has a larger score, or random newbnd <- ceiling(mean(bin$bnds$x)) # split value abv <- bin$x > newbnd # which x values are above above <- which(abv) # indices above below <- which(!abv) # indices below splitX(bin, bd = newbnd, above = above, below = below) } else { newbnd <- ceiling(mean(bin$bnds$y)) # split value abv <- bin$y > newbnd # which y values are above above <- which(abv) # indices above below <- which(!abv) # indices below splitY(bin, bd = newbnd, above = above, below = below) } } ##' @title Bivariate score maximizing splitting ##' @description A function which splits a bin based on the location ##' maximizing a score function. ##' @details This function serves as a wrapper which manages the ##' interaction of a score function, marginal splitting functions, ##' tie breaking function, and a maximum selection function to split ##' a bin at the observation coordinate which maximizes the score ##' function. ##' @param bin a bin to be split with elements `x`, `y`, `depth`, ##' `bnds` (list with elements `x` and `y`), `expn`, `n` ##' @param scorer function which accepts a numeric vector of potential ##' split coordinates and the bounds of `bin` and returns a numeric ##' vector of scores for each ##' @param ties function which is called to break ties when all splits ##' generate the same score ##' @param pickMax function which accepts a list of scores and returns ##' the element of the largest score according to some rule ##' @param ... optional additional arguments to `scorer` ##' @return A list of two bins resulting from the split of `bin` ##' along the corresponding margin at the maximum location ##' @examples ##' bin <- list(x = 1:10, y = sample(1:10), ##' bnds = list(x = c(0, 10), y = c(0, 10)), ##' expn = 10, n = 10, depth = 0) ##' maxScoreSplit(bin, chiScores) ##' maxScoreSplit(bin, miScores) # pretty similar for both ##' maxScoreSplit(bin, randScores) ##' maxScoreSplit(bin, randScores) # different every time ##' @author Chris Salahub maxScoreSplit <- function(bin, scorer, ties = halfCutTie, pickMax = which.max, ...) { xsort <- order(bin$x) ysort <- order(bin$y) # get marginal ordering xscore <- scorer(c(bin$bnds$x[1], bin$x[xsort], bin$bnds$x[2]), expn = bin$expn, ...) yscore <- scorer(c(bin$bnds$y[1], bin$y[ysort], bin$bnds$y[2]), expn = bin$expn, ...) 
xmax <- pickMax(xscore) ymax <- pickMax(yscore) # the score values xallEq <- all(abs(xscore - xscore[1]) < sqrt(.Machine$double.eps)) yallEq <- all(abs(yscore - yscore[1]) < sqrt(.Machine$double.eps)) if (xallEq & yallEq) { # in the case of ties, use tie function ties(bin, xscore[1], yscore[1]) } else if (xscore[xmax] >= yscore[ymax]) { # ties go to x xsplts <- bin$x[xsort] newbnd <- c(xsplts[1]-1, xsplts)[xmax] # new boundary below <- xsort[seq_len(xmax-1)] # get indices of points below above <- if (xmax == bin$n+1) integer(0) else xsort[xmax:bin$n] splitX(bin, bd = newbnd, above = above, below = below) } else { # do the same on y ysplts <- bin$y[ysort] newbnd <- c(ysplts[1]-1, ysplts)[ymax] below <- ysort[seq_len(ymax-1)] above <- if (ymax == bin$n+1) integer(0) else ysort[ymax:bin$n] splitY(bin, bd = newbnd, above = above, below = below) } } ##' @title Univariate score maximizing splitting ##' @description A function which splits a bin based on the location ##' maximizing a score function. ##' @details This function is the univariate version of ##' `maxScoreSplit` and so is considerably simpler. It assumes the ##' variable to be split is named `x` in the bin, and the other ##' variable is to remain unsplit. ##' @param bin a bin to be split with elements `x`, `y`, `depth`, ##' `bnds` (list with elements `x` and `y`), `expn`, `n` ##' @param scorer function which accepts a numeric vector of potential ##' split coordinates and the bounds of `bin` and returns a numeric ##' vector of scores for each ##' @param pickMax function which accepts a list of scores and returns ##' the element of the largest score according to some rule ##' @param ... optional additional arguments to `scorer` ##' @return A list of two bins resulting from the split of `bin` at ##' the maximum split location along x ##' @author Chris Salahub uniMaxScoreSplit <- function(bin, scorer = diff, pickMax = which.max, ...) { xsort <- order(bin$x) xscore <- scorer(c(bin$bnds$x[1], bin$x[xsort], bin$bnds$x[2]), expn = bin$expn, ...) xmax <- pickMax(xscore) xsplts <- bin$x[xsort] newbnd <- c(xsplts[1]-1, xsplts)[xmax] # new bin boundary below <- xsort[seq_len(xmax-1)] above <- if (xmax == bin$n+1) integer(0) else xsort[xmax:bin$n] splitX(bin, bd = newbnd, above = above, below = below) }
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/splitters.R
##' Binstatistics
##' @title Statistics for bins
##' @description These functions compute statistics based on observed
##' and expected counts for a list of bins.
##' @details Three functions are provided by default: `binChi`
##' computes the chi-squared statistic by taking the squared
##' difference between observed and expected counts and dividing this
##' by the expected counts. `binMI` computes the mutual information
##' for each bin using the observed and expected counts. Finally,
##' `binAbsDif` computes the absolute difference between observed
##' and expected counts. Each function first computes a value on
##' every bin independently and stores all these values in memory
##' before using the function provided in the optional argument `agg`
##' to aggregate these values.
##' @param bins a list of bins, each a list with elements `x`, `y`,
##' `depth`, `bnds` (list with elements `x` and `y`), `expn`, `n`
##' @param agg function which aggregates the individual statistics
##' computed over each bin
##' @return A list with elements `residuals` and `stat` reporting the
##' individual statistic values (possibly transformed) and the
##' aggregated statistic value.
##' @examples
##' binList1 <- list(list(x = c(1,2), y = c(3,1), depth = 1, n = 2,
##'                       expn = 2),
##'                  list(x = c(3,4), y = c(2,4), depth = 1, n = 2,
##'                       expn = 2))
##' binList2 <- list(list(x = c(1,2), y = c(3,1), depth = 6, n = 2,
##'                       expn = 4),
##'                  list(x = c(), y = c(), depth = 1, n = 0, expn = 1))
##' binChi(binList1)
##' binChi(binList2)
##' binMI(binList1)
##' binMI(binList2)
##' binAbsDif(binList2)
##' @author Chris Salahub
##' @describeIn binstatistics Chi-squared statistic
binChi <- function(bins, agg = sum) {
    obs <- sapply(bins, function(bn) bn$n)
    ex <- sapply(bins, function(bn) bn$expn)
    resids <- (obs - ex)^2/ex
    signs <- sign(obs - ex) # signs of residuals
    list(residuals = signs*sqrt(resids), stat = agg(resids))
}
##' @describeIn binstatistics Mutual information
binMI <- function(bins, agg = sum) {
    obs <- sapply(bins, function(bin) bin$n)
    ex <- sapply(bins, function(bin) bin$expn)
    n <- sum(obs)
    resids <- log(obs/ex)
    resids[obs == 0] <- 0
    probs <- obs/n
    list(residuals = resids, stat = agg(resids*probs))
}
##' @describeIn binstatistics Absolute difference between observed
##' and expected
binAbsDif <- function(bins, agg = sum) {
    obs <- sapply(bins, function(bin) bin$n)
    ex <- sapply(bins, function(bin) bin$expn)
    resids <- abs(obs - ex)
    signs <- sign(obs - ex)
    list(residuals = signs*resids, stat = agg(resids))
}
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/stats.R
##' @title Make stop criteria
##' @description Capture a sequence of logical statements and append
##' them into a single expression.
##' @details This function, along with `stopper`, dictates the stop
##' behaviour of recursive binning. It accepts an arbitrary number
##' of arguments, each a logical statement, and appends them all into
##' a string separated by the pipe character.
##' @param ... an arbitrary number of expressions which evaluate to
##' logicals
##' @return A string which appends all expressions together.
##' @examples
##' makeCriteria(depth >= 5, n < 1)
##' @author Chris Salahub
makeCriteria <- function(...) {
    cl <- match.call() # capturing inputs
    crits <- as.list(cl) # change to a list
    ## remove self reference, collapse into single OR
    paste(sapply(crits[-1], deparse), collapse = " | ")
}

##' @title Check bins against stop criteria
##' @description Evaluate the stop `criteria` for each bin in
##' `binList`
##' @details This function makes use of R's lexical scoping to
##' evaluate `criteria` (a string) within each bin of `binList`.
##' @param binList a list of bins, each a list which can be cast as an
##' environment for evaluation
##' @param criteria string of logical expressions separated by pipes
##' to be evaluated within each bin of `binList`
##' @return A logical vector of the same length as `binList`.
##' @examples
##' crits <- makeCriteria(depth >= 5, n < 1)
##' binList1 <- list(list(x = c(1,2), y = c(3,1), depth = 1, n = 2),
##'                  list(x = c(3,4), y = c(2,4), depth = 1, n = 2))
##' binList2 <- list(list(x = c(1,2), y = c(3,1), depth = 6, n = 2),
##'                  list(x = c(), y = c(), depth = 1, n = 0))
##' stopper(binList1, crits)
##' stopper(binList2, crits)
##' @author Chris Salahub
stopper <- function(binList, criteria) {
    sapply(binList,
           function(b) eval(parse(text = criteria), envir = b))
}
/scratch/gouwar.j/cran-all/cranData/AssocBin/R/stopper.R
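As a small usage sketch (added here, not part of the package file above; it assumes `AssocBin` is attached), `makeCriteria()` and `stopper()` can be exercised on a toy bin list. An unbound name such as `d` may appear in the criteria string and be supplied later through R's scoping rules, which is how the package vignette varies the maximum depth on the fly.

library(AssocBin)

crits <- makeCriteria(expn <= 10, n == 0, depth >= d)
crits  # a single string: "expn <= 10 | n == 0 | depth >= d"

bins <- list(list(x = 1:5,  y = 5:1,  depth = 2, n = 5,  expn = 20),
             list(x = 1:50, y = 50:1, depth = 2, n = 50, expn = 40))

d <- 3                # bound only now, at evaluation time
stopper(bins, crits)  # FALSE FALSE: neither bin meets any criterion
d <- 2
stopper(bins, crits)  # TRUE TRUE: both bins hit the depth limit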
## PACKAGES ########################################################## ## recursive binning package library(AssocBin) ## FUNCTIONS ######################################################### ## custom plotting function with narrow margins narrowPlot <- function(xgrid, ygrid, main = "", xlab = "", ylab = "", xticks = xgrid, yticks = ygrid, xlim = range(xgrid), ylim = range(ygrid), addGrid = TRUE, ...) { plot(NA, ylim = ylim, xlim = xlim, xaxt = 'n', xlab = "", yaxt = 'n', ylab = "", main = "", ...) ## add labels mtext(main, side = 3, line = 0, cex = 0.8) # main mtext(ylab, side = 2, line = 1, cex = 0.8) # ylab mtext(xlab, side = 1, line = 1, padj = 0, cex = 0.8) # xlab ## add grid lines if (addGrid) { abline(h = ygrid, v = xgrid, lty = 1, col = adjustcolor("gray", alpha.f = 0.4)) } ## and ticks mtext(side = 1, at = xgrid, text = "|", line = 0, cex = 0.5, padj = -2) mtext(text = xticks, at = xgrid, side = 1, cex = 0.8) mtext(side = 2, at = ygrid, text = "|", line = 0, cex = 0.5, padj = 1) mtext(text = yticks, at = ygrid, side = 2, cex = 0.8) } ## wrapper to apply function to a nested list and return an array deNest <- function(nstdLst, fn) { lapply(nstdLst, function(olst) { sapply(olst, function(lst) { sapply(lst, fn) }) }) } ## the internal functions to work with deNest getStat <- function(x) x$stat getnBin <- function(x) length(x$residuals) getMaxRes <- function(x) max(abs(x$residuals)) ## SIMULATED DATA PATTERNS ########################################### ## patterns from Newton (2009) provided in a list of functions patFns <- list( wave = function(n) { x <- seq(-1, 1, length=n) u <- x + runif(n)/3; v <- 4*((x^2 - 1/2)^2 + runif(n)/500) cbind(x = u, y = v) }, rotatedSquare = function(n) { x <- runif(n, min = -1, max = 1) y <- runif(n, min = -1, max = 1) theta <--pi/8 rr <- rbind(c(cos(theta), -sin(theta)), c(sin(theta), cos(theta))) tmp <- cbind(x, y) %*% rr colnames(tmp) <- c("x", "y") tmp }, circle = function(n) { x <- runif(n, min = -1, max = 1) y <- runif(n, min = -1, max = 1) theta <- -pi/4 rr <- rbind(c(cos(theta), -sin(theta)), c(sin(theta), cos(theta))) tmp <- cbind(x, y) %*% rr colnames(tmp) <- c("x", "y") tmp }, valley = function(n) { x <- seq(-1,1, length=n ) y <- (x ^2 + runif(n))/2 cbind(x = x, y = y) }, cross = function(n) { x <- seq(-1, 1, length = n) y <- (x^2 + runif(n)/2)*(sample(c(-1,1), size=n, replace = T)) cbind(x = x, y = y) }, ring = function(n) { x <- seq(-1, 1, length = n) u <- sin(x*pi) + rnorm(n)/8 v <- cos(x*pi) + rnorm(n)/8 cbind(x = u, y = v) }, noise = function(n) { dx <- rnorm(n)/3 dy <- rnorm(n)/3 cx <- sample(c(-1, 1), size=n, replace = T) cy <- sample(c(-1, 1), size=n, replace = T) u <- cx + dx v <- cy + dy cbind(x = u, y = v) }) ## write a wrapper for these patterns to generate an array of all generatePatterns <- function(n) { simplify2array(lapply(patFns, function(fn) fn(n))) } ## generate many repetitions of each to bin set.seed(70111238) n <- 1000 nsim <- 100 simData <- replicate(nsim, generatePatterns(n)) ## plot the first data realization m <- 1 pal <- c(hcl.colors(6, "Set2"), "black") oldPar <- par(mfrow=c(1,7), mar=c(1,1,1,1)/2) for(i in 1:7) { plot(simData[, "x", i, 1], simData[, "y", i, 1], xlab = "", ylab = "", axes = F, pch = 19, cex = 0.2, col = pal[i]) } ## convert this data into pairwise ranks and plot it simXr <- apply(simData[, "x", , ], c(2, 3), rank) simYr <- apply(simData[, "y", , ], c(2, 3), rank) for(i in 1:7) { plot(simXr[, i, 1], simYr[, i, 1], xlab = "", ylab = "", axes= F, pch = 19, cex = 0.2, col = pal[i]) } ## try the binning 
algorithm on these data ## define a range of depths depths <- 1:10 ## define the criteria dynamically, works due to R's lexical scoping crits <- makeCriteria(depth >= ii, expn <= 10, n == 0) ## define the stop function stopFn <- function(bns) stopper(bns, crits) ## and splitting functions chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5) rndSplit <- function(bn) maxScoreSplit(bn, randScores, minExp = 5) ## allocate storage for every split method testChiBins <- vector("list", nsim) testRndBins <- vector("list", nsim) ## bin each realization (takes a few minutes) for (jj in 1:nsim) { ## each list element is also a list for each testChiBins[[jj]] <- vector("list", length(depths)) testRndBins[[jj]] <- vector("list", length(depths)) for (ii in seq_along(depths)) { # iterate through depths ## chi bins for each pattern testChiBins[[jj]][[ii]] <- lapply(1:7, function(kk) { binner(simXr[, kk, jj], simYr[, kk, jj], stopper = stopFn, splitter = chiSplit) }) ## random bins for each pattern testRndBins[[jj]][[ii]] <- lapply(1:7, function(kk) { binner(simXr[, kk, jj], simYr[, kk, jj], stopper = stopFn, splitter = rndSplit) }) } } ## compute the chi square statistics for each split method testChiChi <- lapply(testChiBins, # nested list makes it ugly function(lst) { lapply(lst, function(el) lapply(el, binChi)) }) testRndChi <- lapply(testRndBins, ## ... and random splitting function(lst) { lapply(lst, function(el) lapply(el, binChi)) }) ## deNest these to get the relevant statistics chiPaths <- deNest(testChiChi, getStat) chiNbin <- deNest(testChiChi, getnBin) rndPaths <- deNest(testRndChi, getStat) rndNbin <- deNest(testRndChi, getnBin) ## plot the paths of every pattern under different splitting regimes ## compared to the null nNull <- 500 nullChiBins <- vector(mode = "list", nsim) nullRndBins <- vector(mode = "list", nsim) for (jj in 1:nNull) { ## each list element is also a list for each nullChiBins[[jj]] <- vector("list", length(depths)) nullRndBins[[jj]] <- vector("list", length(depths)) for (ii in seq_along(depths)) { # iterate through depths randx <- sample(1:n) randy <- sample(1:n) ## chi bins on random noise nullChiBins[[jj]][[ii]] <- binner(randx, randy, stopper = stopFn, splitter = chiSplit) ## random bins for each pattern nullRndBins[[jj]][[ii]] <- binner(randx, randy, stopper = stopFn, splitter = rndSplit) } } ## compute the chi square statistics for each split method nullChiChi <- lapply(nullChiBins, function(lst) { lapply(lst, binChi) }) nullRndChi <- lapply(nullRndBins, function(lst) { lapply(lst, binChi) }) ## deNest these to get the relevant statistics nullChiPaths <- sapply(nullChiChi, function(lst) sapply(lst, getStat)) nullChiNbin <- sapply(nullChiChi, function(lst) sapply(lst, getnBin)) nullRndPaths <- sapply(nullRndChi, function(lst) sapply(lst, getStat)) nullRndNbin <- sapply(nullRndChi, function(lst) sapply(lst, getnBin)) ## plot paths for an individual random split par(mfrow = c(1,1), mar = c(2.1, 2.1, 1.1, 1.1)) narrowPlot(xgrid = seq(0, 160, by = 40), xlab = "Number of bins", ygrid = seq(0, 1600, by = 400), ylab = expression(chi^2~statistic)) for (ii in 1:nNull) { # add the null lines lines(nullRndNbin[ ,ii], nullRndPaths[ ,ii], col = adjustcolor("gray", 0.1)) } ## add these as lines to the plot of null lines for (jj in 1:7) { lines(rndNbin[[1]][jj,], rndPaths[[1]][jj,], col = pal[jj]) points(rndNbin[[1]][jj,], rndPaths[[1]][jj,], col = pal[jj], pch = 19, cex = 0.5) } ## add the 95% chi quantile lines(1:160, qchisq(0.95, 1:160), lty = 2) ## make the same plot 
for paths from chi splitting narrowPlot(xgrid = seq(0, 160, by = 40), xlab = "Number of bins", ygrid = seq(0, 1600, by = 400), ylab = expression(chi^2~statistic)) for (ii in 1:nNull) { lines(nullChiNbin[ ,ii], nullChiPaths[ ,ii], col = adjustcolor("gray", 0.1)) } for (jj in 1:7) { lines(chiNbin[[1]][jj,], chiPaths[[1]][jj,], col = pal[jj]) points(chiNbin[[1]][jj,], chiPaths[[1]][jj,], col = pal[jj], pch = 19, cex = 0.5) } ## for the random split repetitions, plot every one narrowPlot(xgrid = seq(0, 160, by = 40), xlab = "Number of bins", ygrid = seq(0, 1200, by = 300), ylim = c(0, 1300), ylab = expression(chi^2~statistic)) for (ii in 1:nNull) { # add the null lines lines(nullRndNbin[ ,ii], nullRndPaths[ ,ii], col = adjustcolor("gray", 0.1)) } for (jj in 1:7) { for (ii in 1:100) { lines(rndNbin[[ii]][jj,], rndPaths[[ii]][jj,], col = adjustcolor(pal[jj], 0.2)) } } lines(1:160, qchisq(0.95, 1:160), lty = 2) ## do the same for the chi splits narrowPlot(xgrid = seq(0, 160, by = 40), xlab = "Number of bins", ygrid = seq(0, 1200, by = 300), ylim = c(0, 1300), ylab = expression(chi^2~statistic)) for (ii in 1:nNull) { lines(nullChiNbin[ ,ii], nullChiPaths[ ,ii], col = adjustcolor("gray", 0.1)) } for (jj in 1:7) { for (ii in 1:100) { lines(chiNbin[[ii]][jj,], chiPaths[[ii]][jj,], col = adjustcolor(pal[jj], 0.2)) } } lines(1:160, qchisq(0.95, 1:160), lty = 2) ## next, check the bins for every depth ## start by getting the maximum residual to make the shading constant maxRes <- max(sapply(unlist(testChiChi[[1]], recursive = FALSE), getMaxRes)) ## for every depth, display the binning for each pattern par(mfrow=c(7,7), mar=c(1,1,1,1)/2) for (depth in 4:10) { for(i in 1:7) { plot(NA, ylim = c(1, n), xlim = c(1, n), # remove axes axes = F, xlab = "", ylab = "", main = "") plotBinning(testChiBins[[1]][[depth]][[i]], pch = 19, cex = 0.1, add = TRUE, col = adjustcolor("grey", 0.8), fill = residualFill(testChiBins[[1]][[depth]][[i]], maxRes = maxRes)) } } ## repeat this for the random splitting maxRes <- max(sapply(unlist(testRndChi[[10]], recursive = FALSE), getMaxRes)) par(mfrow=c(7,7), mar=c(1,1,1,1)/2) for (depth in 4:10) { for(i in 1:7) { plot(NA, ylim = c(1, n), xlim = c(1, n), axes = F, xlab = "", ylab = "", main = "") plotBinning(testRndBins[[10]][[depth]][[i]], pch = 19, cex = 0.1, add = TRUE, col = adjustcolor("grey", 0.8), fill = residualFill(testRndBins[[10]][[depth]][[i]], maxRes = maxRes)) } } par(oldPar)
/scratch/gouwar.j/cran-all/cranData/AssocBin/demo/simulatedPatterns.R
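For readers who want one panel of this demo without running the full loops, the sketch below is an added illustration that bins a single realization of the "wave" pattern at a fixed depth and shades the bins by their Pearson residuals; it uses only functions that already appear in the demo, and the seed and depth are arbitrary choices.

library(AssocBin)

set.seed(70111238)
n <- 1000
x <- seq(-1, 1, length = n)
u <- x + runif(n)/3                        # the "wave" pattern from the demo
v <- 4*((x^2 - 1/2)^2 + runif(n)/500)

ii <- 6                                    # fixed maximum depth
crits <- makeCriteria(depth >= ii, expn <= 10, n == 0)
stopFn <- function(bns) stopper(bns, crits)
chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5)

waveBins <- binner(rank(u), rank(v), stopper = stopFn, splitter = chiSplit)
plotBinning(waveBins, pch = 19, cex = 0.2, fill = residualFill(waveBins))
binChi(waveBins)$stat                      # aggregate chi-squared over the bins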
## PACKAGES ########################################################## ## recursive binning package library(AssocBin) ## FUNCTIONS ######################################################### ## add marginal histograms to a scatterplot addMarHists <- function(x, y, xcuts, ycuts) { bds <- par()$usr rowDist <- table(cut(x, xcuts)) colDist <- table(cut(y, ycuts)) # marginal distributions vboxBds <- c(bds[2], bds[2] + 0.1*(bds[2] - bds[1]), bds[3:4]) hboxBds <- c(bds[1:2], bds[4], bds[4] + 0.1*(bds[4] - bds[3])) ## density boxes rect(vboxBds[1], vboxBds[3], vboxBds[2], vboxBds[4], xpd = NA) rect(hboxBds[1], hboxBds[3], hboxBds[2], hboxBds[4], xpd = NA) ## add marginal histograms vseq <- ycuts rect(vboxBds[1], vseq[1:length(colDist)], vboxBds[1] + 0.9*diff(vboxBds[1:2])*(colDist/max(colDist)), vseq[2:(length(colDist) + 1)], xpd = NA, col = adjustcolor("firebrick", 0.5)) hseq <- xcuts rect(hseq[1:length(rowDist)], hboxBds[3], hseq[2:(length(rowDist) + 1)], xpd = NA, hboxBds[3] + 0.9*diff(hboxBds[3:4])*(rowDist/max(rowDist)), col = adjustcolor("firebrick", 0.5)) } ## a custom plot with thinner margins narrowPlot <- function(xgrid, ygrid, main = "", xlab = "", ylab = "", xticks = xgrid, yticks = ygrid, xlim = range(xgrid), ylim = range(ygrid), addGrid = TRUE, ...) { plot(NA, ylim = ylim, xlim = xlim, xaxt = 'n', xlab = "", yaxt = 'n', ylab = "", main = "", ...) ## add labels mtext(main, side = 3, line = 0, cex = 0.8) # main mtext(ylab, side = 2, line = 1, cex = 0.8) # ylab mtext(xlab, side = 1, line = 1, padj = 0, cex = 0.8) # xlab ## add grid lines if (addGrid) { abline(h = ygrid, v = xgrid, lty = 1, col = adjustcolor("gray", alpha.f = 0.4)) } ## and ticks mtext(side = 1, at = xgrid, text = "|", line = 0, cex = 0.5, padj = -2) mtext(text = xticks, at = xgrid, side = 1, cex = 0.8) mtext(side = 2, at = ygrid, text = "|", line = 0, cex = 0.5, padj = 1) mtext(text = yticks, at = ygrid, side = 2, cex = 0.8) } ## S&P DATA EXAMPLE ################################################## ## S&P500 data: "SP500" demo in "zenplots" package, code from Marius ## Hofert, produces a set of pseudo-observations that are uniform ## these are loaded here and converted to ranks data(sp500pseudo) spRanks <- apply(sp500pseudo, 2, rank, ties.method = "random") rownames(spRanks) <- NULL ## take a subset for inspection pairwise set.seed(63212023) spRanks <- spRanks[, sample(1:ncol(spRanks), 50)] spPairs <- combn(ncol(spRanks), 2) # all possible pairs ## next, we iterate through all pairs and bin to a maximum depth of 6 ## define the criteria to used crits <- makeCriteria(depth >= 6, expn <= 10, n == 0) stopFn <- function(bns) stopper(bns, crits) ## and potential splitting functions chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5) miSplit <- function(bn) maxScoreSplit(bn, miScores, minExp = 5) rndSplit <- function(bn) maxScoreSplit(bn, randScores, minExp = 5) ## allocate storage spBins <- vector("list", ncol(spPairs)) ## iterate through all pairs ## ~ 20s (grows roughly linearly) system.time({for (ii in seq_len(ncol(spPairs))) { ## ~20s pair <- spPairs[, ii] # indices of pairs spBins[[ii]] <- binner(spRanks[, pair[1]], spRanks[, pair[2]], stopper = stopFn, splitter = chiSplit) } }) ## get chi statistics across the bins spChis <- lapply(spBins, function(bns) binChi(bns)) spChiStats <- sapply(spChis, function(x) x$stat) spChiResid <- sapply(spChis, function(x) x$residuals) spChiNbin <- sapply(spChiResid, length) ## order by most interesting spOrd <- order(spChiStats, decreasing = TRUE) spMaxRes <- 
max(abs(unlist(spChiResid))) ## compute some null examples nVar <- ncol(spRanks) null <- sapply(1:nVar, function(ii) sample(1:nrow(spRanks))) ## the same number of columns are present in the null data here, so ## use the same pair indices nullBins <- vector("list", ncol(spPairs)) for (ii in seq_len(ncol(spPairs))) { pair <- spPairs[, ii] # indices of pairs nullBins[[ii]] <- binner(null[, pair[1]], null[, pair[2]], stopper = stopFn, splitter = chiSplit) } ## get chi statistics for the null examples nullChis <- lapply(nullBins, function(bns) binChi(bns)) nullChiStats <- sapply(nullChis, function(x) x$stat) nullChiResid <- sapply(nullChis, function(x) x$residuals) nullChiNbin <- sapply(nullChiResid, length) nullMaxRes <- max(abs(unlist(nullChiResid))) ## plot the sp500 point cloud alongside the null oldPar <- par(mar = c(2.1, 2.1, 3.1, 3.1)) narrowPlot(xgrid = seq(1, 2, by = 0.25), ygrid = seq(1, 3, by = 0.5), ylim = c(1, 3.2), xlab = expression(log[10]~"(Number of bins)"), ylab = expression(log[10]~{"("~chi^2~statistic~")"})) points(log(nullChiNbin, 10), log(nullChiStats, 10), cex = 1, pch = 20, col = adjustcolor("steelblue", 0.2)) points(log(spChiNbin, 10), log(spChiStats, 10), col = adjustcolor("firebrick", 0.2), pch = 20) legend(x = "bottomright", cex = 0.8, legend = c("Null", "S&P500"), pch = 20, col = c("steelblue", "firebrick")) addMarHists(log(spChiNbin, 10), log(spChiStats, 10), xcuts = seq(1, 2, by = 0.03125), ycuts = seq(1.5, 3, by = 0.0625)) ## use this to plot the top pairs and their binnings par(mfrow = c(6, 6), mar = c(0.1, 0.55, 1.1, 0.55)) for (prInd in spOrd[1:36]) { pr <- spPairs[, prInd] # pair indices plot(NA, xlim = c(1, (nrow(spRanks))), ylim = c(1, (nrow(spRanks))), axes = "F", xlab = colnames(spRanks)[pr[1]], ylab = colnames(spRanks)[pr[2]], main = "") mtext(paste(colnames(spRanks)[pr], collapse = ":"), cex = 0.6) plotBinning(spBins[[prInd]], fill = residualFill(spBins[[prInd]], maxRes = spMaxRes), add = TRUE, pch = "") points(spRanks[, pr[1]], spRanks[, pr[2]], pch = ".", col = adjustcolor("gray50")) } ## fix parameters par(oldPar)
/scratch/gouwar.j/cran-all/cranData/AssocBin/demo/sp500.R
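A single-pair version of the loop in this demo, added here as an illustration: it assumes `AssocBin` is attached so that `sp500pseudo` is available, picks the first two return series (an arbitrary choice), and reproduces one cell of the pairs display.

library(AssocBin)
data(sp500pseudo)

crits <- makeCriteria(depth >= 6, expn <= 10, n == 0)
stopFn <- function(bns) stopper(bns, crits)
chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5)

rx <- rank(sp500pseudo[, 1], ties.method = "random")
ry <- rank(sp500pseudo[, 2], ties.method = "random")
pairBins <- binner(rx, ry, stopper = stopFn, splitter = chiSplit)
plotBinning(pairBins, pch = ".", fill = residualFill(pairBins))
binChi(pairBins)$stat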
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) ## ----------------------------------------------------------------------------- library(AssocBin) ## ----------------------------------------------------------------------------- set.seed(9023831) n <- 100 randx <- rnorm(n) randy <- rnorm(n) plot(randx, randy) ## ----------------------------------------------------------------------------- rankx <- rank(randx, ties.method = "random") ranky <- rank(randy, ties.method = "random") plot(rankx, ranky) ## ----------------------------------------------------------------------------- criteria <- makeCriteria(expn <= 10, n == 0, depth >= d) str(criteria) ## ----------------------------------------------------------------------------- stopper stopFn <- function(bns) stopper(bns, criteria) ## ----------------------------------------------------------------------------- chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5) miSplit <- function(bn) maxScoreSplit(bn, miScores, minExp = 5) rndSplit <- function(bn) maxScoreSplit(bn, randScores, minExp = 5) ## ----------------------------------------------------------------------------- d <- 5 randBin <- binner(x = rankx, y = ranky, stopper = stopFn, splitter = chiSplit) ## ----------------------------------------------------------------------------- plotBinning(randBin, pch = 19, cex = 1, col = adjustcolor("gray50", 0.5)) ## ----------------------------------------------------------------------------- ## first fill by depth plotBinning(randBin, pch = 19, cex = 1, fill = depthFill(randBin), col = adjustcolor("gray50", 0.5)) ## ----------------------------------------------------------------------------- ## next fill by residual plotBinning(randBin, pch = 19, cex = 1, fill = residualFill(randBin, colrng = c("steelblue", "white", "firebrick")), col = adjustcolor("gray50", 0.5)) ## ----------------------------------------------------------------------------- depx <- rnorm(10*n) depy <- depx + rnorm(10*n, sd = 0.4) plot(depx, depy) ## ----------------------------------------------------------------------------- d <- 10 # change maximum depth due to larger sample size depx.rank <- rank(depx) depy.rank <- rank(depy) depBins <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = chiSplit) plotBinning(depBins, pch = ".", cex = 1) ## ----------------------------------------------------------------------------- plotBinning(depBins, pch = ".", cex = 1, fill = residualFill(depBins)) ## ----------------------------------------------------------------------------- set.seed(591241) depBins.rand <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = rndSplit) plotBinning(depBins.rand, pch = ".", cex = 1, fill = residualFill(depBins.rand)) ## ----------------------------------------------------------------------------- binChi(randBin)$stat binChi(depBins)$stat binChi(depBins.rand)$stat
/scratch/gouwar.j/cran-all/cranData/AssocBin/inst/doc/AssocBin.R
--- title: "An Introduction to `AssocBin`" author: "Chris Salahub" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{An Introduction to AssocBin} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` The `AssocBin` package implements the core algorithm and several helpers to measure the association between two variables using a recursive binary partitioning (or binning) of the data. At each step, the algorithm is provided with a list of bins that have edges, contained points, and other features and then splits those bins which fail some stopping checks. Though it is not forced by any function contained, it is assumed the user will provide the algorithm with *ranked* variables along each margin, as every helper function assumes this is the case. The core logic of the test rests on the observation that variable ranks will have a joint uniform density only under independence. This vignette will demonstrate how to set up, run, and process the results from this core algorithm using some of the defined helpers. A more complete description can be found in the associated pre-print, https://arxiv.org/abs/2311.08561. We begin by loading the package. ```{r} library(AssocBin) ``` Now we can move on to some simple examples to establish how the algorithm works. We start by generating some independent data and taking the ranks. ```{r} set.seed(9023831) n <- 100 randx <- rnorm(n) randy <- rnorm(n) plot(randx, randy) ``` ```{r} rankx <- rank(randx, ties.method = "random") ranky <- rank(randy, ties.method = "random") plot(rankx, ranky) ``` With rank data in hand, we can work on setting up the algorithm. This requires (minimally) three steps: 1. Defining the stop criteria 2. Defining a stopping function 3. Defining a splitting function `AssocBin` has been built in a modular way so any of these can be custom-defined and swapped into the algorithm by a user, but helpers are provided for the suggested use case. Let's look at how the stop criteria are defined: ```{r} criteria <- makeCriteria(expn <= 10, n == 0, depth >= d) str(criteria) ``` The `makeCriteria` function captures its arguments and appends them into a character separated by `|`. This creates a single logical statement which is evaluated within each bin (treated as an environment) to determine whether than bin's named elements satisfy the stop criteria. The wrapper that performs this evaluation is `stopper`, which is set up by defining a closure using the criteria: ```{r} stopper stopFn <- function(bns) stopper(bns, criteria) ``` In this way, many criteria can be quickly checked and the `makeCriteria` function can be adapted to changes in the named elements of a bin. Note that `d` is not defined yet. This is deliberate choice that allows us to use R's lexical scoping to set it dynamically and modify the depth criteria on the fly. Next, we set up the splitting logic. The helper provided to support different splitting logic is the `maxScoreSplit` function, which accepts a bin, score function, and numeric value specifying the minimum expected number of observations (proportional to the area) allowed for bins to put a floor on the minimum bin size. It uses the score function to evaluate splits at every observation in the bin and chooses the one which maximizes the score subject to the minimum size constraint. 
`chiScores` computes $\chi^2$ statistic-like scores based on $$\frac{(o - e)^2}{e}$$ where $e$ is the expected number of points in a bin and $o$ is the observed number. `miScores` instead takes inspiration from the mutual information in $$\frac{o}{n} \log \frac{o}{e}.$$ Finally, `randScores` supports random splitting by sampling random uniform values in place of computing a score for each potential split. To set up their use, all can be placed in appropriate closures. ```{r} chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5) miSplit <- function(bn) maxScoreSplit(bn, miScores, minExp = 5) rndSplit <- function(bn) maxScoreSplit(bn, randScores, minExp = 5) ``` With all the necessary preliminaries set up, we can apply the algorithm to our data. This is done by using the `binner` function with the appropriate arguments. ```{r} d <- 5 randBin <- binner(x = rankx, y = ranky, stopper = stopFn, splitter = chiSplit) ``` We can visualize the result using the helper `plotBinning`: ```{r} plotBinning(randBin, pch = 19, cex = 1, col = adjustcolor("gray50", 0.5)) ``` This plot is augmented by filling the displayed bins. The `fill` argument to `plotBinning` accepts a vector of colour values, but two useful cases are shading by residual and shading by depth. These cases are completed by `residualFill`, which computes the Pearson residual $$\text{sign}(o - e)\sqrt{\frac{(o - e)^2}{e}}$$ and uses a divergent palette to shade the bins. Trying both for our simple data: ```{r} ## first fill by depth plotBinning(randBin, pch = 19, cex = 1, fill = depthFill(randBin), col = adjustcolor("gray50", 0.5)) ``` ```{r} ## next fill by residual plotBinning(randBin, pch = 19, cex = 1, fill = residualFill(randBin, colrng = c("steelblue", "white", "firebrick")), col = adjustcolor("gray50", 0.5)) ``` Both of these can be modified to change the palette used and the residual fill can be modified based on a value for maximum saturation. The residual fill is much more interesting when applied to a larger sample of data which is not random and uniform. ```{r} depx <- rnorm(10*n) depy <- depx + rnorm(10*n, sd = 0.4) plot(depx, depy) ``` ```{r} d <- 10 # change maximum depth due to larger sample size depx.rank <- rank(depx) depy.rank <- rank(depy) depBins <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = chiSplit) plotBinning(depBins, pch = ".", cex = 1) ``` ```{r} plotBinning(depBins, pch = ".", cex = 1, fill = residualFill(depBins)) ``` The Pearson residuals, which use red to display areas of unusually high density and blue for areas of unusually low density, effectively highlight the pattern of the data and the regions which depart the most from an assumption of uniformity (and therefore independence). Even under random splitting they provide some idea of the structure in the data (though the bins aren't as regular). ```{r} set.seed(591241) depBins.rand <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = rndSplit) plotBinning(depBins.rand, pch = ".", cex = 1, fill = residualFill(depBins.rand)) ``` Once a binning has been completed, we can compute the $\chi^2$ statistic on the bins using `binChi`. ```{r} binChi(randBin)$stat binChi(depBins)$stat binChi(depBins.rand)$stat ``` As might be expected, the statistic for the strongly dependent data is much larger than for the uniform data and the maximizing splits result in a larger statistic value than the random splits. These statistics can then be used to rank different associations or to generate a p-value. 
For more examples of the method in use, try `demo(simulatedPatterns)` (which is fast) or `demo(sp500)` (which takes a very long time to run due to the size of the dataset).
/scratch/gouwar.j/cran-all/cranData/AssocBin/inst/doc/AssocBin.Rmd
--- title: "An Introduction to `AssocBin`" author: "Chris Salahub" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{An Introduction to AssocBin} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) ``` The `AssocBin` package implements the core algorithm and several helpers to measure the association between two variables using a recursive binary partitioning (or binning) of the data. At each step, the algorithm is provided with a list of bins that have edges, contained points, and other features and then splits those bins which fail some stopping checks. Though it is not forced by any function contained, it is assumed the user will provide the algorithm with *ranked* variables along each margin, as every helper function assumes this is the case. The core logic of the test rests on the observation that variable ranks will have a joint uniform density only under independence. This vignette will demonstrate how to set up, run, and process the results from this core algorithm using some of the defined helpers. A more complete description can be found in the associated pre-print, https://arxiv.org/abs/2311.08561. We begin by loading the package. ```{r} library(AssocBin) ``` Now we can move on to some simple examples to establish how the algorithm works. We start by generating some independent data and taking the ranks. ```{r} set.seed(9023831) n <- 100 randx <- rnorm(n) randy <- rnorm(n) plot(randx, randy) ``` ```{r} rankx <- rank(randx, ties.method = "random") ranky <- rank(randy, ties.method = "random") plot(rankx, ranky) ``` With rank data in hand, we can work on setting up the algorithm. This requires (minimally) three steps: 1. Defining the stop criteria 2. Defining a stopping function 3. Defining a splitting function `AssocBin` has been built in a modular way so any of these can be custom-defined and swapped into the algorithm by a user, but helpers are provided for the suggested use case. Let's look at how the stop criteria are defined: ```{r} criteria <- makeCriteria(expn <= 10, n == 0, depth >= d) str(criteria) ``` The `makeCriteria` function captures its arguments and appends them into a character separated by `|`. This creates a single logical statement which is evaluated within each bin (treated as an environment) to determine whether than bin's named elements satisfy the stop criteria. The wrapper that performs this evaluation is `stopper`, which is set up by defining a closure using the criteria: ```{r} stopper stopFn <- function(bns) stopper(bns, criteria) ``` In this way, many criteria can be quickly checked and the `makeCriteria` function can be adapted to changes in the named elements of a bin. Note that `d` is not defined yet. This is deliberate choice that allows us to use R's lexical scoping to set it dynamically and modify the depth criteria on the fly. Next, we set up the splitting logic. The helper provided to support different splitting logic is the `maxScoreSplit` function, which accepts a bin, score function, and numeric value specifying the minimum expected number of observations (proportional to the area) allowed for bins to put a floor on the minimum bin size. It uses the score function to evaluate splits at every observation in the bin and chooses the one which maximizes the score subject to the minimum size constraint. 
`chiScores` computes $\chi^2$ statistic-like scores based on $$\frac{(o - e)^2}{e}$$ where $e$ is the expected number of points in a bin and $o$ is the observed number. `miScores` instead takes inspiration from the mutual information in $$\frac{o}{n} \log \frac{o}{e}.$$ Finally, `randScores` supports random splitting by sampling random uniform values in place of computing a score for each potential split. To set up their use, all can be placed in appropriate closures. ```{r} chiSplit <- function(bn) maxScoreSplit(bn, chiScores, minExp = 5) miSplit <- function(bn) maxScoreSplit(bn, miScores, minExp = 5) rndSplit <- function(bn) maxScoreSplit(bn, randScores, minExp = 5) ``` With all the necessary preliminaries set up, we can apply the algorithm to our data. This is done by using the `binner` function with the appropriate arguments. ```{r} d <- 5 randBin <- binner(x = rankx, y = ranky, stopper = stopFn, splitter = chiSplit) ``` We can visualize the result using the helper `plotBinning`: ```{r} plotBinning(randBin, pch = 19, cex = 1, col = adjustcolor("gray50", 0.5)) ``` This plot is augmented by filling the displayed bins. The `fill` argument to `plotBinning` accepts a vector of colour values, but two useful cases are shading by residual and shading by depth. These cases are completed by `residualFill`, which computes the Pearson residual $$\text{sign}(o - e)\sqrt{\frac{(o - e)^2}{e}}$$ and uses a divergent palette to shade the bins. Trying both for our simple data: ```{r} ## first fill by depth plotBinning(randBin, pch = 19, cex = 1, fill = depthFill(randBin), col = adjustcolor("gray50", 0.5)) ``` ```{r} ## next fill by residual plotBinning(randBin, pch = 19, cex = 1, fill = residualFill(randBin, colrng = c("steelblue", "white", "firebrick")), col = adjustcolor("gray50", 0.5)) ``` Both of these can be modified to change the palette used and the residual fill can be modified based on a value for maximum saturation. The residual fill is much more interesting when applied to a larger sample of data which is not random and uniform. ```{r} depx <- rnorm(10*n) depy <- depx + rnorm(10*n, sd = 0.4) plot(depx, depy) ``` ```{r} d <- 10 # change maximum depth due to larger sample size depx.rank <- rank(depx) depy.rank <- rank(depy) depBins <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = chiSplit) plotBinning(depBins, pch = ".", cex = 1) ``` ```{r} plotBinning(depBins, pch = ".", cex = 1, fill = residualFill(depBins)) ``` The Pearson residuals, which use red to display areas of unusually high density and blue for areas of unusually low density, effectively highlight the pattern of the data and the regions which depart the most from an assumption of uniformity (and therefore independence). Even under random splitting they provide some idea of the structure in the data (though the bins aren't as regular). ```{r} set.seed(591241) depBins.rand <- binner(depx.rank, depy.rank, stopper = stopFn, splitter = rndSplit) plotBinning(depBins.rand, pch = ".", cex = 1, fill = residualFill(depBins.rand)) ``` Once a binning has been completed, we can compute the $\chi^2$ statistic on the bins using `binChi`. ```{r} binChi(randBin)$stat binChi(depBins)$stat binChi(depBins.rand)$stat ``` As might be expected, the statistic for the strongly dependent data is much larger than for the uniform data and the maximizing splits result in a larger statistic value than the random splits. These statistics can then be used to rank different associations or to generate a p-value. 
For more examples of the method in use, try `demo(simulatedPatterns)` (which is fast) or `demo(sp500)` (which takes a very long time to run due to the size of the dataset).
/scratch/gouwar.j/cran-all/cranData/AssocBin/vignettes/AssocBin.Rmd
AssociationTestWithCorrectPS <- function(PSfile="file1.geno", ASSfile="file2.geno",
                                         PHEfile="file3.pheno", m.splits=20,
                                         miss.val=9, outfile="ass_test_result.txt")
{
    V <- pcoc(genoFile=PSfile, num.splits=m.splits, miss.val=miss.val)

    xStr <- readLines(con=ASSfile)
    n <- nchar(xStr[1])
    m <- length(xStr)

    y <- read.table(file=PHEfile)[,1]

    res <- rep(0,m)
    for (i in 1:m)
    {
        g <- Str2Num(xStr[i])
        is.na(g[g==miss.val]) <- TRUE

        temp <- glm(y~.+g, family=binomial(link="logit"), data=V)
        a.1 <- summary(temp)$coefficients
        t.1 <- dim(a.1)
        res[i] <- a.1[t.1[1], t.1[2]]
    }

    write(res, file=outfile, ncolumns=1)
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/AssociationTestWithCorrectPS.R
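`AssociationTestWithCorrectPS()` reads its genotype files with `readLines()` and counts subjects with `nchar()`, so each line must be one marker written as one character per subject with no separators, the same layout used in the `eigenstrat()` example later in this package; the phenotype file is a single column. The sketch below only illustrates that input layout with simulated data and made-up file names; it does not call the function itself, which additionally depends on the internal helpers `pcoc()` and `Str2Num()`.

## 5 candidate markers x 30 subjects, one character per genotype, no separators
assGeno <- matrix(rbinom(150, 2, 0.5), nrow = 5)
write.table(assGeno, file = "ass.geno", quote = FALSE, sep = "",
            row.names = FALSE, col.names = FALSE)

## the layout the function assumes when it does readLines() then nchar()
xStr <- readLines("ass.geno")
length(xStr)    # 5 markers, one per line
nchar(xStr[1])  # 30 subjects, one character each

## one-column phenotype file, read by read.table(file = PHEfile)[, 1]
write.table(rbinom(30, 1, 0.5), file = "y.pheno",
            row.names = FALSE, col.names = FALSE)

file.remove("ass.geno", "y.pheno")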
CalExpect <- function(H)
{
    a <- apply(H, 1, sum)
    b <- apply(H, 2, sum)
    n <- sum(H)

    kronecker(a,b)/n
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/CalExpect.R
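A quick check of what `CalExpect()` returns, added for illustration (the function carries no `@export` tag, so the `:::` accessor is used here): for a contingency table `H` it gives the usual expected counts under independence, flattened row by row, because `kronecker()` applied to the two margin vectors multiplies each row total by the whole vector of column totals.

H <- matrix(c(10, 20, 30,
               5, 15, 20), nrow = 2, byrow = TRUE)

AssocTests:::CalExpect(H)
## the same values arranged as a matrix
outer(rowSums(H), colSums(H)) / sum(H)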
## Gap Statistics
CalculateGapK <- function(W, WStar, kG, n.monteCarlo)
{
    WLog <- log(W)
    WStarLog <- log(WStar)

    Gap <- apply(WStarLog, 2, mean) - WLog
    s <- apply(WStarLog, 2, sd) * sqrt((n.monteCarlo-1)/n.monteCarlo) * sqrt(1+1/n.monteCarlo)

    I <- 2
    gap_kHat <- 1
    for (k in I:(kG-1))
    {
        if (Gap[k] >= Gap[k+1]-s[k+1])
        {
            gap_kHat <- k
            break
        }
    }

    gap_kHat
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/CalculateGapK.R
## calculate the within-cluster dispersion W for each candidate number of clusters
CalculateWAll <- function(x, n, kG)
{
    W <- rep(1, kG)

    for (k in 2:kG)
    {
        W[k] <- cluster::clara(x, k, samples=20, medoids.x=FALSE)$objective
    }

    W
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/CalculateWAll.R
ChangeX <- function(N, geno, covariates, num.test)
{
    # geno is matrix or data.frame
    # covariates is NULL or matrix
    if (is.null(covariates))
    {
        Len <- (1 + length(covariates) + ncol(geno)) * num.test
    }else
    {
        Len <- (1 + ncol(covariates) + ncol(geno)) * num.test
    }

    x.mat <- matrix(data=0, nrow=num.test*N, ncol=Len)
    change.id <- matrix(data=1:(num.test*N), nrow=N, ncol=num.test)

    dex.rec <- change.id[,1]
    dex.add <- change.id[,2]
    dex.dom <- change.id[,3]
    change.id <- as.vector(t(change.id))

    geno.rec <- geno
    geno.add <- geno
    geno.dom <- geno

    geno.rec[geno.rec==1] <- 0
    geno.rec[geno.rec==2] <- 1
    geno.dom[geno.dom==2] <- 1

    mat.rec <- cbind(1, covariates, geno.rec)
    mat.add <- cbind(1, covariates, geno.add)
    mat.dom <- cbind(1, covariates, geno.dom)

    k <- ncol(mat.rec)
    list.1 <- 1:k

    x.mat[dex.rec, list.1] <- mat.rec
    x.mat[dex.add, list.1+k] <- mat.add
    x.mat[dex.dom, list.1+k*2] <- mat.dom

    x.mat[change.id,]
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/ChangeX.R
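To make the three genetic models in `ChangeX()` concrete, the toy call below (added for illustration; the function has no `@export` tag, hence the `:::` accessor) stacks the recessive, additive, and dominant design matrices for two subjects at one marker. With `num.test = 3`, an intercept, no covariates, and one genotype column, the result has `3 * N` rows and `(1 + 1) * 3` columns, and the final reordering places each subject's three model rows together.

geno <- matrix(c(1, 2), ncol = 1)  # genotypes of two subjects at one marker

X <- AssocTests:::ChangeX(N = 2, geno = geno, covariates = NULL, num.test = 3)
dim(X)  # 6 rows (3 models x 2 subjects) and 6 columns (2 per model x 3 models)
X
## genotype coding per model: recessive recodes 1 -> 0 and 2 -> 1,
## additive keeps 0/1/2, dominant recodes 2 -> 1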
CorrMatNRTest <- function(y, g)
{
    n0 <- sum(g==0)
    n1 <- sum(g==1)
    n2 <- sum(g==2)
    n <- n0 + n1 + n2

    Y0 <- y[g==0]
    Y1 <- y[g==1]
    Y2 <- y[g==2]

    # genotype 1 relative to genotype 0
    d0 <- 1:n0
    d1 <- (n0+1):(n0+n1)
    RK1 <- rank(c(Y0,Y1))
    f01 <- ( sum(RK1[d1]) - n1*(n1+1)/2 ) / (n0*n1)

    # genotype 2 relative to genotype 0
    d2 <- (n0+1):(n0+n2)
    RK2 <- rank(c(Y0,Y2))
    f02 <- ( sum(RK2[d2]) - n2*(n2+1)/2 ) / (n0*n2)

    # genotype 2 relative to genotype 1
    d3 <- (n1+1):(n1+n2)
    RK12 <- rank(c(Y1,Y2))
    f12 <- ( sum(RK12[d3]) - n2*(n2+1)/2 ) / (n1*n2)

    # standard deviation
    temp1 <- rep(NA, n0)
    temp2 <- rep(NA, n0)
    for (i in 1:n0)
    {
        temp1[i] <- ( sum(Y0[i]<Y1)/n1 - 1/2 )^2
        temp2[i] <- ( sum(Y0[i]<Y2)/n2 - 1/2 )^2
    }

    temp3 <- rep(NA, n1)
    temp4 <- rep(NA, n2)
    for (j in 1:n1)
    {
        temp3[j] <- ( sum(Y0<Y1[j])/n0 - 1/2 )^2
    }
    for (k in 1:n2)
    {
        temp4[k] <- ( sum(Y0<Y2[k])/n0 - 1/2 )^2
    }

    temp5 <- rep(NA, n1)
    temp6 <- rep(NA, n1)
    for (j in 1:n1)
    {
        temp5[j] <- sum(Y0<Y1[j])/n0 - 1/2
        temp6[j] <- sum(Y1[j]<Y2)/n2 - 1/2
    }
    temp7 <- temp6^2

    temp8 <- rep(NA, n2)
    for (k in 1:n2)
    {
        temp8[k] <- ( sum(Y1<Y2[k])/n1 - 1/2 )^2
    }

    #### the necessary variance expressions
    f01.var <- 1/(n0*n1) * ( (n1-1)/n0*sum(temp1) + (n0-1)/n1*sum(temp3) + 1/4 )
    f02.var <- 1/(n0*n2) * ( (n2-1)/n0*sum(temp2) + (n0-1)/n2*sum(temp4) + 1/4 )
    f12.var <- 1/(n1*n2) * ( (n2-1)/n1*sum(temp7) + (n1-1)/n2*sum(temp8) + 1/4 )

    cov01.12 <- (1/n1)^2*sum(temp5*temp6)

    a1 <- sqrt((n0+n1)/f01.var)
    a2 <- sqrt((n1+n2)/f12.var)
    w1 <- a1/(a1+a2)
    w2 <- a2/(a1+a2)
    sigma.A <- w1^2*f01.var + w2^2*f12.var + 2*w1*w2*cov01.12

    temp9 <- rep(NA, n2)
    for (k in 1:n2)
    {
        temp9[k] <- (sum(Y0<Y2[k])/n0 - 1/2)*(sum(Y1<Y2[k])/n1 - 1/2)
    }
    cov02.12 <- (1/n2)^2*sum(temp9)
    sigma.R <- (n0/(n0+n1))^2*f02.var + (n1/(n0+n1))^2*f12.var + 2*n0*n1/((n0+n1)^2)*cov02.12

    temp10 <- rep(NA, n0)
    for (i in 1:n0)
    {
        temp10[i] <- (sum(Y0[i]<Y1)/n1 - 1/2)*(sum(Y0[i]<Y2)/n2 - 1/2)
    }
    cov01.02 <- 1/(n0^2)*sum(temp10)
    sigma.D <- (n1/(n1+n2))^2*f01.var + (n2/(n1+n2))^2*f02.var + 2*n1*n2/((n1+n2)^2)*cov01.02

    ############## estimate of the covariances among the NRT test statistics Z_R, Z_A, Z_D
    ### 1\ the covariance between Z_R, Z_A
    RA.term1 <- n0/(n0+n1)*w1*cov01.02
    RA.term2 <- n0/(n0+n1)*w2*cov02.12
    RA.term3 <- n1/(n0+n1)*w1*cov01.12
    RA.term4 <- n1/(n0+n1)*w2*f12.var
    cov.RA <- RA.term1 + RA.term2 + RA.term3 + RA.term4

    ### 2\ the covariance between Z_A, Z_D
    AD.term1 <- n1/(n1+n2)*w1*f01.var
    AD.term2 <- n2/(n1+n2)*w1*cov01.02
    AD.term3 <- n1/(n1+n2)*w2*cov01.12
    AD.term4 <- n2/(n1+n2)*w2*cov02.12
    cov.AD <- AD.term1 + AD.term2 + AD.term3 + AD.term4

    ### 3\ the covariance between Z_R, Z_D
    RD.term1 <- n0*n1/((n0+n1)*(n1+n2))*cov01.02
    RD.term2 <- n0*n2/((n0+n1)*(n1+n2))*f02.var
    RD.term3 <- n1*n1/((n0+n1)*(n1+n2))*cov01.12
    RD.term4 <- n1*n2/((n0+n1)*(n1+n2))*cov02.12
    cov.RD <- RD.term1 + RD.term2 + RD.term3 + RD.term4

    ### the covariance matrix
    cor.RA <- cov.RA/sqrt(sigma.R*sigma.A)
    cor.AD <- cov.AD/sqrt(sigma.A*sigma.D)
    cor.RD <- cov.RD/sqrt(sigma.R*sigma.D)

    rho <- matrix(0, nrow=3, ncol=3)
    rho[1,2:3] <- c(cor.RA, cor.RD)
    rho[2,3] <- cor.AD
    cov.mat <- rho + t(rho)
    cov.mat <- cov.mat + diag(3)

    cov.mat
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/CorrMatNRTest.R
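A small sanity check for `CorrMatNRTest()`, added for illustration (an internal function, hence `:::`): given a quantitative trait and a 0/1/2 genotype vector it returns the estimated 3 x 3 correlation matrix among the rank-based statistics for the recessive, additive, and dominant models, symmetric with ones on the diagonal. The simulated effect size below is arbitrary.

set.seed(1)
g <- sample(0:2, size = 120, replace = TRUE)
y <- rnorm(120) + 0.3 * g

AssocTests:::CorrMatNRTest(y, g)  # symmetric 3 x 3 matrix with unit diagonal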
##' Conduct the distance regression with or without the adjustment of ##' the covariates to detect the association between a distance matrix ##' and some independent variants of interest. ##' ##' The pseudo \emph{F} statistic based on the distance regression with ##' or without the adjustment of the covariates detects the ##' association between a distance matrix and some independent ##' variants of interest. A distance matrix can be transformed into a ##' similarity matrix easily. ##' @title Distance regression ##' @param simi.mat a similarity matrix among the subjects. ##' @param null.space a numeric vector to show the column numbers of ##' the null space in \code{x.mat}. ##' @param x.mat the covariate matrix which combines the null space ##' and the matrix of interest. ##' @param permute logical. If \code{TRUE}, the Monte Carlo sampling is used ##' without replacement; otherwise, with replacement. The default is ##' \code{TRUE}. ##' @param n.MonteCarlo the number of times for the Monte Carlo ##' procedure. The default is \code{1000}. ##' @param seed if it is not \code{NULL}, set the random number generator ##' state for random number generation. The default is \code{NULL}. ##' @return A list with class "\code{htest}" containing the following components: ##' \tabular{llll}{ ##' \code{statistic} \tab \tab \tab \cr ##' \tab \tab \tab the observed value of the test statistic.\cr ##' \code{p.value} \tab \tab \tab \cr ##' \tab \tab \tab the p-value for the test.\cr ##' \code{alternative} \tab \tab \tab \cr ##' \tab \tab \tab a character string describing the alternative hypothesis.\cr ##' \code{method} \tab \tab \tab \cr ##' \tab \tab \tab a character string indicating the type of test performed.\cr ##' \code{data.name} \tab \tab \tab \cr ##' \tab \tab \tab a character string giving the names of the data. ##' } ##' @author Lin Wang, Wei Zhang, and Qizhai Li. ##' @references Lin Wang, Wei Zhang, and Qizhai Li. AssocTests: An R Package ##' for Genetic Association Studies. \emph{Journal of Statistical Software}. ##' 2020; 94(5): 1-26. ##' @references Q Li, S Wacholder, DJ Hunter, RN Hoover, S Chanock, G ##' Thomas, and K Yu. Genetic Background Comparison Using ##' Distance-Based Regression, with Applications in Population ##' Stratification Evaluation and Adjustment. \emph{Genetic ##' Epidemiology}. 2009; 33(5): 432-441. ##' @references J Wessel and NJ Schork. Generalized Genomic ##' Distance-Based Regression Methodology for Multilocus Association ##' Analysis. \emph{American Journal of Human Genetics}. 2006; 79(5): ##' 792-806. ##' @references MA Zapala and NJ Schork. Multivariate Regression ##' Analysis of Distance Matrices for Testing Associations Between ##' Gene Expression Patterns and Related Variables. \emph{Proceedings ##' of the National Academy of Sciences of the United States of ##' America}. 2006; 103(51): 19430-19435. 
##' @examples ##' data(drS.eg) ##' null.space <- 1 ##' x.mat <- matrix(c(rep(1, 600), rep(0, 200)), ncol=2) ##' dr(drS.eg, null.space, x.mat, permute = TRUE, n.MonteCarlo = 50, seed = NULL) ##' @export dr <- function(simi.mat, null.space, x.mat, permute=TRUE, n.MonteCarlo=1000, seed=NULL) { if (!is.null(seed)) { set.seed(seed) } x1 <- x.mat[,null.space] if (length(null.space)==1) { x1 <- matrix(x1, ncol=1) } x.hat <- x.mat %*% solve(t(x.mat)%*%x.mat) %*% t(x.mat) x1.hat <- x1 %*% solve(t(x1)%*%x1) %*% t(x1) n <- nrow(simi.mat) I.n <- diag(n) cent <- I.n - matrix(1,nrow=n,ncol=n)/n i.x <- I.n - x.hat i.x1 <- I.n - x1.hat Q <- i.x1 %*% cent %*% simi.mat %*% cent %*% i.x1 alter.hat <- x.hat - x1.hat F.obs <- sum(alter.hat*Q) / sum(i.x*Q) U <- 1:n F.star <- rep(NA, n.MonteCarlo) for (i in 1:n.MonteCarlo) { id.sam <- sample(U, replace=!permute) Q.star <- Q[id.sam, id.sam] F.star[i] <- sum(alter.hat*Q.star) / sum(i.x*Q.star) } pv <- sum(F.star >= F.obs)/n.MonteCarlo a <- deparse(substitute(simi.mat)) b <- deparse(substitute(x.mat)) structure( list(statistic=c(F = F.obs), p.value = pv, alternative = "the pair-wise similarity is influenced by the variants of interest", method = "Distance regression", data.name = paste(a, "and", b, sep=" ") ), .Names=c("statistic", "p.value", "alternative", "method", "data.name"), class="htest" ) }
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/DR_main.R
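The roxygen example above tests a single indicator with only an intercept in the null space. The sketch below, added for illustration, shows the covariate-adjusted case: the simulated covariate is not part of the packaged example data, the first two columns of `x.mat` (intercept and covariate) form the null space, and the last column is the variant of interest.

data(drS.eg, package = "AssocTests")
n <- nrow(drS.eg)

covar <- rnorm(n)                                   # an illustrative covariate
group <- c(rep(1, n %/% 2), rep(0, n - n %/% 2))    # variant of interest
x.mat <- cbind(1, covar, group)

dr(drS.eg, null.space = c(1, 2), x.mat = x.mat,
   permute = TRUE, n.MonteCarlo = 50, seed = 1)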
##' Find the eigenvectors of the similarity matrix among the subjects ##' used for correcting for population stratification in the ##' population-based genetic association studies. ##' ##' Suppose that a total of \emph{n} cases and controls are randomly ##' enrolled in the source population and a panel of \emph{m} ##' single-nucleotide polymorphisms are genotyped. The genotype at a ##' marker locus is coded as 0, 1, or 2, with the value corresponding ##' to the copy number of risk alleles. All the genotypes are given in ##' the form of a \emph{m*n} matrix, in which the element in the ##' \emph{i}th row and the \emph{j}th column represents the genotype ##' of the \emph{j}th subject at the \emph{i}th marker. This function ##' calculates the top eigenvectors or the eigenvectors with ##' significant eigenvalues of the similarity matrix among the ##' subjects to infer the potential population structure. See also ##' \link{tw}. ##' @title EIGENSTRAT for correcting for population stratification ##' @param genoFile a txt file containing the genotypes (0, 1, 2, or ##' 9). The element of the file in Row \emph{i} and Column \emph{j} ##' represents the genotype at the \emph{i}th marker of the \emph{j}th ##' subject. 0, 1, and 2 denote the number of risk alleles, and 9 ##' (default) is for the missing genotype. ##' @param outFile.Robj the name of an R object for saving the list of ##' the results which is the same as the return value of this ##' function. The default is "\code{out.list}". ##' @param outFile.txt a txt file for saving the eigenvectors ##' corresponding to the top significant eigenvalues. ##' @param rm.marker.index a numeric vector for the indices of the ##' removed markers. The default is \code{NULL}. ##' @param rm.subject.index a numeric vector for the indices of the ##' removed subjects. The default is \code{NULL}. ##' @param miss.val the number representing the missing data in the ##' input data. The default is \code{9}. The element 9 for the missing data ##' in the \code{genoFile} should be changed according to the value of ##' \code{miss.val}. ##' @param num.splits the number of groups into which the markers are ##' split. The default is \code{10}. ##' @param topK the number of eigenvectors to return. If \code{NULL}, it is ##' calculated by the Tracy-Widom test. The default is \code{NULL}. ##' @param signt.eigen.level a numeric value which is the significance ##' level of the Tracy-Widom test. It should be \code{0.05}, \code{0.01}, \code{0.005}, or ##' \code{0.001}. The default is \code{0.01}. ##' @param signal.outlier logical. If \code{TRUE}, delete the outliers of the ##' subjects; otherwise, do not search for the outliers. The default ##' is \code{FALSE}. ##' @param iter.outlier a numeric value that is the iteration time for ##' finding the outliers of the subjects. The default is \code{5}. ##' @param sigma.thresh a numeric value that is the lower limit for ##' eliminating the outliers. The default is \code{6}. 
##' @return \code{eigenstrat} returns a list, which contains the following components: ##' \tabular{llll}{ ##' \code{num.markers} \tab \tab \tab the number of markers excluding the removed markers.\cr ##' \code{num.subjects} \tab \tab \tab the number of subjects excluding the outliers.\cr ##' \code{rm.marker.index} \tab \tab \tab the indices of the removed markers.\cr ##' \code{rm.subject.index} \tab \tab \tab the indices of the removed subjects.\cr ##' \code{TW.level} \tab \tab \tab the significance level of the Tracy-Widom test.\cr ##' \code{signal.outlier} \tab \tab \tab dealing with the outliers in the subjects or not.\cr ##' \code{iter.outlier} \tab \tab \tab the iteration time for finding the outliers.\cr ##' \code{sigma.thresh} \tab \tab \tab the lower limit for eliminating the outliers.\cr ##' \code{num.outliers} \tab \tab \tab the number of outliers.\cr ##' \code{outliers.index} \tab \tab \tab the indices of the outliers.\cr ##' \code{num.used.subjects} \tab \tab \tab the number of the used subjects.\cr ##' \code{used.subjects.index} \tab \tab \tab the indices of the used subjects.\cr ##' \code{similarity.matrix} \tab \tab \tab the similarity matrix among the subjects.\cr ##' \code{eigenvalues} \tab \tab \tab the eigenvalues of the similarity matrix.\cr ##' \code{eigenvectors} \tab \tab \tab the eigenvectors corresponding to the eigenvalues.\cr ##' \code{topK} \tab \tab \tab the number of significant eigenvalues.\cr ##' \code{TW.stat} \tab \tab \tab the observed values of the Tracy-Widom statistics.\cr ##' \code{topK.eigenvalues} \tab \tab \tab the top eigenvalues.\cr ##' \code{topK.eigenvectors} \tab \tab \tab the eigenvectors corresponding to the top eigenvalues.\cr ##' \code{runtime} \tab \tab \tab the running time of this function. ##' } ##' @author Lin Wang, Wei Zhang, and Qizhai Li. ##' @references Lin Wang, Wei Zhang, and Qizhai Li. AssocTests: An R Package ##' for Genetic Association Studies. \emph{Journal of Statistical Software}. ##' 2020; 94(5): 1-26. ##' @references AL Price, NJ Patterson, RM Plenge, ME Weinblatt, NA ##' Shadick, and D Reich. Principal Components Analysis Corrects for ##' Stratification in Genome-Wide Association Studies. \emph{Nature ##' Genetics}. 2006; 38(8): 904-909. ##' @references N Patterson, AL Price, and D Reich. Population ##' Structure and Eigenanalysis. \emph{PloS Genetics}. 2006; 2(12): ##' 2074-2093. ##' @references CA Tracy and H Widom. Level-Spacing Distributions and ##' the Airy Kernel. \emph{Communications in Mathematical ##' Physics}. 1994; 159(1): 151-174. 
##' @examples ##' eigenstratG.eg <- matrix(rbinom(3000, 2, 0.5), ncol = 30) ##' write.table(eigenstratG.eg, file = "eigenstratG.eg.txt", quote = FALSE, ##' sep = "", row.names = FALSE, col.names = FALSE) ##' eigenstrat(genoFile = "eigenstratG.eg.txt", outFile.Robj = "eigenstrat.result.list", ##' outFile.txt = "eigenstrat.result.txt", rm.marker.index = NULL, ##' rm.subject.index = NULL, miss.val = 9, num.splits = 10, ##' topK = NULL, signt.eigen.level = 0.01, signal.outlier = FALSE, ##' iter.outlier = 5, sigma.thresh = 6) ##' file.remove("eigenstratG.eg.txt", "eigenstrat.result.list", "eigenstrat.result.txt") ##' @export eigenstrat <- function(genoFile, outFile.Robj = "out.list", outFile.txt = "out.txt", rm.marker.index = NULL, rm.subject.index = NULL, miss.val = 9, num.splits = 10, topK = NULL, signt.eigen.level = 0.01, signal.outlier = FALSE, iter.outlier = 5, sigma.thresh = 6) { timeX <- proc.time() SignEigenPoint <- 0 if (signt.eigen.level == 0.05) { SignEigenPoint <- 0.9793 }else if (signt.eigen.level == 0.01) { SignEigenPoint <- 2.0234 }else if (signt.eigen.level == 0.005) { SignEigenPoint <- 2.4224 }else if (signt.eigen.level == 0.001) { SignEigenPoint <- 3.2724 }else { stop("signt.eigen.level must belong to the set { 0.05, 0.01, 0.005, 0.001 }\n") } # read file #xStr <- scan(file=genoFile, what='character') xStr <- readLines(con=genoFile) num.subjects <- nchar(xStr[1]) num.original <- num.subjects if (is.null(rm.marker.index)) { num.markers <- length(xStr) }else { xStr <- xStr[-rm.marker.index] num.markers <- length(xStr) } # calculate the lines of using scan each time if (num.splits==1) { each.lines <- num.markers used.lines <- c(0,num.markers) }else { each.lines <- rep(0, num.splits) y <- floor(num.markers/num.splits) each.lines[1:(num.splits-1)] <- y each.lines[num.splits] <- num.markers - y*(num.splits-1) used.lines <- cumsum(c(0, each.lines)) } # if need to eliminate subjects, if (!is.null(rm.subject.index)) { num.subjects <- num.subjects-length(rm.subject.index) } #print(paste("num.subjects=",num.subjects,sep="")) # if need to eliminate outliers, generate outlier vector if (signal.outlier) { outlier <- rep(0, num.subjects) } indicator <- TRUE iter <- 0 Q <- 1:num.subjects # deposit the index of individuals while (indicator) { if (signal.outlier) # eliminate outliers { addr <- which(outlier==1) num.outlier <- length(addr) n <- num.subjects-num.outlier }else { n <- num.subjects } xM <- matrix(data=0, nrow=n, ncol=n) for (i in 1:num.splits) { w <- (used.lines[i]+1):used.lines[i+1] x <- matrix(unlist(lapply(xStr[w], Str2Num)), nrow=each.lines[i], ncol=num.original, byrow=TRUE) is.na(x[x==miss.val]) <- TRUE # remove rm.subject.index if (!is.null(rm.subject.index)) { x <- x[,-rm.subject.index] } #print(x) # remove outlier if (signal.outlier) { if (num.outlier>0) { x <- x[,-addr] # eliminate outliers } } x <- x/2 z <- apply(x, 1, ModifyNormalization) # modify normalization by rows rm(w, x) xM <- xM + (z %*% t(z)) rm(z) } xM <- xM/num.markers #print(dim(xM)) # eigen analysis eig.list <- eigen(xM) eVal <- eig.list$values eVec <- eig.list$vectors # eigen test TW.stat <- NULL if (is.null(topK)) { a <- n - 1 wet <- tw(eVal[1:a], a, SignEigenPoint) topK <- wet$SigntEigenL TW.stat <- wet$statistic if (topK==0) { topK <- 2 } } else { a <- n - 1 wet <- tw(eVal[1:a], a, SignEigenPoint) TW.stat <- wet$statistic } # stop the iteration if (iter == iter.outlier) { indicator <- FALSE break } # find outliers #print("It is time to remove outliers") if (signal.outlier) { norm.eVec <- 
abs(scale(eVec[,(1:topK)], center = TRUE, scale = TRUE)) # normalization as ususal out.dex <- unique(which(norm.eVec >= sigma.thresh) %% n) #out.dex <- unique(which(abs(eVec[,1:5]) >= 0.2) %% n) out.dex[out.dex==0] <- n if (length(out.dex) > 0) { iter <- iter + 1 if (num.outlier > 0) { q1 <- Q[-addr] outlier[q1[out.dex]] <- 1 }else { outlier[out.dex] <- 1 } }else { indicator <- FALSE break } }else # don't eliminate outliers { indicator <- FALSE break } #print("------------------------------------------------------------------------") #print(list(outlier=which(outlier==1),indicator=indicator,iter=iter,iter.outlier=iter.outlier)) } #print("Eigen similarity finished") # output if (signal.outlier) { if (num.outlier < 1) { outQ <- Q outAddr <- NULL }else { outQ <- Q[-addr] outAddr <- addr } }else { outQ <- Q outAddr <- NULL num.outlier <- NULL } # compute the executed time timeY <- proc.time() - timeX #print(paste("run time=", timeY[1], sep="")) # return a list res.list <- list(num.markers=num.markers, num.subjects=num.subjects, rm.marker.index=rm.marker.index, rm.subject.index=rm.subject.index, TW.level=signt.eigen.level, signal.outlier=signal.outlier, iter.outlier=iter.outlier, sigma.thresh=sigma.thresh, num.outliers=num.outlier, outliers.index=outAddr, num.used.subjects=n, used.subjects.index=outQ, similarity.matrix=xM, eigenvalues=eVal, eigenvectors=eVec, topK=topK, TW.stat=TW.stat, topK.eigenvalues=eVal[1:topK], topK.eigenvectors=eVec[,(1:topK)], runtime=timeY[3] ) if (!is.null(outFile.Robj)) { save(res.list, file=outFile.Robj) } if (!is.null(outFile.txt)) { write.table(eVec[,1:topK], file=outFile.txt, row.names=F, col.names=F, sep="\t") } res.list }
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/EigenStrat_main.R
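Because `eigenstrat()` returns both the similarity matrix and the top eigenvectors, its output can feed the other tools in this package. The sketch below is an added illustration that reuses the simulated genotype file from the roxygen example above and passes the returned similarity matrix to `dr()`; the group indicator is arbitrary and only shows how the pieces fit together.

eigenstratG.eg <- matrix(rbinom(3000, 2, 0.5), ncol = 30)
write.table(eigenstratG.eg, file = "eigenstratG.eg.txt", quote = FALSE,
            sep = "", row.names = FALSE, col.names = FALSE)

es <- eigenstrat(genoFile = "eigenstratG.eg.txt", outFile.Robj = NULL,
                 outFile.txt = NULL, miss.val = 9, num.splits = 10,
                 topK = 2, signt.eigen.level = 0.01)

x.mat <- cbind(1, rep(c(0, 1), each = 15))  # intercept + arbitrary grouping
dr(es$similarity.matrix, null.space = 1, x.mat = x.mat,
   permute = TRUE, n.MonteCarlo = 50, seed = 1)

file.remove("eigenstratG.eg.txt")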
## Bootstrap to determine the number of groups
FindCNumRandom <- function(x, n, kG, n.monteCarlo)
{
    # x            --> data matrix
    # n            --> nrow(x)
    # kG           --> number of total clusters
    # n.monteCarlo --> simulation times

    # centralize x
    x <- scale(x, center=TRUE, scale=FALSE)

    W <- CalculateWAll(x, n, kG)

    WStar <- matrix(data=0, nrow=n.monteCarlo, ncol=kG)
    bound <- rbind(n, apply(x, 2, range))
    for (i in 1:n.monteCarlo)
    {
        y <- apply(bound, 2, UniformSample)
        WStar[i,] <- CalculateWAll(y, n, kG)
    }

    CalculateGapK(W, WStar, kG, n.monteCarlo)
}
/scratch/gouwar.j/cran-all/cranData/AssocTests/R/FindCNumRandom.R
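A short sketch of the gap-statistic machinery above, added for illustration: these helpers are internal (hence the `:::` accessor), and `FindCNumRandom()` also relies on a further internal `UniformSample()` helper that is not shown in this section, so this is only a sketch of the intended use. Two well-separated simulated clusters should usually yield an estimate of two groups.

set.seed(2)
x <- rbind(matrix(rnorm(100, mean = 0), ncol = 2),
           matrix(rnorm(100, mean = 4), ncol = 2))

AssocTests:::FindCNumRandom(x, n = nrow(x), kG = 5, n.monteCarlo = 20)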