# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

buildcov_deriv <- function(beta, dist, l, covmodel, nugget) {
    .Call(`_ARCokrig_buildcov_deriv`, beta, dist, l, covmodel, nugget)
}

log_objective_prior <- function(beta, dist, RInv, X, covmodel, nugget, prior) {
    .Call(`_ARCokrig_log_objective_prior`, beta, dist, RInv, X, covmodel, nugget, prior)
}

buildcov <- function(phi, dist, covmodel, nugget) {
    .Call(`_ARCokrig_buildcov`, phi, dist, covmodel, nugget)
}

compute_distance <- function(input1, input2) {
    .Call(`_ARCokrig_compute_distance`, input1, input2)
}

sample_mvt <- function(mu, L, sigma, df, nsample) {
    .Call(`_ARCokrig_sample_mvt`, mu, L, sigma, df, nsample)
}

compute_S <- function(output, Q) {
    .Call(`_ARCokrig_compute_S`, output, Q)
}

compute_Svec <- function(output, Q) {
    .Call(`_ARCokrig_compute_Svec`, output, Q)
}

compute_S_sum <- function(y_t, H_t, y_t1, RInv, K) {
    .Call(`_ARCokrig_compute_S_sum`, y_t, H_t, y_t1, RInv, K)
}

compute_prediction <- function(y_t, Ht, y_t1, yhat_t1, vhat_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk) {
    .Call(`_ARCokrig_compute_prediction`, y_t, Ht, y_t1, yhat_t1, vhat_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk)
}

conditional_simulation <- function(y_t, Ht, y_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk) {
    .Call(`_ARCokrig_conditional_simulation`, y_t, Ht, y_t1, RInv, Hnew, Wnew_t1, Rmo, R_sk)
}

compute_param <- function(y_t, Ht, y_t1, RInv) {
    .Call(`_ARCokrig_compute_param`, y_t, Ht, y_t1, RInv)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/ARCokrig/R/RcppExports.R
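# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated file above): a minimal illustration
# of how the C++ wrappers fit together, assuming the ARCokrig package is
# installed. These helpers are internal, so they are reached with `:::` here;
# the design matrix `x` and the range parameters `phi` below are made up for
# illustration only.
x   = matrix(runif(20), nrow = 10, ncol = 2)   # 10 design points in 2 dimensions
d   = ARCokrig:::compute_distance(x, x)        # componentwise distance array (n x n x p)
phi = c(0.5, 0.8)                              # one range parameter per input dimension
R   = ARCokrig:::buildcov(phi, d, covmodel = "matern_5_2", nugget = FALSE)
# R is the n x n correlation matrix used throughout the estimation and
# prediction routines below (via chol(R), chol2inv(), etc.).
# ---------------------------------------------------------------------------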
############################################################################# ############################################################################# #### Functions in Univariate Autoregressive Cokriging Models ############################################################################# ############################################################################# margin.posterior= function(param, input, output, level, H, dist, cov.model="matern_5_2", prior="JR", hyperparam=NULL){ Dim = dim(dist)[3] p.x = Dim S = length(output) if(is.null(hyperparam)){ al = 0.5-p.x bl = 1 * dim(dist)[1]^(-1/p.x) * (al + p.x) nugget.UB = 1 }else{ al = hyperparam$a bl = hyperparam$b * dim(dist)[1]^(-1/p.x) * (al + p.x) nugget.UB = hyperparam$nugget.UB } # param contains phi and nugget (maybe) if(length(param)==Dim){ #no nugget phi = exp(-param) is.nugget=FALSE }else{ phi = exp(-param[1:Dim]) nugget = nugget.UB*exp(param[Dim+1]) / (1 + exp(param[Dim+1])) is.nugget = TRUE phi = c(phi, nugget) } q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] } n = sapply(output, length) # input.max = apply(input[[level]], 2, max) # input.min = apply(input[[level]], 2, min) # Cl = abs(input.max-input.min) Cl = hyperparam$Cl t = level if(level==1){ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) X.aug = H[[t]] y.aug = output[[t]] HRH = t(X.aug)%*%RInv%*%X.aug #+ diag(1e-6,dim(X.aug)[2]) HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) betahat = HRHInv%*%(t(X.aug)%*%(RInv%*%y.aug)) res = y.aug - X.aug%*%betahat SSE = c(t(res)%*%RInv%*%res) logdetG = -sum(log(diag(U))) g = logdetG - sum(log(diag(HRHchol))) + (-0.5)*(n[t]-q[t])*log(SSE) if(prior=="Ind_Jeffreys"){ g = g + 0.5*(q[t])*log(SSE) } }else{ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) IB = match.input(input[[t]], input[[t-1]])$IB y_t1 = as.matrix(output[[t-1]][IB]) X.aug = cbind(H[[t]], y_t1) y.aug = output[[t]] HRH = t(X.aug)%*%RInv%*%X.aug #+ diag(1e-6,dim(X.aug)[2]) HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) betahat = HRHInv%*%(t(X.aug)%*%(RInv%*%y.aug)) res = y.aug - X.aug%*%betahat SSE = c(t(res)%*%RInv%*%res) logdetG = -sum(log(diag(U))) g = logdetG - sum(log(diag(HRHchol))) + (-0.5)*(n[t]-q[t]-1)*log(SSE) if(prior=="Ind_Jeffreys"){ g = g + 0.5*(q[t]+1)*log(SSE) } } X = X.aug ## compute priors if(is.nugget){ lnJacobian = sum(param[1:Dim]) + log(nugget) + log(nugget.UB - nugget) - log(abs(nugget.UB+(nugget.UB-1)*nugget)) # - sum(log(phi)) + log(nugget) if(prior=="JR"){ temp = sum(1/phi[1:Dim] * Cl) + nugget ln_prior = lnJacobian + al*log(temp) - bl*temp }else{ lnIJ_prior = log_objective_prior(c(1/phi, nugget), dist, RInv, X, cov.model, is.nugget, prior) ln_prior = lnIJ_prior + lnJacobian } }else{ lnJacobian = sum(param) if(prior=="JR"){ temp = sum(1/phi * Cl) ln_prior = lnJacobian + al*log(temp) - bl*temp }else{ lnIJ_prior = log_objective_prior(c(1/phi), dist, RInv, X, cov.model, is.nugget, prior) ln_prior = lnIJ_prior + lnJacobian } } g = ln_prior + g return(g) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# condsim.ND.univariate = function(formula, output, input, input.new, phi, cov.model="matern_5_2", nsample){ Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ 
is.nugget=TRUE } S = length(output) n = sapply(output, length) np = dim(input.new)[1] y = output ## add new inputs to missing inputs # input.miss = list() # input.union = list() # for(t in 1:S){ # input.miss[[t]] = input.new # input.union[[t]] = rbind(input.new, input[[t]]) # } input.miss = list() input.union = list() pred.ID = list() ID.orig = list() index.full = 1:np indB = list() for(t in 1:(S)){ ind.list = match.input(input[[t]], input.new) # indlist = ismember(input.new, input[[t]]) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB[[t]] = ind.list$IB pred.ID.exist = indA input.exist = input[[t]][indA, ,drop=FALSE] # common inputs input.added = input.new[-indB[[t]], ,drop=FALSE] # inputs in input.new but not in input[[t]] n.added = dim(input.added)[1] pred.ID.added = seq(1, n.added, by=1) ID.orig[[t]] = c(index.full[-indB[[t]]], indB[[t]]) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = input.added input.union[[t]] = rbind(input.added, input[[t]]) }else{ ID.orig[[t]] = 1:np pred.ID[[t]] = 1:np input.miss[[t]] = input.new input.union[[t]] = rbind(input.new, input[[t]]) } } n.m = rep(NA, S) for(t in 1:S){ n.m[t] = dim(input.miss[[t]])[1] } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] ym.hat[[t]] = matrix(NA, nsample, n.m[[t]]) } ################################################################################# #### Sampling from predictive distribution ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) #UH = backsolve(U, H[[t]], transpose=TRUE) #HRHInv = solve(crossprod(UH)) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) betahat = HRHInv%*%t(H[[t]])%*%(RInv%*%y[[t]]) res = y[[t]] - H[[t]]%*%betahat SSE = c(t(res)%*%RInv%*%res) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) RmoRInv = Rmo%*%RInv # XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) XXRR = Hm[[t]] - RmoRInv%*%H[[t]] # Sig_ymymy = Rm - Rmo%*%RInv%*%t(Rmo) + (XXRR)%*%HRHInv%*%t(XXRR) Sig_ymymy = Rm - tcrossprod(RmoU) + (XXRR)%*%HRHInv%*%t(XXRR) Sig = Sig_ymymy * SSE/(n[t] - q[t]) #Sig = Sig_ymymy * SSE/(n[t]) mu_ymy = Hm[[t]]%*%betahat + RmoRInv%*%res #ym.hat[[t]] = c(mu_ymy) #krige[[t]] = ym.hat[[t]][pred.ID[[t]]] ym.hat[[t]] = mvtnorm::rmvt(nsample, sigma=Sig, df=n[t]-q[t], delta=mu_ymy, type="shifted") #Sig[Sig<0] = 0 #krigeSE[[t]] = sqrt(diag(Sig)[pred.ID[[t]]]) # for(k 
in 1:nsample){ for(t in 2:(S)){ ############################################################################ #### estimate missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) IB = match.input(input[[t]], input[[t-1]])$IB y_t1 = y[[t-1]][IB] X = cbind(H[[t]], y_t1) XRXInv = solve(t(X)%*%RInv%*%X) betahat = XRXInv%*%t(X)%*%(RInv%*%y[[t]]) res = y[[t]] - X%*%betahat SSE = c(t(res)%*%RInv%*%res) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) # IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB # ym_t1 = ym.hat[[t-1]][ ,IB] ym_t1 = matrix(NA, nsample, n.m[t]) for(k in 1:nsample){ ym_t1[k, ] = create.w.new(t=t, input=input[[t-1]], input.miss=input.miss, y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k,])) Xm = cbind(Hm[[t]], as.matrix(ym_t1[k, ])) XXRR = t(Xm) - t(X)%*%RInv%*%t(Rmo) Sig_ymymy = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%XRXInv%*%XXRR Sig = Sig_ymymy * SSE/(n[t]-q[t]-1) mu_ymy = Xm%*%betahat + Rmo%*%(RInv%*%res) ym.hat[[t]][k, ] = c(mvtnorm::rmvt(1, sigma=Sig, df=n[t]-q[t]-1, delta=mu_ymy, type="shifted")) } } # } ## get summary statistics krige = list() krigeSE = list() krige.lower95 = list() krige.upper95 = list() for(t in 1:S){ # yhat[[t]] = ym.hat[[t]][ ,pred.ID[[t]], ] krige[[t]] = apply(ym.hat[[t]], 2, mean) krigeSE[[t]] = apply(ym.hat[[t]], 2, sd) krige.lower95[[t]] = apply(ym.hat[[t]], 2, quantile, 0.025) krige.upper95[[t]] = apply(ym.hat[[t]], 2, quantile, 0.975) } pred.mu = list() pred.SE = list() pred.lower95 = list() pred.upper95 = list() for(t in 1:S){ pred.mu[[t]] = rep(NA, np) pred.SE[[t]] = rep(0, np) pred.lower95[[t]] = rep(NA, np) pred.upper95[[t]] = rep(NA, np) ind.list = ismember(input.new, input.miss[[t]]) pred.mu[[t]][ind.list$IIA] = krige[[t]][ind.list$IA] pred.SE[[t]][ind.list$IIA] = krigeSE[[t]][ind.list$IA] pred.lower95[[t]][ind.list$IIA] = krige.lower95[[t]][ind.list$IA] pred.upper95[[t]][ind.list$IIA] = krige.upper95[[t]][ind.list$IA] if(length(ind.list$IIA)<np){ ind.input = ismember(input.new, input[[t]]) pred.mu[[t]][ind.input$IIA] = y[[t]][ind.input$IA] pred.lower95[[t]][ind.input$IIA] = y[[t]][ind.input$IA] pred.upper95[[t]][ind.input$IIA] = y[[t]][ind.input$IA] } } names(pred.mu) = paste0("Level", seq(1:S), "") names(pred.SE) = paste0("Level", seq(1:S), "") names(pred.lower95) = paste0("Level", seq(1:S), "") names(pred.upper95) = paste0("Level", seq(1:S), "") pred = list(mu=pred.mu, SE=pred.SE, lower95=pred.lower95,upper95=pred.upper95) return(pred) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# compute.g.univ = function(param, input.list, level, y, H, ym, Hm, dist, hyper, cov.model){ ## compute g function at fidelity level t ## # param is a vector Dim = dim(dist)[3] p.x = Dim N = ncol(y[[1]]) nsample = dim(ym[[1]])[1] if(is.null(hyper)){ al = 0.5-p.x bl = dim(dist)[1]^(-1/p.x) * (al + p.x) nugget.UB = 1 }else{ al = hyper$a bl = hyper$b * dim(dist)[1]^(-1/p.x) * (al + p.x) nugget.UB = hyper$nugget.UB } # param contains phi and nugget (maybe) if(length(param)==Dim){ #no nugget phi = exp(-param) # phi = 1/param is.nugget=FALSE }else{ phi = 
exp(-param[1:Dim]) nugget = nugget.UB*exp(param[Dim+1]) / (1 + exp(param[Dim+1])) is.nugget = TRUE phi = c(phi, nugget) } # inputlist = augment.input(input) # input.miss = inputlist$miss # input.union = inputlist$union input = input.list$input input.miss = input.list$input.miss S = length(y) n = rep(NA, S) n.aug = rep(NA, S) for(t in 1:S){ n[t] = dim(y[[t]])[1] if(t<S){ n.aug[t] = n[t] + dim(Hm[[t]])[1] }else{ n.aug[t] = n[t] } } q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] } # input.max = apply(input[[level]], 2, max) # input.min = apply(input[[level]], 2, min) # Cl = abs(input.max-input.min) Cl = hyper$Cl if(is.nugget){ lnJacobian = sum(param[1:Dim]) + log(nugget) + log(nugget.UB - nugget) - log(abs(nugget.UB+(nugget.UB-1)*nugget)) temp = sum(1/phi[1:Dim] * Cl) + nugget }else{ lnJacobian = sum(param) temp = sum(1/phi * Cl) } # if(is.nugget){ # lnJacobian = sum(log(param[1:Dim])) + log(nugget) + log(nugget.UB - nugget) - # log(abs(nugget.UB+(nugget.UB-1)*nugget)) # temp = sum(1/phi[1:Dim] * Cl) + nugget # }else{ # lnJacobian = sum(log(param)) # temp = sum(1/phi * Cl) # } lnJacobian = lnJacobian + al*log(temp) - bl*temp #print(phi) t = level if(level==1){ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) X = H[[t]] Xm = Hm[[t]] X.aug = rbind(X, Xm) RInvX = RInv%*%X.aug HRH = t(X.aug)%*%RInvX HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) # compute Q Q = RInv - RInvX%*%HRHInv%*%t(RInvX) S_2_log = 0 for(k in 1:nsample){ y.aug = rbind(y[[t]], as.matrix(ym[[t]][k, , ])) S_2_log = S_2_log + compute_S(y.aug, Q) } S_2_log = S_2_log / nsample # S_2_log = compute_S3D(y[[t]], ym[[t]], Q) # too slow g = -N*sum(log(diag(U))) - N*sum(log(diag(HRHchol))) - 0.5*(n.aug[t]-q[t])*S_2_log }else if(level>1 & level<S){ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) ## compute QH H.aug = rbind(H[[t]], Hm[[t]]) RInvH = RInv%*%H.aug HRH = t(H.aug)%*%RInvH HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) K = RInvH%*%HRHInv%*%t(RInvH) #QH = RInv - K y_t1 = array(NA, dim=c(nsample, n[t], N)) ym_t1 = array(NA, dim=c(nsample, nrow(Hm[[t]]), N)) S_2_log_sum = 0 for(k in 1:nsample){ # IB = match.input(input[[t]], input.miss[[t-1]])$IB # y_t1 = as.matrix(ym[[t-1]][IB]) y_t1[k, , ] = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym[[t-1]][k, , ])) # IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB # ym_t1 = as.matrix(ym[[t-1]][IB]) ym_t1[k, , ] = create.w(t=t, input=input.miss, input.miss=input.miss[[t-1]], y=as.matrix(ym[[t-1]][k, , ]), ym=as.matrix(ym[[t-1]][k, , ])) y.aug = rbind(y[[t]], as.matrix(ym[[t]][k, , ])) y_t1.aug = rbind(as.matrix(y_t1[k, , ]), as.matrix(ym_t1[k, , ])) S_2_log_sum = S_2_log_sum + compute_S_sum(y.aug, H.aug, y_t1.aug, RInv, K) } S_2_log_sum = S_2_log_sum / nsample g = - N*sum(log(diag(U))) - 0.5*S_2_log_sum }else{ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) RInvH = RInv%*%H[[t]] HRH = t(H[[t]])%*%RInvH HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) K = RInvH%*%HRHInv%*%t(RInvH) #QH = RInv - K y_t1 = array(NA, dim=c(nsample, n[t], N)) logdetX = matrix(0, nsample, N) S_2_log = matrix(0, nsample, N) S_2_log_sum = 0 for(k in 1:nsample){ y_t1[k, , ] = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym[[t-1]][k, , ])) S_2_log_sum = S_2_log_sum + compute_S_sum(y[[t]], H[[t]], as.matrix(y_t1[k, ,]), RInv, K) # out = compute_S_sum2(y[[t]], H[[t]], y_t1[k, ,], RInv) } S_2_log_sum = 
S_2_log_sum / nsample g = -N*sum(log(diag(U))) - 0.5*S_2_log_sum } g = lnJacobian + g return(g) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# condsim.NN.univariate <- function(formula,output,input,input.new,phi,cov.model, nsample){ Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } ################################################################### #### augment input ################################################################### S = length(output) # number of code out = augment.input(input) input.union = out$union input.miss = out$miss np = dim(input.new)[1] n = sapply(output, length) n.aug = rep(NA, S) y = output ## add new inputs to missing inputs # for(t in 1:(S-1)){ # input.miss[[t]] = rbind(input.new, input.miss[[t]]) # } pred.ID = list() ID.org = list() index.full = 1:np for(t in 1:(S-1)){ ind.list = match.input(input.union[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, ,drop=FALSE] input.added = input.new[-indB, ,drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1,n.added,by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = rbind(input.added, input.miss[[t]]) input.union[[t]] = rbind(input.miss[[t]], input[[t]]) }else{ ID.org[[t]] = 1:np pred.ID[[t]] = 1:np input.miss[[t]] = rbind(input.new, input.miss[[t]]) input.union[[t]] = rbind(input.new, input.union[[t]]) } } t = S ind.list = match.input(input[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, , drop=FALSE] input.added = input.new[-indB, , drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1, n.added, by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = input.added }else{ input.miss[[t]] = input.new ID.org[[t]] = 1:np pred.ID[[t]] = 1:np } input.union[[S]] = rbind(input.new, input.union[[S]]) ## add new inputs to union of inputs # for(t in 1:S){ # input.union[[t]] = rbind(input.new, input.union[[t]]) # } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } n.m = rep(NA, S) for(t in 1:S){ n.m[t] = dim(input.miss[[t]])[1] } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ## ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] 
ym.hat[[t]] = matrix(NA, nsample, dim(Hm[[t]])[1]) } ################################################################################# #### Sampling from predictive distribution ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) #UH = backsolve(U, H[[t]], transpose=TRUE) #HRHInv = solve(crossprod(UH)) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) betahat = HRHInv%*%t(H[[t]])%*%(RInv%*%y[[t]]) res = y[[t]] - H[[t]]%*%betahat SSE = c(t(res)%*%RInv%*%res) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) #RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) #XXRR = t(Hm[[t]]) - t(RmoU%*%UH) Sig_ymymy = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR #Sig_ymymy = Rm - tcrossprod(RmoU) + t(XXRR)%*%HRHInv%*%XXRR Sig = Sig_ymymy * SSE/(n[t] - q[t]) #Sig = Sig_ymymy * SSE/(n[t]) mu_ymy = Hm[[t]]%*%betahat + Rmo%*%(RInv%*%res) ym.hat[[t]] = mvtnorm::rmvt(nsample, sigma=Sig, df=n[t]-q[t], delta=mu_ymy, type="shifted") for(t in 2:(S)){ ############################################################################ #### estimate missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) R_sk = Rm - Rmo%*%RInv%*%t(Rmo) for(k in 1:nsample){ y_t1 = create.w.univ(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=(ym.hat[[t-1]][k, ])) ym_t1 = create.w.new(t=t, input=input[[t-1]], input.miss=input.miss, y=y[[t-1]], ym=(ym.hat[[t-1]][k, ])) # ym.hat[[t]][k, ] = conditional_simulation(y[[t]], H[[t]], as.matrix(y_t1), # RInv, Hm[[t]], as.matrix(ym_t1), Rmo, R_sk) X = cbind(H[[t]], y_t1) XRXInv = solve(t(X)%*%RInv%*%X) betahat = XRXInv%*%t(X)%*%(RInv%*%y[[t]]) res = y[[t]] - X%*%betahat SSE = c(t(res)%*%RInv%*%res) Xm = cbind(Hm[[t]], ym_t1) XXRR = t(Xm) - t(X)%*%RInv%*%t(Rmo) Sig_ymymy = R_sk + t(XXRR)%*%XRXInv%*%XXRR Sig = Sig_ymymy * SSE/(n[t]-q[t]-1) mu_ymy = Xm%*%betahat + Rmo%*%(RInv%*%res) ym.hat[[t]][k, ] = c(mvtnorm::rmvt(1, sigma=Sig, df=n[t]-q[t]-1, delta=mu_ymy, type="shifted")) } } ################################################################################ ## get summary statistics krige = list() krigeSE = list() krige.lower95 = list() krige.upper95 = list() for(t in 1:S){ # yhat[[t]] = ym.hat[[t]][ ,pred.ID[[t]], ] krige[[t]] = apply(ym.hat[[t]], 2, mean) krigeSE[[t]] = apply(ym.hat[[t]], 2, sd) krige.lower95[[t]] = apply(ym.hat[[t]], 2, quantile, 0.025) krige.upper95[[t]] = apply(ym.hat[[t]], 2, quantile, 0.975) } pred.mu = list() pred.SE = list() pred.lower95 = list() pred.upper95 = list() for(t in 1:S){ pred.mu[[t]] = rep(NA, np) pred.SE[[t]] = rep(0, np) pred.lower95[[t]] = rep(NA, np) pred.upper95[[t]] = rep(NA, np) ind.list = ismember(input.new, input.miss[[t]]) pred.mu[[t]][ind.list$IIA] = krige[[t]][ind.list$IA] pred.SE[[t]][ind.list$IIA] = krigeSE[[t]][ind.list$IA] pred.lower95[[t]][ind.list$IIA] = krige.lower95[[t]][ind.list$IA] pred.upper95[[t]][ind.list$IIA] = krige.upper95[[t]][ind.list$IA] if(length(ind.list$IIA)<np){ ind.input = ismember(input.new, input[[t]]) pred.mu[[t]][ind.input$IIA] = y[[t]][ind.input$IA] 
pred.lower95[[t]][ind.input$IIA] = y[[t]][ind.input$IA]
      pred.upper95[[t]][ind.input$IIA] = y[[t]][ind.input$IA]
    }
  }
  names(pred.mu) = paste0("Level", seq(1:S), "")
  names(pred.SE) = paste0("Level", seq(1:S), "")
  names(pred.lower95) = paste0("Level", seq(1:S), "")
  names(pred.upper95) = paste0("Level", seq(1:S), "")
  pred = list(mu=pred.mu, SE=pred.SE, lower95=pred.lower95, upper95=pred.upper95)
  return(pred)
}
#############################################################################
#############################################################################
#############################################################################
#############################################################################
#### Functions in Multivariate Autoregressive Cokriging Models
#############################################################################
#############################################################################
margin.posterior.mv = function(param, input, output, level, H, dist,
                               cov.model="matern_5_2", hyper=NULL){
  Dim = dim(dist)[3]
  p.x = Dim
  t = level
  n = dim(output[[t]])[1]  # number of model runs
  N = dim(output[[t]])[2]  # number of spatial locations
  if(is.null(hyper)){
    al = 0.5-p.x
    bl = 1 * dim(dist)[1]^(-1/p.x) * (al + p.x)
    nugget.UB = 1
  }else{
    al = hyper$a
    bl = hyper$b * dim(dist)[1]^(-1/p.x) * (al + p.x)
    nugget.UB = hyper$nugget.UB
  }
  # param contains phi and nugget (maybe)
  if(length(param)==Dim){ # no nugget
    phi = exp(-param)
    is.nugget = FALSE
  }else{
    phi = exp(-param[1:Dim])
    nugget = nugget.UB*exp(param[Dim+1]) / (1 + exp(param[Dim+1]))
    is.nugget = TRUE
    phi = c(phi, nugget)
  }
  q = dim(H)[2]
  # Cl = dim(dist)[1]^(-1/p.x)
  # input.max = apply(input[[level]], 2, max)
  # input.min = apply(input[[level]], 2, min)
  # Cl = abs(input.max-input.min)
  Cl = hyper$Cl
  if(is.nugget){
    lnJacobian = sum(param[1:Dim]) + log(nugget) + log(nugget.UB - nugget) -
      log(abs(nugget.UB+(nugget.UB-1)*nugget))
    temp = sum(1/phi[1:Dim] * Cl) + nugget
  }else{
    lnJacobian = sum(param)
    temp = sum(1/phi * Cl)
  }
  lnJacobian = lnJacobian + al*log(temp) - bl*temp
  if(level==1){
    R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget)
    U = chol(R)
    RInv = chol2inv(U)
    X.aug = H
    HRH = t(X.aug)%*%RInv%*%X.aug #+ diag(1e-6,dim(X.aug)[2])
    HRHchol = chol(HRH)
    HRHInv = chol2inv(HRHchol)
    # compute Q
    Q = RInv%*%(diag(n)-X.aug%*%HRHInv%*%t(X.aug)%*%RInv)
    S_2_log = compute_S(output[[t]], Q)
    g = -N*sum(log(diag(U))) - N*sum(log(diag(HRHchol))) - 0.5*(n-q)*S_2_log
  }else{
    R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget)
    U = chol(R)
    RInv = chol2inv(U)
    RInvH = RInv%*%H
    HRH = t(H)%*%RInvH
    HRHchol = chol(HRH)
    HRHInv = chol2inv(HRHchol)
    K = RInvH%*%HRHInv%*%t(RInvH)
    #QH = RInv - K
    IB = match.input(input[[t]], input[[t-1]])$IB
    y_t1 = output[[t-1]][IB, ]
    S_2_log_sum = compute_S_sum(output[[t]], H, y_t1, RInv, K)
    g = -N*sum(log(diag(U))) - 0.5*S_2_log_sum
  }
  g = lnJacobian + g
  return(g)
}
#############################################################################
#############################################################################
#############################################################################
#############################################################################
compute.g = function(param, input.list, level, y, H, ym, Hm, dist, hyper,
                     cov.model="matern_5_2"){
  ## compute g function at fidelity level t ##
  # param is a vector
  Dim = dim(dist)[3]
  p.x = Dim
  N = ncol(y[[1]])
  nsample = dim(ym[[1]])[1]
  if(is.null(hyper)){
    al = 0.5-p.x
    bl = dim(dist)[1]^(-1/p.x) * (al + p.x)
    nugget.UB = 1
  }else{
    al =
hyper$a bl = hyper$b * dim(dist)[1]^(-1/p.x) * (al + p.x) nugget.UB = hyper$nugget.UB } # param contains phi and nugget (maybe) if(length(param)==Dim){ #no nugget phi = exp(-param) # phi = 1/param is.nugget=FALSE }else{ phi = exp(-param[1:Dim]) nugget = nugget.UB*exp(param[Dim+1]) / (1 + exp(param[Dim+1])) is.nugget = TRUE phi = c(phi, nugget) } # inputlist = augment.input(input) # input.miss = inputlist$miss # input.union = inputlist$union input = input.list$input input.miss = input.list$input.miss S = length(y) n = rep(NA, S) n.aug = rep(NA, S) for(t in 1:S){ n[t] = dim(y[[t]])[1] if(t<S){ n.aug[t] = n[t] + dim(Hm[[t]])[1] }else{ n.aug[t] = n[t] } } q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] } # input.max = apply(input[[level]], 2, max) # input.min = apply(input[[level]], 2, min) # Cl = abs(input.max-input.min) Cl = hyper$Cl if(is.nugget){ lnJacobian = sum(param[1:Dim]) + log(nugget) + log(nugget.UB - nugget) - log(abs(nugget.UB+(nugget.UB-1)*nugget)) temp = sum(1/phi[1:Dim] * Cl) + nugget }else{ lnJacobian = sum(param) temp = sum(1/phi * Cl) } # if(is.nugget){ # lnJacobian = sum(log(param[1:Dim])) + log(nugget) + log(nugget.UB - nugget) - # log(abs(nugget.UB+(nugget.UB-1)*nugget)) # temp = sum(1/phi[1:Dim] * Cl) + nugget # }else{ # lnJacobian = sum(log(param)) # temp = sum(1/phi * Cl) # } lnJacobian = lnJacobian + al*log(temp) - bl*temp #print(phi) t = level if(level==1){ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) X = H[[t]] Xm = Hm[[t]] X.aug = rbind(X, Xm) RInvX = RInv%*%X.aug HRH = t(X.aug)%*%RInvX HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) # compute Q Q = RInv - RInvX%*%HRHInv%*%t(RInvX) S_2_log = 0 for(k in 1:nsample){ y.aug = rbind(y[[t]], as.matrix(ym[[t]][k, , ])) S_2_log = S_2_log + compute_S(y.aug, Q) } S_2_log = S_2_log / nsample # S_2_log = compute_S3D(y[[t]], ym[[t]], Q) # too slow g = -N*sum(log(diag(U))) - N*sum(log(diag(HRHchol))) - 0.5*(n.aug[t]-q[t])*S_2_log }else if(level>1 & level<S){ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) ## compute QH H.aug = rbind(H[[t]], Hm[[t]]) RInvH = RInv%*%H.aug HRH = t(H.aug)%*%RInvH HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) K = RInvH%*%HRHInv%*%t(RInvH) #QH = RInv - K y_t1 = array(NA, dim=c(nsample, n[t], N)) ym_t1 = array(NA, dim=c(nsample, nrow(Hm[[t]]), N)) S_2_log_sum = 0 for(k in 1:nsample){ # IB = match.input(input[[t]], input.miss[[t-1]])$IB # y_t1 = as.matrix(ym[[t-1]][IB]) y_t1[k, , ] = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym[[t-1]][k, , ])) # IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB # ym_t1 = as.matrix(ym[[t-1]][IB]) ym_t1[k, , ] = create.w(t=t, input=input.miss, input.miss=input.miss[[t-1]], y=as.matrix(ym[[t-1]][k, , ]), ym=as.matrix(ym[[t-1]][k, , ])) y.aug = rbind(y[[t]], as.matrix(ym[[t]][k, , ])) y_t1.aug = rbind(as.matrix(y_t1[k, , ]), as.matrix(ym_t1[k, , ])) S_2_log_sum = S_2_log_sum + compute_S_sum(y.aug, H.aug, y_t1.aug, RInv, K) } S_2_log_sum = S_2_log_sum / nsample g = - N*sum(log(diag(U))) - 0.5*S_2_log_sum }else{ R = buildcov(phi, dist, covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) RInvH = RInv%*%H[[t]] HRH = t(H[[t]])%*%RInvH HRHchol = chol(HRH) HRHInv = chol2inv(HRHchol) K = RInvH%*%HRHInv%*%t(RInvH) #QH = RInv - K y_t1 = array(NA, dim=c(nsample, n[t], N)) logdetX = matrix(0, nsample, N) S_2_log = matrix(0, nsample, N) S_2_log_sum = 0 for(k in 1:nsample){ y_t1[k, , ] = create.w(t=t, input=input, 
input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym[[t-1]][k, , ])) S_2_log_sum = S_2_log_sum + compute_S_sum(y[[t]], H[[t]], y_t1[k, ,], RInv, K) # out = compute_S_sum2(y[[t]], H[[t]], y_t1[k, ,], RInv) } S_2_log_sum = S_2_log_sum / nsample g = -N*sum(log(diag(U))) - 0.5*S_2_log_sum } g = lnJacobian + g return(g) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# fit.ND=function(formula, output, input, phi, cov.model, prior, opt){ hyperparam = prior$hyper Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } S = length(output) ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) } ################################################################### #### compute intermediate quantities ################################################################### distlist = list() for(t in 1:S){ distlist[[t]] = compute_distance(input[[t]], input[[t]]) } Cl = list() for(t in 1:S){ input.max = apply(input[[t]], 2, max) input.min = apply(input[[t]], 2, min) Cl[[t]] = abs(input.max-input.min) } for(t in 1:S){ hyperparam[[t]]$Cl = Cl[[t]] } ################################################################### #### begin optimization algorithm ################################################################### phi.new = phi for(t in 1:S){ if(is.nugget){ nu = log(phi[p.x+1, t]) - log(hyperparam[[t]]$nugget.UB-phi[p.x+1, t]) # logit of nugget init.val = c(-log(phi[1:p.x, t]), nu) }else{ init.val = -log(phi[ ,t]) } fit = try(optim(init.val, margin.posterior.mv, input=input, output=output, level=t, H=H[[t]], dist=distlist[[t]], cov.model=cov.model, hyper=hyperparam[[t]], control=list(fnscale=-1, maxit=opt$maxit), method=opt$method, lower=opt$lower, upper=opt$upper), silent=T) if(inherits(fit, "try-error")){ phi.new[ ,t] = phi[ ,t] message("\n optimization error, skip t=", t, "\n") print(fit) }else{ if(is.nugget){ nugget = hyperparam[[t]]$nugget.UB*exp(fit$par[p.x+1]) / (1+exp(fit$par[p.x+1])) phi.new[ ,t] = c(exp(-fit$par[1:p.x]), nugget) }else{ phi.new[ ,t] = exp(-fit$par) } } } colnames(phi.new) = paste0("Level", seq(1:S), "") return(list(par=phi.new)) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# fit.NN <- function(formula,output,input,phi,cov.model,prior, opt, MCEM){ hyperparam = prior$hyperparam maxit = MCEM$maxit tol = MCEM$tol n.sample = MCEM$n.sample verbose = MCEM$verbose Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } ################################################################### #### augment input ################################################################### S = length(output) # number of code out = augment.input(input) input.union = out$union input.miss = out$miss input.list = list(input=input, input.miss=input.miss) Cl = 
list() for(t in 1:S){ input.max = apply(input.list$input[[t]], 2, max) input.min = apply(input.list$input[[t]], 2, min) Cl[[t]] = abs(input.max-input.min) } for(t in 1:S){ hyperparam[[t]]$Cl = Cl[[t]] } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) if(t<S){ colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) } distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ################################################################### #### begin MCEM algorithm ################################################################### phi.new = phi conv = FALSE iter = 1 while(!conv){ ############################################################### #### generate M Monte Carlo samples for missing data ############################################################### # y.m = list() #system.time( # for(k in 1:n.sample){ # y.m[[k]] = sample.ym(y=output,input=input,param=phi,Ho=H,Hm=Hm,dist.o=dist.o, # dist.m=dist.m,dist.mo=dist.mo,cov.model=cov.model) # } y.m = sample.ym(y=output,input=input,param=phi,Ho=H,Hm=Hm,dist.o=dist.o, dist.m=dist.m,dist.mo=dist.mo,cov.model=cov.model, nsample=n.sample) #) ############################################################### #### compute and maximize the Q function at each fidelity ############################################################### for(t in 1:S){ if(is.nugget){ nu = log(phi[p.x+1, t]) - log(hyperparam[[t]]$nugget.UB-phi[p.x+1, t]) # logit of nugget init.val = c(-log(phi[1:p.x, t]), nu) }else{ init.val = -log(phi[ ,t]) } fit = try(optim(init.val, compute.g, input.list=input.list, level=t, y=output, H=H, ym=y.m, Hm=Hm, dist=distlist[[t]], hyper=hyperparam[[t]], cov.model=cov.model, control=list(fnscale=-1, maxit=opt$maxit), method=opt$method, lower=opt$lower, upper=opt$upper), silent=T) # fit = try(optimr(init.val, compute.Q.default, input=input, level=t, y=output, H=H, y.m=y.m, Hm=Hm, # distlist=distlist, cov.model=cov.model, # control=list(fnscale=-1, maxit=opt$maxit), # method=opt$method, lower=opt$lower, upper=opt$upper), # silent=T) if(inherits(fit, "try-error")){ phi.new[ ,t] = phi[ ,t] message("\n optimization error, skip t=", t, "\n") print(fit) }else{ if(is.nugget){ nugget = hyperparam[[t]]$nugget.UB*exp(fit$par[p.x+1]) / (1+exp(fit$par[p.x+1])) phi.new[ ,t] = c(exp(-fit$par[1:p.x]), nugget) }else{ phi.new[ ,t] = exp(-fit$par) } } } ############################################################### #### check convergence if(inherits(fit, "try-error")){ diff = tol + 1 }else{ diff = mean((phi.new - phi)^2) } if(verbose){ message("iter=", iter, "\n") } if(iter>maxit){ conv = TRUE }else{ if(diff<tol){ conv = TRUE } } phi = phi.new iter = iter + 1 } colnames(phi) = paste0("Level", seq(1:S), "") return(list(par=phi, eps=diff, iter=iter)) } 
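#############################################################################
# Argument sketch for the internal fitters above (illustrative, not an
# exported API): the shapes below are inferred from how fit.ND() and fit.NN()
# use their arguments, assuming a 2-level multivariate cokriging model with
# made-up data; every numeric value is for illustration only.
S       = 2                                    # two fidelity levels
input   = list(matrix(runif(30), 15, 2),       # 15 low-fidelity runs, 2 inputs each
               matrix(runif(16),  8, 2))       # 8 high-fidelity runs
output  = list(matrix(rnorm(15*50), 15, 50),   # 50 spatial locations per run
               matrix(rnorm( 8*50),  8, 50))
formula = list(~1, ~1)                         # constant mean basis at each level
phi     = matrix(0.5, nrow = 2, ncol = S)      # initial range parameters (p.x rows, no nugget row)
prior   = list(hyperparam = rep(list(list(a = 0.5 - 2,  # mirrors default al = 0.5 - p.x
                                          b = 1, nugget.UB = 1)), S))
opt     = list(maxit = 200, method = "Nelder-Mead", lower = -Inf, upper = Inf)
MCEM    = list(maxit = 20, tol = 1e-3, n.sample = 30, verbose = TRUE)
# est = fit.NN(formula, output, input, phi, cov.model = "matern_5_2",
#              prior = prior, opt = opt, MCEM = MCEM)
# est$par would then hold the updated phi matrix, one column per level.
#############################################################################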
############################################################################# ############################################################################# ############################################################################# ############################################################################# predict.ND = function(formula, output, input, input.new, phi, cov.model){ Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } S = length(output) n = rep(NA, S) for(t in 1:S){ n[t] = dim(output[[t]])[1] } np = dim(input.new)[1] y = output ## add new inputs to missing inputs input.miss = list() input.union = list() for(t in 1:S){ input.miss[[t]] = input.new input.union[[t]] = rbind(input.new, input[[t]]) } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] } krige = list() krige.var = list() ################################################################################# #### Get predictive mean and predictive variance ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) #UH = backsolve(U, H[[t]], transpose=TRUE) #HRHInv = solve(crossprod(UH)) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) # compute predictive mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]] - RmoRInv%*%H[[t]])%*%HRHInv%*%t(H[[t]])%*%RInv + RmoRInv krige[[t]] = KW%*%y[[t]] # compute predictive variance XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) c_star = diag(Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR) c_star[c_star<0] = 0 # avoid numerical problems Q = RInv - (RInv%*%H[[t]])%*%HRHInv%*%t(RInv%*%H[[t]]) sigma2.hat = compute_Svec(y[[t]], Q) / (n[t]-q[t]) constant = (n[t]-q[t])/(n[t]-q[t]-2) krige.var[[t]] = constant*c_star %*% t(sigma2.hat) # m-by-N matrix for(t in 2:S){ ############################################################################ #### estimate missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) # RInvH = RInv%*%H[[t]] # HRH = t(H[[t]])%*%RInvH # HRHchol = chol(HRH) # HRHInv = chol2inv(HRHchol) # K = RInvH%*%HRHInv%*%t(RInvH) # QH = RInv - K IB = match.input(input[[t]], input[[t-1]])$IB y_t1 = y[[t-1]][IB, ] IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB ym_t1 = krige[[t-1]][IB, ] 
# Xm = cbind(Hm[[t]], ym_t1) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) R_sk_diag = diag(Rm - Rmo%*%RInv%*%t(Rmo)) R_sk_diag[R_sk_diag<0] = 0 pred.list = compute_prediction(y[[t]], H[[t]], y_t1, krige[[t-1]],krige.var[[t-1]], RInv, Hm[[t]], ym_t1, Rmo, R_sk_diag) krige[[t]] = pred.list$krige krige.var[[t]] = pred.list$krige.var } ################################################################################ # for(t in 1:(S-1)){ # ind = sort(ID.org[[t]], index.return=TRUE)$ix # krige[[t]][ind] = krige[[t]] # krigeSE[[t]][ind] = krigeSE[[t]] # } krigeSE = list() lower95 = list() upper95 = list() for(t in 1:S){ krige.var[[t]][krige.var[[t]]<0] = 0 krigeSE[[t]] = sqrt(krige.var[[t]]) # degree = ifelse(t==1, n[t]-q[t], n[t]-q[t]-1) lower95[[t]] = krige[[t]] - 2*krigeSE[[t]] upper95[[t]] = krige[[t]] + 2*krigeSE[[t]] } names(krige) = paste0("Level", seq(1:S), "") names(krigeSE) = paste0("Level", seq(1:S), "") names(lower95) = paste0("Level", seq(1:S), "") names(upper95) = paste0("Level", seq(1:S), "") out = list(mu=krige, SE=krigeSE, lower95=lower95, upper95=upper95) return(out) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# predict.NN <- function(formula,output,input,input.new,phi,cov.model="matern_5_2", nsample=30){ Dim = dim(input[[1]])[2] N = dim(output[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } ################################################################### #### augment input ################################################################### S = length(output) # number of code out = augment.input(input) input.union = out$union input.miss = out$miss np = dim(input.new)[1] n = rep(NA, S) for(t in 1:S){ n[t] = dim(input[[t]])[1] } y = output ## add new inputs to missing inputs # for(t in 1:(S-1)){ # input.miss[[t]] = rbind(input.new, input.miss[[t]]) # } pred.ID = list() ID.org = list() index.full = 1:np for(t in 1:(S-1)){ ind.list = match.input(input.union[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, ,drop=FALSE] input.added = input.new[-indB, ,drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1,n.added,by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = rbind(input.added, input.miss[[t]]) input.union[[t]] = rbind(input.miss[[t]], input[[t]]) }else{ ID.org[[t]] = 1:np pred.ID[[t]] = 1:np input.miss[[t]] = rbind(input.new, input.miss[[t]]) input.union[[t]] = rbind(input.new, input.union[[t]]) } } t = S ind.list = match.input(input[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, , drop=FALSE] input.added = input.new[-indB, , drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1, n.added, by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = input.added }else{ input.miss[[t]] = input.new ID.org[[t]] = 1:np pred.ID[[t]] = 1:np } input.union[[S]] = rbind(input.new, input.union[[S]]) ################################################################### 
#### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } n.m = rep(NA, S) for(t in 1:S){ n.m[t] = dim(input.miss[[t]])[1] } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ## krige = list() krigeSE = list() ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] ym.hat[[t]] = array(NA, dim=c(nsample, dim(Hm[[t]])[1], N)) } ################################################################################# #### get predictive mean and predictive variance ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) # compute conditional mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]]-RmoRInv%*%H[[t]]) %*% HRHInv %*% t(H[[t]]) %*% RInv + RmoRInv mu_ymy = KW %*% y[[t]] # n.m-by-N matrix # compute predictive variance XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) c_star = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR ## c_star is not positive definite!!! 
So, there is no cholesky decomposition available # compute S2 Q = RInv - RInv%*%H[[t]]%*%HRHInv%*%t(H[[t]])%*%t(RInv) sigma2.hat = compute_Svec(y[[t]], Q) / (n[t]-q[t]) L = t(chol(c_star)) ym.hat[[t]] = sample_mvt(mu_ymy, L=L, sigma=sigma2.hat, df=n[t]-q[t], nsample) #for(k in 1:nsample){ for(t in 2:(S)){ ############################################################################ #### simulating missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) R_sk = Rm - Rmo%*%RInv%*%t(Rmo) # RmoRInv = Rmo%*%RInv y_t1 = array(NA, dim=c(nsample, n[t], N)) ym_t1 = array(NA, dim=c(nsample, n.m[t], N)) # IB = match.input(input[[t]], input.miss[[t-1]])$IB # y_t1 = ym.hat[[t-1]][IB] # b = matrix(NA, q[t]+1, N) # mu_y = matrix(NA, n.m[t], N) for(k in 1:nsample){ y_t1[k, , ] = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k, , ])) ym_t1[k, , ] = create.w.pred(t=t, input=input[[t-1]], input.miss=input.miss, y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k, ,])) ym.hat[[t]][k, , ] = conditional_simulation(y[[t]], H[[t]], y_t1[k, , ], RInv, Hm[[t]], ym_t1[k, , ], Rmo, R_sk) } } #} ################################################################################ krige = list() krigeSE = list() krige.lower95 = list() krige.upper95 = list() for(t in 1:S){ # yhat[[t]] = ym.hat[[t]][ ,pred.ID[[t]], ] krige[[t]] = apply(ym.hat[[t]], c(2,3), mean) krigeSE[[t]] = apply(ym.hat[[t]], c(2,3), sd) krige.lower95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.025) krige.upper95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.975) } pred.mu = list() pred.SE = list() pred.lower95 = list() pred.upper95 = list() for(t in 1:S){ pred.mu[[t]] = matrix(NA, np, N) pred.SE[[t]] = matrix(0, np, N) pred.lower95[[t]] = matrix(NA, np, N) pred.upper95[[t]] = matrix(NA, np, N) ind.list = ismember(input.new, input.miss[[t]]) pred.mu[[t]][ind.list$IIA, ] = krige[[t]][ind.list$IA, ] pred.SE[[t]][ind.list$IIA, ] = krigeSE[[t]][ind.list$IA, ] pred.lower95[[t]][ind.list$IIA, ] = krige.lower95[[t]][ind.list$IA, ] pred.upper95[[t]][ind.list$IIA, ] = krige.upper95[[t]][ind.list$IA, ] if(length(ind.list$IIA)<np){ ind.input = ismember(input.new, input[[t]]) pred.mu[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.lower95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.upper95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] } } names(pred.mu) = paste0("Level", seq(1:S), "") names(pred.SE) = paste0("Level", seq(1:S), "") names(pred.lower95) = paste0("Level", seq(1:S), "") names(pred.upper95) = paste0("Level", seq(1:S), "") pred = list(mu=pred.mu, SE=pred.SE, lower95=pred.lower95,upper95=pred.upper95) return(pred) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# condsim.ND = function(formula, output, input, input.new, phi, cov.model="matern_5_2", nsample=30){ Dim = dim(input[[1]])[2] N = dim(output[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } S = length(output) np = dim(input.new)[1] n = rep(NA, S) for(t in 1:S){ n[t] = 
dim(input[[t]])[1] } y = output ## add new inputs to missing inputs # input.miss = list() # input.union = list() # for(t in 1:S){ # input.miss[[t]] = input.new # input.union[[t]] = rbind(input.new, input[[t]]) # } # input.miss = list() # for(t in 1:S){ # input.miss[[t]] = input.new # } input.miss = list() input.union = list() pred.ID = list() ID.orig = list() index.full = 1:np indB = list() for(t in 1:(S)){ ind.list = match.input(input[[t]], input.new) # indlist = ismember(input.new, input[[t]]) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB[[t]] = ind.list$IB pred.ID.exist = indA input.exist = input[[t]][indA, ,drop=FALSE] # common inputs input.added = input.new[-indB[[t]], ,drop=FALSE] # inputs in input.new but not in input[[t]] n.added = dim(input.added)[1] pred.ID.added = seq(1, n.added, by=1) ID.orig[[t]] = c(index.full[-indB[[t]]], indB[[t]]) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = input.added input.union[[t]] = rbind(input.added, input[[t]]) }else{ ID.orig[[t]] = 1:np pred.ID[[t]] = 1:np input.miss[[t]] = input.new input.union[[t]] = rbind(input.new, input[[t]]) } } # n.m = rep(NA, S) for(t in 1:S){ n.m[t] = dim(input.miss[[t]])[1] } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] ym.hat[[t]] = array(NA, dim=c(nsample, dim(Hm[[t]])[1], N)) } ################################################################################# #### Sampling from predictive distribution ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) # compute conditional mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]]-RmoRInv%*%H[[t]]) %*% HRHInv %*% t(H[[t]]) %*% RInv + RmoRInv mu_ymy = KW %*% y[[t]] RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) # compute predictive variance XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) c_star = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR ## c_star is not positive definite!!! 
So, there is no cholesky decomposition available # compute S2 Q = RInv - RInv%*%H[[t]]%*%HRHInv%*%t(H[[t]])%*%t(RInv) sigma2.hat = compute_Svec(y[[t]], Q) / (n[t]-q[t]) # for(j in 1:N){ # Sig = c_star * sigma2.hat[j] # ym.hat[[t]][ , ,j] = mvtnorm::rmvt(nsample, sigma=Sig, df=n[t]-q[t], delta=mu_ymy[ ,j], type="shifted") # } L = t(chol(c_star)) ym.hat[[t]] = sample_mvt(mu_ymy, L=L, sigma=sigma2.hat, df=n[t]-q[t], nsample) # ym.hatC = array(NA, dim=c(nsample, dim(Hm[[t]])[1], N)) for(t in 2:(S)){ ############################################################################ #### estimate missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) IB = match.input(input[[t]], input[[t-1]])$IB y_t1 = y[[t-1]][IB, ] # for(k in 1:nsample){ # ym_t1[k, ,] = create.w.pred(t=t, input=input[[t-1]], input.miss=input.miss, # y=y[[t-1]], ym=ym.hat[k, , ]) # } ym_t1 = array(NA, dim=c(nsample, n.m[t], N)) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) R_sk = Rm - Rmo%*%RInv%*%t(Rmo) RmoRInv = Rmo%*%RInv # b = matrix(NA, q[t]+1, N) # mu_y = matrix(NA, nrow(Hm[[t]]), N) # ts = proc.time() for(k in 1:nsample){ ym_t1[k, ,] = create.w.pred(t=t, input=input[[t-1]], input.miss=input.miss, y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k, , ])) # ym.hatC = array(NA, dim=c(nsample, dim(Hm[[t]])[1], N)) ym.hat[[t]][k, , ] = conditional_simulation(y[[t]], H[[t]], y_t1, RInv, Hm[[t]], ym_t1[k, , ], Rmo, R_sk) # ym.hatC[[t]][k, , ] = pred.list$ym # sigma = pred.list$sigma # for(j in 1:N){ # X = cbind(H[[t]], y_t1[ ,j]) # XRXInv = solve(t(X)%*%RInv%*%X) # b[ ,j] = XRXInv%*%t(X)%*%RInv%*%y[[t]][ ,j] # Xp = cbind(Hm[[t]], ym_t1[k, ,j]) # mu_y[ ,j] = Xp%*%b[ ,j] + RmoRInv%*%(y[[t]][ ,j]-X%*%b[ ,j]) # temp = Xp - RmoRInv%*%X # c_star = R_sk + temp%*%XRXInv%*%t(temp) # ym.hat[[t]][k, ,j] = mvtnorm::rmvt(1, sigma=sigma[j]*c_star, df=n[t]-q[t]-1, # delta=mu_y[ ,j], type="shifted") # } } # te = proc.time() - ts } ## get summary statistics krige = list() krigeSE = list() krige.lower95 = list() krige.upper95 = list() for(t in 1:S){ # yhat[[t]] = ym.hat[[t]][ ,pred.ID[[t]], ] krige[[t]] = apply(ym.hat[[t]], c(2,3), mean) krigeSE[[t]] = apply(ym.hat[[t]], c(2,3), sd) krige.lower95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.025) krige.upper95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.975) } pred.mu = list() pred.SE = list() pred.lower95 = list() pred.upper95 = list() for(t in 1:S){ pred.mu[[t]] = matrix(NA, np, N) pred.SE[[t]] = matrix(0, np, N) pred.lower95[[t]] = matrix(NA, np, N) pred.upper95[[t]] = matrix(NA, np, N) ind.list = ismember(input.new, input.miss[[t]]) pred.mu[[t]][ind.list$IIA, ] = krige[[t]][ind.list$IA, ] pred.SE[[t]][ind.list$IIA, ] = krigeSE[[t]][ind.list$IA, ] pred.lower95[[t]][ind.list$IIA, ] = krige.lower95[[t]][ind.list$IA, ] pred.upper95[[t]][ind.list$IIA, ] = krige.upper95[[t]][ind.list$IA, ] if(length(ind.list$IIA)<np){ ind.input = ismember(input.new, input[[t]]) pred.mu[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.lower95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.upper95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] } } names(pred.mu) = paste0("Level", seq(1:S), "") names(pred.SE) = paste0("Level", seq(1:S), "") names(pred.lower95) = paste0("Level", seq(1:S), "") names(pred.upper95) = paste0("Level", seq(1:S), "") pred = list(mu=pred.mu, SE=pred.SE, 
lower95=pred.lower95,upper95=pred.upper95) return(pred) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# condsim.NN <- function(formula,output,input,input.new,phi,cov.model="matern_5_2", nsample=30){ Dim = dim(input[[1]])[2] N = dim(output[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } ################################################################### #### augment input ################################################################### S = length(output) # number of code out = augment.input(input) input.union = out$union input.miss = out$miss np = dim(input.new)[1] n = rep(NA, S) for(t in 1:S){ n[t] = dim(input[[t]])[1] } y = output ## add new inputs to missing inputs # for(t in 1:(S-1)){ # input.miss[[t]] = rbind(input.new, input.miss[[t]]) # } pred.ID = list() ID.org = list() index.full = 1:np for(t in 1:(S-1)){ ind.list = match.input(input.union[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, ,drop=FALSE] input.added = input.new[-indB, ,drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1,n.added,by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = rbind(input.added, input.miss[[t]]) input.union[[t]] = rbind(input.miss[[t]], input[[t]]) }else{ ID.org[[t]] = 1:np pred.ID[[t]] = 1:np input.miss[[t]] = rbind(input.new, input.miss[[t]]) input.union[[t]] = rbind(input.new, input.union[[t]]) } } t = S ind.list = match.input(input[[t]], input.new) if(!is.null(ind.list$IA)){ indA = ind.list$IA indB = ind.list$IB pred.ID.exist = indA input.exist = input.union[[t]][indA, , drop=FALSE] input.added = input.new[-indB, , drop=FALSE] n.added = dim(input.added)[1] pred.ID.added = seq(1, n.added, by=1) ID.org[[t]] = c(index.full[-indB], indB) pred.ID[[t]] = c(pred.ID.added, pred.ID.exist+n.added) input.miss[[t]] = input.added }else{ input.miss[[t]] = input.new ID.org[[t]] = 1:np pred.ID[[t]] = 1:np } input.union[[S]] = rbind(input.new, input.union[[S]]) ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } n.m = rep(NA, S) for(t in 1:S){ n.m[t] = dim(input.miss[[t]])[1] } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ## krige = list() krigeSE = list() ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] ym.hat[[t]] = array(NA, dim=c(nsample, dim(Hm[[t]])[1], N)) } 
################################################################################# #### get predictive mean and predictive variance ################################################################################# t = 1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) # compute conditional mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]]-RmoRInv%*%H[[t]]) %*% HRHInv %*% t(H[[t]]) %*% RInv + RmoRInv mu_ymy = KW %*% y[[t]] # n.m-by-N matrix # compute predictive variance XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) c_star = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR ## c_star is not positive definite!!! So, there is no cholesky decomposition available # compute S2 Q = RInv - RInv%*%H[[t]]%*%HRHInv%*%t(H[[t]])%*%t(RInv) sigma2.hat = compute_Svec(y[[t]], Q) / (n[t]-q[t]) L = t(chol(c_star)) ym.hat[[t]] = sample_mvt(mu_ymy, L=L, sigma=sigma2.hat, df=n[t]-q[t], nsample) #for(k in 1:nsample){ for(t in 2:(S)){ ############################################################################ #### simulating missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) R_sk = Rm - Rmo%*%RInv%*%t(Rmo) # RmoRInv = Rmo%*%RInv y_t1 = array(NA, dim=c(nsample, n[t], N)) ym_t1 = array(NA, dim=c(nsample, n.m[t], N)) # IB = match.input(input[[t]], input.miss[[t-1]])$IB # y_t1 = ym.hat[[t-1]][IB] # b = matrix(NA, q[t]+1, N) # mu_y = matrix(NA, n.m[t], N) for(k in 1:nsample){ y_t1[k, , ] = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k, , ])) ym_t1[k, , ] = create.w.pred(t=t, input=input[[t-1]], input.miss=input.miss, y=y[[t-1]], ym=as.matrix(ym.hat[[t-1]][k, ,])) ym.hat[[t]][k, , ] = conditional_simulation(y[[t]], H[[t]], y_t1[k, , ], RInv, Hm[[t]], ym_t1[k, , ], Rmo, R_sk) } } #} ################################################################################ krige = list() krigeSE = list() krige.lower95 = list() krige.upper95 = list() for(t in 1:S){ # yhat[[t]] = ym.hat[[t]][ ,pred.ID[[t]], ] krige[[t]] = apply(ym.hat[[t]], c(2,3), mean) krigeSE[[t]] = apply(ym.hat[[t]], c(2,3), sd) krige.lower95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.025) krige.upper95[[t]] = apply(ym.hat[[t]], c(2,3), quantile, 0.975) } pred.mu = list() pred.SE = list() pred.lower95 = list() pred.upper95 = list() for(t in 1:S){ pred.mu[[t]] = matrix(NA, np, N) pred.SE[[t]] = matrix(0, np, N) pred.lower95[[t]] = matrix(NA, np, N) pred.upper95[[t]] = matrix(NA, np, N) ind.list = ismember(input.new, input.miss[[t]]) pred.mu[[t]][ind.list$IIA, ] = krige[[t]][ind.list$IA, ] pred.SE[[t]][ind.list$IIA, ] = krigeSE[[t]][ind.list$IA, ] pred.lower95[[t]][ind.list$IIA, ] = krige.lower95[[t]][ind.list$IA, ] pred.upper95[[t]][ind.list$IIA, ] = krige.upper95[[t]][ind.list$IA, ] if(length(ind.list$IIA)<np){ ind.input = ismember(input.new, input[[t]]) pred.mu[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.lower95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] pred.upper95[[t]][ind.input$IIA, ] = y[[t]][ind.input$IA, ] } } names(pred.mu) = paste0("Level", seq(1:S), "") 
names(pred.SE) = paste0("Level", seq(1:S), "") names(pred.lower95) = paste0("Level", seq(1:S), "") names(pred.upper95) = paste0("Level", seq(1:S), "") pred = list(mu=pred.mu, SE=pred.SE, lower95=pred.lower95,upper95=pred.upper95) return(pred) } ############################################################################# ############################################################################# ############################################################################# ############################################################################# sample.ym <- function(y, input, param, Ho, Hm, dist.o, dist.m, dist.mo, cov.model="matern_5_2", nsample=30){ S = length(y) Dim = dim(dist.o[[1]])[3] N = dim(y[[1]])[2] # param contains phi and nugget (maybe) if(length(param[ ,1])==Dim){ #no nugget #phi = exp(-param) is.nugget=FALSE }else{ #phi = exp(-param[1:Dim, ,drop=FALSE]) #nugget = exp(param[Dim+1, ,drop=FALSE]) / (1 + exp(param[Dim+1, ,drop=FALSE])) is.nugget = TRUE #phi = rbind(phi, nugget) } phi = param inputlist = augment.input(input) input.miss = inputlist$miss nm = rep(NA, S-1) for(t in 1:(S-1)){ nm[t] = dim(dist.m[[t]])[1] } n = rep(NA, S) q = rep(NA, S) for(t in 1:S){ n[t] = dim(y[[t]])[1] q[t] = dim(Ho[[t]])[2] } ym = list() for(t in 1:(S-1)){ ym[[t]] = array(NA, dim=c(nsample, nm[t], N)) } names(ym) = paste0("Level", seq(1:(S-1)), "") t=1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) HRHInv = solve(t(Ho[[t]])%*%RInv%*%Ho[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=is.nugget) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=FALSE) # compute conditional mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]]-RmoRInv%*%Ho[[t]]) %*% HRHInv %*% t(Ho[[t]]) %*% RInv + RmoRInv mu_ymy = KW %*% y[[t]] # n.m-by-N matrix RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) # compute predictive variance XXRR = t(Hm[[t]]) - t(Ho[[t]])%*%RInv%*%t(Rmo) c_star = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR # compute S2 Q = RInv - RInv%*%Ho[[t]]%*%HRHInv%*%t(Ho[[t]])%*%t(RInv) sigma2.hat = compute_Svec(y[[t]], Q) / (n[t]-q[t]) # for(j in 1:N){ # Sig = c_star * sigma2.hat[j] # ym[[t]][ ,j] = c(mvtnorm::rmvt(1, sigma=Sig, df=n[t]-q[t], delta=mu_ymy[ ,j], type="shifted")) # } L = t(chol(c_star)) ym[[t]] = sample_mvt(mu=mu_ymy, L=L, sigma=sigma2.hat, df=n[t]-q[t], nsample) if(S>2){ for(t in 2:(S-1)){ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) # IB = match.input(input[[t]], input.miss[[t-1]])$IB # y_t1 = ym[[t-1]][IB] y_t1 = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=ym[[t-1]]) # IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB # ym_t1 = ym[[t-1]][IB] ym_t1 = create.w(t=t, input=input.miss, input.miss=input.miss[[t-1]], y=ym[[t-1]], ym=ym[[t-1]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=is.nugget) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=FALSE) R_sk = Rm - Rmo%*%RInv%*%t(Rmo) pred.list = conditional_simulation(y[[t]], Ho[[t]], y_t1, RInv, Hm[[t]], ym_t1, Rmo, R_sk) # sample ym at t #ym[[t]] = c(mvtnorm::rmvt(1, sigma=Sig, df=n[t]-q[t]-1, delta=mu_ymy, type="shifted")) } } return(ym) } ############################################################################# ############################################################################# ############################################################################# 
############################################################################# ############################################################################# ############################################################################# ############################################################################# ############################################################################# ############################################################################# #############################################################################
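## ---------------------------------------------------------------------------
## Illustrative note (sketch, not run): the commented-out mvtnorm::rmvt()
## calls above document what the compiled helper sample_mvt() is meant to
## produce, namely draws from a shifted multivariate t distribution with
## location mu, scale matrix sigma2 * c_star and df degrees of freedom. The
## toy code below shows the equivalent construction
## mu + sqrt(sigma2) * sqrt(df / W) * L %*% z, with W a chi-squared draw on df
## degrees of freedom and L the lower Cholesky factor of c_star; it does not
## touch the package internals.
## ---------------------------------------------------------------------------
if (FALSE) {
  set.seed(1)
  c_star <- matrix(c(1, 0.5, 0.5, 2), 2, 2)  # toy scale matrix
  mu <- c(0, 1); sigma2 <- 0.3; df <- 5
  L <- t(chol(c_star))                       # lower Cholesky factor of c_star
  z <- rnorm(2); W <- rchisq(1, df)
  draw_manual <- mu + sqrt(sigma2) * sqrt(df / W) * as.vector(L %*% z)
  draw_rmvt <- mvtnorm::rmvt(1, sigma = sigma2 * c_star, df = df,
                             delta = mu, type = "shifted")
  # draw_manual and draw_rmvt are draws from the same shifted multivariate t
}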
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/Rfunctions.R
#' @title Conditional simulation at new inputs in the autoregressive cokriging model #' @description This function simulate from predictive distributions in #' autogressive cokriging models #' #' @param obj a \code{\link{cokm}} object construted via the function \code{\link{cokm}} in #' this package #' @param input.new a matrix including new inputs for making prediction #' @param nsample a numerical value indicating the number of samples #' @author Pulong Ma <[email protected]> #' #' @seealso \code{\link{cokm}}, \code{\link{cokm.fit}}, \code{\link{cokm.predict}}, \code{\link{ARCokrig}} #' @export #' @examples #' Funcc = function(x){ #' return(0.5*(6*x-2)^2*sin(12*x-4)+10*(x-0.5)-5) #' } #' #' Funcf = function(x){ #' z1 = Funcc(x) #' z2 = 2*z1-20*x+20 + sin(10*cos(5*x)) #' return(z2) #' } #' #' ##################################################################### #' ###### Nested design #' ##################################################################### #' Dc <- seq(-1,1,0.1) #' indDf <- c(1, 3, 6, 8, 10, 13, 17, 21) #' zc <- Funcc(Dc) #' Df <- Dc[indDf] #' zf <- Funcf(Df) #' #' input.new = as.matrix(seq(-1,1,length.out=200)) #' #' #' ## create the cokm object #' prior = list(name="Reference") #' obj = cokm(formula=list(~1,~1+x1), output=list(c(zc), c(zf)), #' input=list(as.matrix(Dc), as.matrix(Df)), #' prior=prior, cov.model="matern_5_2") #' #' ## update model parameters in the cokm object #' #' obj = cokm.fit(obj) #' #' #' cokrige = cokm.condsim(obj, input.new, nsample=30) #' #' cokm.condsim <- function(obj, input.new, nsample=30){ formula = obj@formula output = obj@output input = obj@input param = obj@param cov.model = [email protected] NestDesign = obj@NestDesign phi = do.call(cbind, param) if(!is(input.new, "matrix")){ stop("input.new should be a matrix.") } if(NestDesign){ pred.list = condsim.ND.univariate(formula=formula, output=output, input=input, input.new=input.new, phi=phi, cov.model=cov.model, nsample=nsample) }else{ pred.list = condsim.NN.univariate(formula=formula, output=output, input=input, input.new=input.new, phi=phi, cov.model=cov.model, nsample=nsample) } return(pred.list) }
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/cokm.condsim.R
#' @title fit the autoregressive cokriging model #' @description This function estimates parameters in #' autogressive cokriging models #' #' @param obj a \code{\link{cokm}} object construted via the function \code{\link{cokm}} in #' this package #' #' @author Pulong Ma <[email protected]> #' #' @export #' #' @seealso \code{\link{cokm}}, \code{\link{cokm.param}}, \code{\link{cokm.predict}}, \code{\link{ARCokrig}} #' #' @examples #' #' Funcc = function(x){ #' return(0.5*(6*x-2)^2*sin(12*x-4)+10*(x-0.5)-5) #' } #' #' Funcf = function(x){ #' z1 = Funcc(x) #' z2 = 2*z1-20*x+20 + sin(10*cos(5*x)) #' return(z2) #' } #' #' ##################################################################### #' ###### Nested design #' ##################################################################### #' Dc <- seq(-1,1,0.1) #' indDf <- c(1, 3, 6, 8, 10, 13, 17, 21) #' zc <- Funcc(Dc) #' Df <- Dc[indDf] #' zf <- Funcf(Df) #' #' input.new = as.matrix(seq(-1,1,length.out=200)) #' #' #' ## create the cokm object #' prior = list(name="JR") #' obj = cokm(formula=list(~1,~1+x1), output=list(c(zc), c(zf)), #' input=list(as.matrix(Dc), as.matrix(Df)), #' prior=prior, cov.model="matern_5_2") #' #' ## update model parameters in the cokm object #' #' obj = cokm.fit(obj) #' #' cokm.fit <- function(obj){ formula = obj@formula output = obj@output input = obj@input param = obj@param cov.model = [email protected] prior = obj@prior hyperparam = prior$hyperparam opt = obj@opt NestDesign = obj@NestDesign phi = do.call(cbind, param) Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } S = length(output) ################################################################### #### create covariates ################################################################### H = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) } Cl = list() for(t in 1:S){ input.max = apply(input[[t]], 2, max) input.min = apply(input[[t]], 2, min) Cl[[t]] = abs(input.max-input.min) } for(t in 1:S){ hyperparam[[t]]$Cl = Cl[[t]] } ################################################################### #### begin optimization algorithm for Nested Design ################################################################### if(NestDesign){ ################################################################### #### compute intermediate quantities ################################################################### distlist = list() for(t in 1:S){ distlist[[t]] = compute_distance(input[[t]], input[[t]]) } phi.new = phi for(t in 1:S){ if(is.nugget){ nu = log(phi[p.x+1, t]) - log(hyperparam[[t]]$nugget.UB-phi[p.x+1, t]) # logit of nugget init.val = c(-log(phi[1:p.x, t]), nu) }else{ init.val = -log(phi[ ,t]) } fit = try(optim(init.val, margin.posterior, input=input, output=output, level=t, H=H, dist=distlist[[t]], cov.model=cov.model, prior=prior$name, hyperparam=hyperparam[[t]], control=list(fnscale=-1, maxit=opt$maxit), method=opt$method, lower=opt$lower, upper=opt$upper), silent=T) if(inherits(fit, "try-error")){ phi.new[ ,t] = phi[ ,t] print(paste0("optimization error, skip t=", as.character(t))) print(fit) }else{ if(is.nugget){ nugget = hyperparam[[t]]$nugget.UB*exp(fit$par[p.x+1]) / (1+exp(fit$par[p.x+1])) phi.new[ ,t] = c(exp(-fit$par[1:p.x]), nugget) }else{ phi.new[ ,t] = exp(-fit$par) } } } }else{ for(t in 1:S){ if(!is.matrix(output[[t]])){ output[[t]] = as.matrix(output[[t]]) } } ################################################################### 
#### augment input ################################################################### out = augment.input(input) input.union = out$union input.miss = out$miss input.list = list(input=input, input.miss=input.miss) ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) if(t<S){ colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) } distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } tuning = obj@tuning n.sample = tuning$n.sample maxit = tuning$maxit tol = tuning$tol verbose = tuning$verbose ################################################################### #### begin MCEM algorithm for Non-Nested Design ################################################################### phi.new = phi conv = FALSE iter = 1 while(!conv){ ############################################################### #### generate M Monte Carlo samples for missing data ############################################################### # y.m = list() #system.time( # for(k in 1:n.sample){ # y.m[[k]] = sample.ym(y=output,input=input,param=phi,Ho=H,Hm=Hm,dist.o=dist.o, # dist.m=dist.m,dist.mo=dist.mo,cov.model=cov.model) # } y.m = sample.ym(y=output,input=input,param=phi,Ho=H,Hm=Hm,dist.o=dist.o, dist.m=dist.m,dist.mo=dist.mo,cov.model=cov.model, nsample=n.sample) #) ############################################################### #### compute and maximize the Q function at each fidelity ############################################################### for(t in 1:S){ if(is.nugget){ nu = log(phi[p.x+1, t]) - log(hyperparam[[t]]$nugget.UB-phi[p.x+1, t]) # logit of nugget init.val = c(-log(phi[1:p.x, t]), nu) }else{ init.val = -log(phi[ ,t]) } fit = try(optim(init.val, compute.g.univ, input.list=input.list, level=t, y=output, H=H, ym=y.m, Hm=Hm, dist=distlist[[t]], hyper=hyperparam[[t]], cov.model=cov.model, control=list(fnscale=-1, maxit=opt$maxit), method=opt$method, lower=opt$lower, upper=opt$upper), silent=T) # fit = try(optimr(init.val, compute.Q.default, input=input, level=t, y=output, H=H, y.m=y.m, Hm=Hm, # distlist=distlist, cov.model=cov.model, # control=list(fnscale=-1, maxit=opt$maxit), # method=opt$method, lower=opt$lower, upper=opt$upper), # silent=T) if(inherits(fit, "try-error")){ phi.new[ ,t] = phi[ ,t] print(paste0("optimization error, skip t=", as.character(t))) print(fit) }else{ if(is.nugget){ nugget = hyperparam[[t]]$nugget.UB*exp(fit$par[p.x+1]) / (1+exp(fit$par[p.x+1])) phi.new[ ,t] = c(exp(-fit$par[1:p.x]), nugget) }else{ phi.new[ ,t] = exp(-fit$par) } } } ############################################################### #### check convergence if(inherits(fit, "try-error")){ diff = tol + 1 }else{ diff = mean((phi.new - phi)^2) } if(verbose){ print(paste0("iter=", as.character(iter))) } iter = iter + 1 if(iter>maxit){ conv = TRUE }else{ 
if(diff<tol){ conv = TRUE } } phi = phi.new } obj@info=list(iter=iter, eps=diff) } colnames(phi.new) = paste0("Level", seq(1:S), "") phi.new = split(phi.new, col(phi.new, as.factor = TRUE)) obj@param = phi.new return(obj) }
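## ---------------------------------------------------------------------------
## Illustrative sketch (not run): fitting with a non-nested design so that the
## MCEM branch above is exercised. The toy functions mirror the roxygen
## example of this package; the design Df is deliberately NOT a subset of Dc,
## and the `tuning` values are illustrative choices.
## ---------------------------------------------------------------------------
if (FALSE) {
  Funcc <- function(x) 0.5*(6*x-2)^2*sin(12*x-4) + 10*(x-0.5) - 5
  Funcf <- function(x) 2*Funcc(x) - 20*x + 20 + sin(10*cos(5*x))

  Dc <- seq(-1, 1, 0.1)                      # low-fidelity design
  Df <- seq(-0.95, 0.95, length.out = 8)     # high-fidelity design, non-nested
  zc <- Funcc(Dc); zf <- Funcf(Df)

  obj <- cokm(formula = list(~1, ~1+x1),
              output = list(c(zc), c(zf)),
              input = list(as.matrix(Dc), as.matrix(Df)),
              prior = list(name = "JR"), cov.model = "matern_5_2",
              NestDesign = FALSE,
              tuning = list(maxit = 20, tol = 1e-3, n.sample = 30, verbose = TRUE))
  obj <- cokm.fit(obj)   # runs the MCEM algorithm defined above
  obj@info               # number of MCEM iterations and final parameter change
}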
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/cokm.fit.R
#' @title Get model parameters in the autoregressive cokriging model
#' @description This function computes estimates for regression and variance
#' parameters given that the correlation parameters are known. It is used to show
#' all model parameters in one place.
#' @param obj a \code{\link{cokm}} object constructed via the function \code{\link{cokm}} in
#' this package
#'
#' @return a list of model parameters including regression coefficients \eqn{\beta},
#' scale discrepancy \eqn{\gamma}, variance parameters
#' \eqn{\sigma^2}, and correlation parameters \eqn{\phi} in covariance functions.
#' If nugget parameters are included in the model, then nugget parameters are shown in \eqn{\phi}.
#'
#' @author Pulong Ma <[email protected]>
#'
#' @export
#'
#' @seealso \code{\link{cokm}}, \code{\link{cokm.fit}}, \code{\link{cokm.condsim}}, \code{\link{ARCokrig}}

cokm.param <- function(obj){

  formula = obj@formula
  output = obj@output
  input = obj@input
  param = obj@param
  cov.model = obj@cov.model

  phi = do.call(cbind, param)

  Dim = dim(input[[1]])[2]
  p.x = Dim
  if(dim(phi)[1]==Dim){
    is.nugget=FALSE
  }else{
    is.nugget=TRUE
  }

  S = length(output)
  y = output

  ###################################################################
  #### create covariates
  ###################################################################
  H = list()
  for(t in 1:S){
    colnames(input[[t]]) = paste0("x", 1:p.x)
    df = data.frame(input[[t]])
    H[[t]] = model.matrix(formula[[t]], df)
  }

  ###################################################################
  #### compute intermediate quantities
  ###################################################################
  distlist = list()
  for(t in 1:S){
    distlist[[t]] = compute_distance(input[[t]], input[[t]])
  }

  ###################################################################
  #### get b and sigma
  ###################################################################
  X = list()
  b = list()
  sigma = rep(NA, S)

  for(t in 1:S){
    R = buildcov(phi[ ,t], distlist[[t]], cov.model, is.nugget)
    n = dim(R)[1]
    U = chol(R)
    RInv = chol2inv(U)  # invert R from its Cholesky factor U (chol2inv expects the factor, not R itself)
    if(t==1){
      X[[t]] = H[[t]]
    }else{
      IB = match.input(input[[t]], input[[t-1]])$IB
      y_t1 = y[[t-1]][IB]
      X[[t]] = cbind(H[[t]], y_t1)
    }
    q = dim(X[[t]])[2]
    RInvX = RInv%*%X[[t]]
    XRXInv = solve(t(X[[t]])%*%RInvX)
    b[[t]] = c(XRXInv%*%(t(X[[t]])%*%(RInv%*%y[[t]])))
    Q = RInv - RInvX%*%XRXInv%*%t(RInvX)
    sigma[t] = t(y[[t]])%*%Q%*%y[[t]] / (n-q+2)
  }

  names(b) = paste0("Level", seq(1:S), "")
  names(sigma) = paste0("Level", seq(1:S), "")

  out = list(corr=param, coef=b, var=sigma)
  return(out)
}
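## ---------------------------------------------------------------------------
## Illustrative sketch (not run): continuing the roxygen example used
## elsewhere in this package (a fitted `obj` returned by cokm.fit()),
## cokm.param() collects the remaining model parameters in one list.
## ---------------------------------------------------------------------------
if (FALSE) {
  est <- cokm.param(obj)
  est$corr   # correlation (and nugget) parameters per level
  est$coef   # regression coefficients; the last entry at level t > 1 is the scale discrepancy
  est$var    # variance estimates per level
}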
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/cokm.param.R
#' @title Prediction at new inputs in the autoregressive cokriging model #' @description This function makes prediction in #' autogressive cokriging models. If a nested design is used, the predictive mean and predictive variance are #' computed exactly; otherwise, Monte Carlo simulation from the predictive distribution is used to approximate #' the predictive mean and predictive variance. #' #' @param obj a \code{\link{cokm}} object construted via the function \code{\link{cokm}} in #' this package #' @param input.new a matrix including new inputs for making prediction #' @author Pulong Ma <[email protected]> #' #' @export #' #' @seealso \code{\link{cokm}}, \code{\link{cokm.fit}}, \code{\link{cokm.condsim}}, \code{\link{ARCokrig}} #' #' @examples #' Funcc = function(x){ #' return(0.5*(6*x-2)^2*sin(12*x-4)+10*(x-0.5)-5) #' } #' #' Funcf = function(x){ #' z1 = Funcc(x) #' z2 = 2*z1-20*x+20 + sin(10*cos(5*x)) #' return(z2) #' } #' #' ##################################################################### #' ###### Nested design #' ##################################################################### #' Dc <- seq(-1,1,0.1) #' indDf <- c(1, 3, 6, 8, 10, 13, 17, 21) #' zc <- Funcc(Dc) #' Df <- Dc[indDf] #' zf <- Funcf(Df) #' #' input.new = as.matrix(seq(-1,1,length.out=200)) #' #' #' ## create the cokm object #' prior = list(name="Reference") #' obj = cokm(formula=list(~1,~1+x1), output=list(c(zc), c(zf)), #' input=list(as.matrix(Dc), as.matrix(Df)), #' prior=prior, cov.model="matern_5_2") #' #' ## update model parameters in the cokm object #' #' obj = cokm.fit(obj) #' #' cokrige = cokm.predict(obj, input.new) #' #' cokm.predict <- function(obj, input.new){ formula = obj@formula output = obj@output input = obj@input param = obj@param cov.model = [email protected] nugget.est = [email protected] NestDesign = obj@NestDesign phi = do.call(cbind, param) if(!is.matrix(input.new)){ stop("input.new should be a matrix.") } if(NestDesign){ Dim = dim(input[[1]])[2] p.x = Dim is.nugget = nugget.est S = length(output) n = sapply(output, length) np = dim(input.new)[1] y = output ## add new inputs to missing inputs input.miss = list() input.union = list() for(t in 1:S){ input.miss[[t]] = input.new input.union[[t]] = rbind(input.new, input[[t]]) } ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) #if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) #} distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } ym.hat = list() q = rep(NA, S) for(t in 1:S){ q[t] = dim(H[[t]])[2] } krige = list() krige.var = list() ################################################################################# #### Get predictive mean and predictive variance ################################################################################# t = 1 R = buildcov(phi[ 
,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) #UH = backsolve(U, H[[t]], transpose=TRUE) #HRHInv = solve(crossprod(UH)) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) betahat = HRHInv%*%t(H[[t]])%*%(RInv%*%y[[t]]) res = y[[t]] - H[[t]]%*%betahat SSE = c(t(res)%*%RInv%*%res) / (n[t]-q[t]) # \hat{sigma}^2 Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) #RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) XXRR = t(Hm[[t]]) - t(H[[t]])%*%RInv%*%t(Rmo) #XXRR = t(Hm[[t]]) - t(RmoU%*%UH) Sig_ymymy = Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%HRHInv%*%XXRR #Sig_ymymy = Rm - tcrossprod(RmoU) + t(XXRR)%*%HRHInv%*%XXRR Sig = Sig_ymymy * SSE #Sig = Sig_ymymy * SSE/(n[t]-q[t]) # mu_ymy = Hm[[t]]%*%betahat + Rmo%*%(RInv%*%res) krige[[t]] = c(Hm[[t]]%*%betahat + Rmo%*%(RInv%*%res)) const = (n[t]-q[t])/(n[t]-q[t]-2) #krigeSE[[t]] = sqrt(diag(Sig)[pred.ID[[t]]]) krige.var[[t]] = const*diag(Sig) krige.var[[t]][krige.var[[t]]<0] = 0 #for(k in 1:nsample){ for(t in 2:(S)){ ############################################################################ #### estimate missing data ym ############################################################################ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) IB = match.input(input[[t]], input[[t-1]])$IB y_t1 = y[[t-1]][IB] IB = match.input(input.miss[[t]], input.miss[[t-1]])$IB ym_t1 = as.matrix(krige[[t-1]][IB]) X = cbind(H[[t]], y_t1) XRXInv = solve(t(X)%*%RInv%*%X) betahat = XRXInv%*%t(X)%*%(RInv%*%y[[t]]) gamma = betahat[q[t]+1] res = y[[t]] - X%*%betahat SSE = c(t(res)%*%RInv%*%res) / (n[t]-q[t]-1) Xm = cbind(Hm[[t]], ym_t1) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=FALSE) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=is.nugget) RInvH = RInv %*% H[[t]] HRHInv = solve(t(H[[t]])%*%RInvH) Q = RInv - RInvH %*% HRHInv %*% t(RInvH) XXRR = t(Xm) - t(X)%*%RInv%*%t(Rmo) #RmoU = t(backsolve(U, t(Rmo), transpose=TRUE)) Sig_ymymy = diag(Rm - Rmo%*%RInv%*%t(Rmo) + t(XXRR)%*%XRXInv%*%XXRR) + krige.var[[t-1]] / c(crossprod(y_t1, Q %*% y_t1)) #Sig_ymymy = Rm - tcrossprod(RmoU) + t(XXRR)%*%XRXInv%*%XXRR const = (n[t]-q[t]-1) / (n[t]-q[t]-3) Sig = gamma^2*krige.var[[t-1]] + const*Sig_ymymy * SSE #Sig = Sig_ymymy * SSE/(n[t]) krige[[t]] = c(Xm%*%betahat + Rmo%*%(RInv%*%res)) krige.var[[t]] = Sig krige.var[[t]][krige.var[[t]]<0] = 0 } #} ################################################################################ # for(t in 1:(S-1)){ # ind = sort(ID.org[[t]], index.return=TRUE)$ix # krige[[t]][ind] = krige[[t]] # krigeSE[[t]][ind] = krigeSE[[t]] # } confint = list() krigeSE = list() lower95 = list() upper95 = list() for(t in 1:S){ krigeSE[[t]] = sqrt(krige.var[[t]]) # degree = ifelse(t==1, n[t]-q[t], n[t]-q[t]-1) # confint[[t]] = cbind(krige[[t]] + qt(0.025, df=degree)*krigeSE[[t]], # krige[[t]] + qt(0.975, df=degree)*krigeSE[[t]]) lower95[[t]]=krige[[t]] - 2*krigeSE[[t]] upper95[[t]]=krige[[t]] + 2*krigeSE[[t]] } names(krige) = paste0("Level", seq(1:S), "") names(krigeSE) = paste0("Level", seq(1:S), "") names(lower95) = paste0("Level", seq(1:S), "") names(upper95) = paste0("Level", seq(1:S), "") out = list(mu=krige, SE=krigeSE, lower95=lower95, upper95=upper95) }else{ nsample = obj@tuning$n.sample out = condsim.NN.univariate(formula=formula, output=output, input=input, input.new=input.new, phi=phi, cov.model=cov.model, nsample=nsample) } return(out) }
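## ---------------------------------------------------------------------------
## Illustrative sketch (not run): continuing the roxygen example above, the
## prediction list returned by cokm.predict() can be checked against the true
## fine-level function, e.g. with the CRPS() helper exported by this package.
## ---------------------------------------------------------------------------
if (FALSE) {
  cokrige <- cokm.predict(obj, input.new)
  truth <- Funcf(c(input.new))
  rmse <- sqrt(mean((cokrige$mu$Level2 - truth)^2))
  crps <- mean(CRPS(truth, cokrige$mu$Level2, cokrige$SE$Level2))
  c(RMSE = rmse, CRPS = crps)
}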
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/cokm.predict.R
#' @title Construct the cokm object #' @description This function constructs the cokm object in #' autogressive cokriging models #' #' @param formula a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors. #' @param output a list of \eqn{s} elements, each of which contains a matrix of computer model outputs. #' @param input a list of \eqn{s} elements, each of which contains a matrix of inputs. #' @param cov.model a string indicating the type of covariance #' function in AR-cokriging models. Current covariance functions include #' \describe{ #' \item{exp}{product form of exponential covariance functions.} #' \item{matern_3_2}{product form of Matern covariance functions with #' smoothness parameter 3/2.} #' \item{matern_5_2}{product form of Matern covariance functions with #' smoothness parameter 5/2.} #' \item{Gaussian}{product form of Gaussian covariance functions.} #' \item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.} #' } #' #' @param nugget.est a logical value indicating whether the nugget is included or not. Default value is \code{FALSE}. #' @param prior a list of arguments to setup the prior distributions with the reference prior as default. #' \describe{ #' \item{name}{the name of the prior. Current implementation includes #' \code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}} #' \item{hyperparam}{hyperparameters in the priors. #' For jointly robust (JR) prior, three parameters are included: #' \eqn{a} refers to the polynomial penalty to avoid singular correlation #' matrix with a default value 0.2; \eqn{b} refers to the exponenetial penalty to avoid #' diagonal correlation matrix with a default value 1; nugget.UB is the upper #' bound of the nugget variance with default value 1, which indicates that the #' nugget variance has support (0, 1).} #' #'} #' #' @param opt a list of arguments to setup the \code{\link{optim}} routine. #' @param NestDesign a logical value indicating whether the #' experimental design is hierarchically nested within each level #' of the code. #' @param tuning a list of arguments to control the MCEM algorithm for non-nested #' design. It includes the arguments #' \describe{ #' \item{maxit}{the maximum number of MCEM iterations.} #' \item{tol}{a tolerance to stop the MCEM algorithm. 
If the parameter #' difference between any two consecutive MCEM algorithm is less than #' this tolerance, the MCEM algorithm is stopped.} #' \item{n.sample}{the number of Monte Carlo samples in the #' MCEM algorithm.} #' \item{verbose}{a logical value to show the MCEM iterations if it is true.} #'} #' #' #' @param info a list that contains #' \describe{ #' \item{iter}{number of iterations used in the MCEM algorithm} #' \item{eps}{parameter difference after the MCEM algorithm stops} #'} #' @author Pulong Ma <[email protected]> #' #' @seealso \code{\link{ARCokrig}}, \code{\link{cokm.fit}}, \code{\link{cokm.predict}} #' @export cokm <- function(formula=list(~1,~1), output, input, cov.model="matern_5_2", nugget.est=FALSE, prior=list(), opt=list(), NestDesign=TRUE, tuning=list(), info=list()){ ## check the arguments .check.arg.cokm(formula=formula, output=output, input=input, prior=prior, opt=opt, NestDesign=NestDesign, tuning=tuning, info=info) S = length(output) # number of code levels Dim = dim(input[[1]])[2] # if(length(param[[1]])==Dim){ # is.nugget=FALSE # }else{ # is.nugget=TRUE # } is.nugget = nugget.est param = list() for(i in 1:S){ param.max = apply(input[[i]], 2, max) param.min = apply(input[[i]], 2, min) param[[i]] = (param.max-param.min)/2 } phi = do.call(cbind, param) if(length(opt)==0){ opt$maxit = 1000 if(dim(phi)[1]==1){ opt$method = "Brent" opt$lower = -10 opt$upper = 20 }else{ opt$method = "Nelder-Mead" opt$lower = -Inf opt$upper = Inf } }else{ if(dim(phi)[1]==1){ if(!exists("method", where=opt)){ opt$method = "Brent" } if(!exists("lower", where=opt)){ opt$lower = -10 } if(!exists("upper", where=opt)){ opt$upper = 20 } }else{ opt$method = "Nelder-Mead" opt$lower = -Inf opt$upper = Inf } } if(NestDesign){ #cat("\n Constructing cokm object for nested design.\n\n") tuning = list() }else{ #cat("\n Constructing cokm object for non-nested design.\n\n") if(!exists("maxit", where=tuning)){ tuning$maxit = 30 } if(!exists("tol", where=tuning)){ tuning$tol = 1e-3 } if(!exists("n.sample", where=tuning)){ tuning$n.sample = 30 } if(!exists("verbose", where=tuning)){ tuning$verbose = TRUE } } if(!exists("name", where=prior)){ prior$name = "Reference" } if(!exists("hyperparam", where=prior)){ prior$hyperparam = list() for(i in 1:S){ prior$hyperparam[[i]] = list(a=0.2, b=1, nugget.UB=1) } }else{ if(length(prior$hyperparam)==1){ for(i in 2:S){ hyperparam[[i]] = hyperparam[[1]] } prior$hyperparam = hyperparam } } ## construct the cokm object new("cokm", formula = formula, output = output, input = input, param = param, cov.model = cov.model, nugget.est = is.nugget, prior = prior, opt = opt, NestDesign = NestDesign, tuning = tuning, info = info ) } ##################################################################### ##################################################################### ##################################################################### ##################################################################### .check.arg.cokm <- function(formula, output, input, prior, opt, NestDesign, tuning, info){ if(!is(formula, "list")){ stop("\n\n formula should be a list contaning the regressors at each code level.\n\n") } if(!is(output, "list")){ stop("\n\noutput should be a list of responses. Each element in a list should contain output from a code level. 
The first level should contain output from the code with the lowest fidelity.\n\n") } s = length(output) if(!is(input, "list")){ stop("\n\ninput should be a list of inputs in computer models.\n\n") } for(t in 1:s){ if(!is(input[[t]], "matrix")){ message("\n\n coerce input to a matrix format.\n\n") input[[t]] = as.matrix(input[[t]]) } } # if(!is(param, "list")){ # stop("\n\nparam should be a list with each element containing initial values for # correlation parameters and nugget variance parameter (if needed).\n\n") # } if(!is(prior, "list")){ stop("\n\nprior should be a list containing arguments to setup the prior distributions.\n\n") } if(!is(opt, "list")){ stop("\n\nopt should be a list with each element containing optimization arguments at each code level.\n\n") } if(!is(NestDesign, "logical")){ stop("NestDesign should be a logical value indicating whether the design is hierarchically nested.") } if(!is(tuning, "list")){ stop("\n\n tuning should be a list containing tuning parameters to setup the MCEM algorithm in non-nested design.\n") } } setMethod("summary", signature(object="cokm"), function(object, ...){ message("cokm object\n") message("\n") message(paste0("Code levels:", length(object@data))) message("\n") message(paste0("Is nugget included:", [email protected])) message("\n") message(paste0("Nested Design:", object@NestDesign)) message("\n\n") })
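## ---------------------------------------------------------------------------
## Illustrative sketch (not run): the prior and optimisation settings
## documented above can be customised when the cokm object is constructed.
## The values below are illustrative; `hyperparam` is supplied as a list with
## one element per code level, and zc, zf, Dc, Df are the toy data from the
## roxygen examples of this package.
## ---------------------------------------------------------------------------
if (FALSE) {
  prior <- list(name = "JR",
                hyperparam = list(list(a = 0.2, b = 1, nugget.UB = 1),   # level 1
                                  list(a = 0.2, b = 1, nugget.UB = 1)))  # level 2
  opt <- list(maxit = 2000, method = "Brent", lower = -10, upper = 20)
  obj <- cokm(formula = list(~1, ~1+x1),
              output = list(c(zc), c(zf)),
              input = list(as.matrix(Dc), as.matrix(Df)),
              cov.model = "matern_5_2", prior = prior, opt = opt)
}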
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/cokmUtils.R
#' @title Conditional simulation at new inputs in autoregressive cokriging models for multivariate output
#' @description This function makes predictions based on conditional simulation in
#' autoregressive cokriging models for multivariate output
#'
#' @param obj a \code{\link{mvcokm}} object constructed via the function \code{\link{mvcokm}} in
#' this package
#' @param input.new a matrix including new inputs for making prediction
#' @param nsample a numerical value indicating the number of samples
#' @author Pulong Ma <[email protected]>
#'
#' @export
#'
#' @seealso \code{\link{mvcokm}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{ARCokrig}}
#'
mvcokm.condsim <- function(obj, input.new, nsample=30){

  formula = obj@formula
  output = obj@output
  input = obj@input
  param = obj@param
  cov.model = obj@cov.model
  NestDesign = obj@NestDesign

  phi = do.call(cbind, param)

  if(!is.matrix(input.new)){
    stop("input.new should be a matrix.")
  }

  if(NestDesign){
    pred.list = condsim.ND(formula=formula, output=output, input=input,
                           input.new=input.new, phi=phi, cov.model=cov.model,
                           nsample=nsample)
  }else{
    pred.list = condsim.NN(formula=formula, output=output, input=input,
                           input.new=input.new, phi=phi, cov.model=cov.model,
                           nsample=nsample)
  }

  return(pred.list)
}
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/mvcokm.condsim.R
#' @title fit the autoregressive cokriging model for multivariate output
#' @description This function estimates parameters in
#' the parallel partial cokriging model
#'
#' @param obj a \code{\link{mvcokm}} object constructed via the function \code{\link{mvcokm}} in
#' this package
#'
#' @author Pulong Ma <[email protected]>
#'
#' @export
#' @seealso \code{\link{mvcokm}}, \code{\link{mvcokm.predict}}, \code{\link{mvcokm.condsim}}, \code{\link{ARCokrig}}

mvcokm.fit = function(obj){

  formula = obj@formula
  output = obj@output
  input = obj@input
  param = obj@param
  cov.model = obj@cov.model
  nugget.est = obj@nugget.est
  prior = obj@prior
  NestDesign = obj@NestDesign
  opt = obj@opt
  tuning = obj@tuning

  hyperparam = prior$hyperparam

  phi = do.call(cbind, param)

  Dim = dim(input[[1]])[2]
  p.x = Dim
  # if(dim(phi)[1]==Dim){
  #   is.nugget=FALSE
  # }else{
  #   is.nugget=TRUE
  # }
  is.nugget = nugget.est

  ###################################################################
  #### begin parameter estimation
  ###################################################################

  if(NestDesign){
    fit = fit.ND(formula=formula, output=output, input=input, phi=phi,
                 cov.model=cov.model, prior=prior, opt=opt)
    phi.new = fit$par
    phi.new = split(phi.new, col(phi.new, as.factor = TRUE))
  }else{
    fit = fit.NN(formula=formula, output=output, input=input, phi=phi,
                 cov.model=cov.model, prior=prior, opt=opt, MCEM=tuning)
    phi.new = fit$par
    phi.new = split(phi.new, col(phi.new, as.factor = TRUE))
    obj@info = list(iter=fit$iter, eps=fit$eps)
  }

  # colnames(phi.new) = paste0("Level", seq(1:S), "")

  obj@param = phi.new

  return(obj)
}
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/mvcokm.fit.R
#' @title Get model parameters in autoregressive cokriging models for multivarite output #' @description This function computes estimates for regression and variance parameters #' given the correlation parameters are known. It is used to show all model #' parameters in one place. #' #' @param obj a \code{\link{mvcokm}} object construted via the function \code{\link{mvcokm}} in #' this package #' @return a list of model parameters including regression coefficients \eqn{\beta}, #' scale discrepancy \eqn{\gamma}, variance parameters #' \eqn{\sigma^2}, and correlation parameters \eqn{\phi} in covariance functions. #' If nugget parameters are included in the model, then nugget parameters are shown in \eqn{\phi}. #' #' @author Pulong Ma <[email protected]> #' #' @export #' #' @seealso \code{\link{mvcokm}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{ARCokrig}} #' mvcokm.param <- function(obj){ formula = obj@formula output = obj@output input = obj@input param = obj@param cov.model = [email protected] NestDesign = obj@NestDesign phi = do.call(cbind, param) Dim = dim(input[[1]])[2] p.x = Dim if(dim(phi)[1]==Dim){ is.nugget=FALSE }else{ is.nugget=TRUE } ################################################################### #### augment input ################################################################### S = length(output) # number of code out = augment.input(input) input.union = out$union input.miss = out$miss input.list = list(input=input, input.miss=input.miss) Cl = list() for(t in 1:S){ input.max = apply(input.list$input[[t]], 2, max) input.min = apply(input.list$input[[t]], 2, min) Cl[[t]] = abs(input.max-input.min) } y = output ################################################################### #### create covariates ################################################################### H = list() Hm = list() for(t in 1:S){ colnames(input[[t]]) = paste0("x", 1:p.x) df = data.frame(input[[t]]) H[[t]] = model.matrix(formula[[t]], df) if(t<S){ colnames(input.miss[[t]]) = paste0("x", 1:p.x) df = data.frame(input.miss[[t]]) Hm[[t]] = model.matrix(formula[[t]], df) } } ################################################################### #### compute intermediate quantities ################################################################### dist.o = list() dist.m = list() dist.mo = list() distlist = list() for(t in 1:S){ dist.o[[t]] = compute_distance(input[[t]], input[[t]]) if(t<S){ dist.m[[t]] = compute_distance(input.miss[[t]], input.miss[[t]]) dist.mo[[t]] = compute_distance(input.miss[[t]], input[[t]]) } distlist[[t]] = compute_distance(input.union[[t]], input.union[[t]]) } n.aug = rep(NA, S) q = rep(NA, S) for(t in 1:S){ n.aug[t] = dim(y[[t]])[1] q[t] = dim(H[[t]])[2] } if(NestDesign){ stop("Not implemented yet.") }else{ ################################################################### #### estimate sigma and beta with marginal posteriors ################################################################### sigma2.hat = list() beta.hat = list() ym.hat = list() ### estimate ym based on [ym|y,phi] for t=1 t=1 R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) HRHInv = solve(t(H[[t]])%*%RInv%*%H[[t]]) Rm = buildcov(phi[ ,t], dist.m[[t]], covmodel=cov.model, nugget=is.nugget) Rmo = buildcov(phi[ ,t], dist.mo[[t]], covmodel=cov.model, nugget=FALSE) # compute conditional mean RmoRInv = Rmo%*%RInv KW = (Hm[[t]]-RmoRInv%*%H[[t]]) %*% HRHInv %*% t(H[[t]]) %*% RInv + RmoRInv ym.hat[[t]] = KW %*% y[[t]] # n.m-by-N matrix ### 
estimate sigma2 based on [sigma2|ym,y,phi] for t=1 Q = RInv - RInv%*%H[[t]]%*%HRHInv%*%t(H[[t]])%*%RInv sigma2.hat[[t]] = compute_Svec(output=y[[t]], Q=Q) / (n.aug[t]-q[t]+2) ### estimate beta based on [beta|ym,y,phi] for t=1 beta.hat[[t]] = HRHInv%*%t(H[[t]])%*%RInv%*%y[[t]] ### estimate sigma and beta for t>1 for(t in 2:S){ R = buildcov(phi[ ,t], dist.o[[t]], covmodel=cov.model, nugget=is.nugget) U = chol(R) RInv = chol2inv(U) y_t1 = create.w(t=t, input=input, input.miss=input.miss[[t-1]], y=y[[t-1]], ym=ym.hat[[t-1]]) out = compute_param(y_t=y[[t]], Ht=H[[t]], y_t1=y_t1, RInv) sigma2.hat[[t]] = out$sigma2 beta.hat[[t]] = out$beta } names(beta.hat) = paste0("Level", seq(1:S), "") names(sigma2.hat) = paste0("Level", seq(1:S), "") } return(list(corr=param, coeff=beta.hat, var=sigma2.hat)) }
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/mvcokm.param.R
#' @title Prediction at new inputs in autoregressive cokriging models for multivariate output
#' @description This function makes predictions in
#' the parallel partial cokriging model. If a nested design is used, the predictive mean and predictive variance are
#' computed exactly; otherwise, Monte Carlo simulation from the predictive distribution is used to approximate
#' the predictive mean and predictive variance.
#' @param obj a \code{\link{mvcokm}} object constructed via the function \code{\link{mvcokm}} in
#' this package
#' @param input.new a matrix including new inputs for making prediction
#' @author Pulong Ma <[email protected]>
#'
#' @export
#'
#' @seealso \code{\link{mvcokm}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.condsim}}, \code{\link{ARCokrig}}
#'
mvcokm.predict <- function(obj, input.new){

  formula = obj@formula
  output = obj@output
  input = obj@input
  param = obj@param
  cov.model = obj@cov.model
  NestDesign = obj@NestDesign

  phi = do.call(cbind, param)

  if(!is.matrix(input.new)){
    stop("input.new should be a matrix.")
  }

  if(NestDesign){
    pred.list = predict.ND(formula=formula, output=output, input=input,
                           input.new=input.new, phi=phi, cov.model=cov.model)
  }else{
    n.sample = obj@tuning$n.sample
    pred.list = predict.NN(formula=formula, output=output, input=input,
                           input.new=input.new, phi=phi, cov.model=cov.model,
                           nsample=n.sample)
  }

  return(pred.list)
}
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/mvcokm.predict.R
#' @title Construct the mvcokm object #' @description This function constructs the mvcokm object in #' autogressive cokriging models for multivariate outputs. The model is known as the parallel partial (PP) cokriging emulator. #' #' @param formula a list of \eqn{s} elements, each of which contains the formula to specify fixed basis functions or regressors. #' @param output a list of \eqn{s} elements, each of which contains a matrix of computer model outputs. #' @param input a list of \eqn{s} elements, each of which contains a matrix of inputs. #' @param cov.model a string indicating the type of covariance #' function in the PP cokriging models. Current covariance functions include #' \describe{ #' \item{exp}{product form of exponential covariance functions.} #' \item{matern_3_2}{product form of Matern covariance functions with #' smoothness parameter 3/2.} #' \item{matern_5_2}{product form of Matern covariance functions with #' smoothness parameter 5/2.} #' \item{Gaussian}{product form of Gaussian covariance functions.} #' \item{powexp}{product form of power-exponential covariance functions with roughness parameter fixed at 1.9.} #' } #' #' @param nugget.est a logical value indicating whether the nugget is included or not. Default value is \code{FALSE}. #' @param prior a list of arguments to setup the prior distributions with the jointly robust prior as default #' \describe{ #' \item{name}{the name of the prior. Current implementation includes #' \code{JR}, \code{Reference}, \code{Jeffreys}, \code{Ind_Jeffreys}} #' \item{hyperparam}{hyperparameters in the priors. #' For jointly robust (JR) prior, three parameters are included: #' \eqn{a} refers to the polynomial penalty to avoid singular correlation #' matrix with a default value 0.2; \eqn{b} refers to the exponenetial penalty to avoid #' diagonal correlation matrix with a default value 1; nugget.UB is the upper #' bound of the nugget variance with default value 1, which indicates that the #' nugget variance has support (0, 1).} #' #'} #' #' @param opt a list of arguments to setup the \code{\link{optim}} routine. #' @param NestDesign a logical value indicating whether the #' experimental design is hierarchically nested within each level #' of the code. #' #' @param tuning a list of arguments to control the MCEM algorithm for non-nested #' design. It includes the arguments #' \describe{ #' \item{maxit}{the maximum number of MCEM iterations.} #' \item{tol}{a tolerance to stop the MCEM algorithm. 
If the parameter #' difference between any two consecutive MCEM algorithm is less than #' this tolerance, the MCEM algorithm is stopped.} #' \item{n.sample}{the number of Monte Carlo samples in the #' MCEM algorithm.} #' \item{verbose}{a logical value to show the MCEM iterations if it is true.} #'} #' #' #' @param info a list that contains #' \describe{ #' \item{iter}{number of iterations used in the MCEM algorithm} #' \item{eps}{parameter difference after the MCEM algorithm stops} #'} #' #' @author Pulong Ma <[email protected]> #' #' @seealso \code{\link{ARCokrig}}, \code{\link{mvcokm.fit}}, \code{\link{mvcokm.predict}}, \code{\link{mvcokm.condsim}} #' @export mvcokm <- function(formula=list(~1,~1), output, input, cov.model="matern_5_2", nugget.est=FALSE, prior=list(), opt=list(), NestDesign=TRUE, tuning=list(), info=list()){ ## check the arguments .check.arg.mvcokm(formula=formula, output=output, input=input, prior=prior, opt=opt, NestDesign=NestDesign, tuning=tuning, info=info) S = length(output) # number of code levels Dim = dim(input[[1]])[2] # if(length(param[[1]])==Dim){ # is.nugget=FALSE # }else{ # is.nugget=TRUE # } is.nugget = nugget.est param = list() for(i in 1:S){ param.max = apply(input[[i]], 2, max) param.min = apply(input[[i]], 2, min) param[[i]] = (param.max-param.min)/2 } phi = do.call(cbind, param) if(length(opt)==0){ opt$maxit = 600 if(dim(phi)[1]==1){ opt$method = "Brent" opt$lower = -10 opt$upper = 20 }else{ opt$method = "Nelder-Mead" opt$lower = -Inf opt$upper = Inf } }else{ if(dim(phi)[1]==1){ if(!exists("method", where=opt)){ opt$method = "Brent" } if(!exists("lower", where=opt)){ opt$lower = -10 } if(!exists("upper", where=opt)){ opt$upper = 20 } }else{ opt$method = "Nelder-Mead" opt$lower = -Inf opt$upper = Inf } } if(NestDesign){ tuning = list() }else{ if(!exists("maxit", where=tuning)){ tuning$maxit = 30 } if(!exists("tol", where=tuning)){ tuning$tol = 1e-3 } if(!exists("n.sample", where=tuning)){ tuning$n.sample = 30 } if(!exists("verbose", where=tuning)){ tuning$verbose = TRUE } } if(!exists("name", where=prior)){ prior$name = "JR" } if(!exists("hyperparam", where=prior)){ prior$hyperparam = list() for(i in 1:S){ prior$hyperparam[[i]] = list(a=0.2, b=1, nugget.UB=1) } }else{ if(length(prior$hyperparam)==1){ for(i in 2:S){ hyperparam[[i]] = hyperparam[[1]] } prior$hyperparam = hyperparam } } ## construct the mvcokm object new("mvcokm", formula = formula, output = output, input = input, param = param, cov.model = cov.model, nugget.est = is.nugget, prior = prior, opt = opt, NestDesign = NestDesign, tuning = tuning, info=info ) } ##################################################################### ##################################################################### ##################################################################### ##################################################################### .check.arg.mvcokm <- function(formula, output, input, prior, opt, NestDesign, tuning, info){ if(!is(formula, "list")){ stop("\n\n formula should be a list contaning the regressors at each code level.\n\n") } if(!is(output, "list")){ stop("\n\noutput should be a list of responses. Each element in a list should contain output from a code level. 
The first level should contain output from the code with the lowest fidelity.\n\n") } s = length(output) if(!is(input, "list")){ stop("\n\ninput should be a list of inputs in computer models.\n\n") } for(t in 1:s){ if(!is(input[[t]], "matrix")){ message("\n\n coerce input to a matrix format.\n\n") input[[t]] = as.matrix(input[[t]]) } } # if(!is(param, "list")){ # stop("\n\nparam should be a list with each element containing initial values for # correlation parameters and nugget variance parameter (if needed).\n\n") # } if(!is(prior, "list")){ stop("\n\nprior should be a list containing arguments to setup the prior distributions.\n\n") } if(!is(opt, "list")){ stop("\n\nopt should be a list with each element containing optimization arguments at each code level.\n\n") } if(!is(NestDesign, "logical")){ stop("NestDesign should be a logical value indicating whether the design is hierarchically nested.") } if(!is(tuning, "list")){ stop("\n\n tuning should be a list containing tuning parameters to setup the MCEM algorithm in non-nested design.\n") } } setMethod("summary", signature(object="mvcokm"), function(object, ...){ message("mvcokm object\n") message("\n") message(paste0("Code levels:", length(object@data))) message("\n") message(paste0("Is nugget included:", [email protected])) message("\n") message(paste0("Nested Design:", object@NestDesign)) message("\n\n") })
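## ---------------------------------------------------------------------------
## Illustrative sketch (not run): a minimal end-to-end use of the parallel
## partial (PP) cokriging interface. The toy data are illustrative
## assumptions: each code level supplies a matrix with one row per input and
## one column per output component, and the high-fidelity design is
## non-nested so that the MCEM-based routines (including mvcokm.param) are
## exercised.
## ---------------------------------------------------------------------------
if (FALSE) {
  Funcc <- function(x) 0.5*(6*x-2)^2*sin(12*x-4) + 10*(x-0.5) - 5
  Funcf <- function(x) 2*Funcc(x) - 20*x + 20 + sin(10*cos(5*x))

  Dc <- seq(-1, 1, 0.1)
  Df <- seq(-0.95, 0.95, length.out = 8)          # not a subset of Dc
  shifts <- seq(0, 1, length.out = 5)             # 5 output components
  zc <- outer(Dc, shifts, function(x, s) Funcc(x + 0.05*s))
  zf <- outer(Df, shifts, function(x, s) Funcf(x + 0.05*s))

  obj <- mvcokm(formula = list(~1, ~1+x1),
                output = list(zc, zf),
                input = list(as.matrix(Dc), as.matrix(Df)),
                prior = list(name = "JR"), cov.model = "matern_5_2",
                NestDesign = FALSE)
  obj <- mvcokm.fit(obj)

  input.new <- as.matrix(seq(-1, 1, length.out = 50))
  pred <- mvcokm.predict(obj, input.new)          # predictive mean/SE per level
  sims <- mvcokm.condsim(obj, input.new, nsample = 30)
  est  <- mvcokm.param(obj)                       # beta, gamma and sigma^2 estimates
}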
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/mvcokmUtils.R
########################################################################## ## start-up and clean-up functions ## ## This software is distributed under the terms of the GNU GENERAL ## PUBLIC LICENSE Version 2 and above, April 2020. ## ## Copyright (C) 2020-present by Pulong Ma ## ########################################################################## .onAttach <- function(...) { date <- date() x <- regexpr("[0-9]{4}", date) this.year <- substr(date, x[1], x[1] + attr(x, "match.length") - 1) # echo output to screen packageStartupMessage("\n##################################") packageStartupMessage("##\n## Multifidelity computer model emulation, ARCokrig Package") packageStartupMessage("## Copyright (C) 2020-", this.year, " by Pulong Ma", sep="") packageStartupMessage("## Please cite this package and related papers") packageStartupMessage("####################################") } .onUnload <- function(libpath) { library.dynam.unload("ARCokrig", libpath) }
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/startup.R
augment.input <- function(input){ if(is.list(input)){ S = length(input) }else{ stop("Input is not a list!") } input.union = list() for(t in 1:S){ input.union[[t]] = do.call(rbind, input[t:S]) } # get the missing inputs at each level input.miss = list() for(t in 1:(S-1)){ input.miss[[t]] = (match.input(input.union[[t+1]], input[[t]])$A) } # only combine missing and observed inputs with repetition for(t in 1:(S-1)){ input.union[[t]] = rbind(input[[t]], input.miss[[t]]) } input.union[[S]] = input[[S]] return(list(union=input.union, miss=input.miss)) } ## for univariate models create.w.new <- function(t, input, input.miss, y, ym){ n = nrow(input.miss[[t]]) w = matrix(NA, n) indtemp = ismember(input.miss[[t]], input.miss[[t-1]]) w[indtemp$IIA] = ym[indtemp$IA] if(length(indtemp$IIA)<n){ indtemp = ismember(input.miss[[t]], input) w[indtemp$IIA] = y[indtemp$IA] } return(w) } ## for univiariate models create.w.univ <- function(t, input, input.miss, y, ym){ n = nrow(input[[t]]) w = matrix(NA, n, 1) indtemp = ismember(input.miss, input[[t]]) w[indtemp$IA, 1] = ym[indtemp$IIA] # if(length(indtemp$IA)<n){ # input.temp = input[[t]][indtemp$NIB, , drop=FALSE] # ID = ismember(input.temp, input[[t-1]])$IA # w[indtemp$NIB, 1] = y[ID] # } if(length(indtemp$IA)<n){ input.temp = input[[t]][indtemp$NIA, , drop=FALSE] ID = ismember(input.temp, input[[t-1]])$IA w[indtemp$NIA, 1] = y[ID] } return(w) } ## This routine creates W's at missing inputs for multivaraite models create.w <- function(t, input, input.miss, y, ym){ n = nrow(input[[t]]) N = ncol(y) w = matrix(NA, n, N) indtemp = ismember(input.miss, input[[t]]) w[indtemp$IA, ] = ym[indtemp$IIA, ] # if(length(indtemp$IA)<n){ # input.temp = input[[t]][indtemp$NIB, , drop=FALSE] # ID = ismember(input.temp, input[[t-1]])$IA # w[indtemp$NIB, 1] = y[ID] # } if(length(indtemp$IA)<n){ input.temp = input[[t]][indtemp$NIA, , drop=FALSE] ID = ismember(input.temp, input[[t-1]])$IA w[indtemp$NIA, ] = y[ID, ] } return(w) } ## This routine creates W's at new inputs for multivariate models create.w.pred <- function(t, input, input.miss, y, ym){ n = nrow(input.miss[[t]]) N = ncol(y) w = matrix(NA, n, N) indtemp = ismember(input.miss[[t]], input.miss[[t-1]]) w[indtemp$IIA, ] = ym[indtemp$IA, ] if(length(indtemp$IIA)<n){ indtemp = ismember(input.miss[[t]], input) w[indtemp$IIA, ] = y[indtemp$IA, ] } return(w) } ismember <- function(matA, matB){ if(!is.matrix(matA)){ message("\n The first argument is not a matrix!\n") matA = as.matrix(matA) } if(!is.matrix(matB)){ message("\n The second argument is not a matrix!\n") matA = as.matrix(matB) } nB = nrow(matB) dfA = data.frame(t(matA)) dfB = data.frame(t(matB)) # find positions in matB such that all rows of matA are in matB ind = match(dfA, dfB) indA = ind[!is.na(ind)] # matA == matB[indA, ] indtemp = 1:nB NIA = indtemp[-indA] # matB[NIA, ] is not in matA # find positions in matA such that all rows of matA are in matB IIA = which(!is.na(ind)) # matA[IIA, ] == matB[indA, ] NIB = which(is.na(ind)) return(list(IA=indA, IIA=IIA, NIA=NIA, NIB=NIB)) # matA[IIA, ] = matB[IA, ] # matB[NIA, ] does not belong to any rows of matA # matA[NIB, ] does not belog to any rows of matB } match.input = function(input1, input2){ if(!is.matrix(input1)){ message("\n The first argument is not a matrix!\n") input1 = as.matrix(input1) } if(!is.matrix(input2)){ message("\n The second argument is not a matrix!\n") input2 = as.matrix(input2) } n1 = dim(input1)[1] n2 = dim(input2)[1] dfA = data.frame(t(input1)) dfB = data.frame(t(input2)) ind = match(dfA, dfB) 
indB = ind[!is.na(ind)]
  indtemp = 1:n2
  indA = which(!is.na(ind))
  NIB = which(is.na(ind))
  A = input1[NIB, ,drop=FALSE]

  if(length(indA)==0){
    indA = NULL
  }
  if(length(indB)==0){
    indB = NULL
  }

  return(list(IA=indA, A=A, IB=indB))
  # IA: positions of elements from input1 that are also in input2
  # A: elements from input1 that are not in input2
  # IB: positions of elements from input2 that are also in input1
}


############################################################################
#' @title Compute continuous rank probability score for normal distributions
#' @description This function computes the continuous rank probability score for
#' normal distributions. It is mainly used to evaluate the validity of
#' predictive distributions.
#' @param x a vector of true values (held-out data)
#' @param mu a vector of predictive means
#' @param sig a vector of predictive standard deviations
#'
#' @author Pulong Ma <[email protected]>
#' @export
CRPS <- function(x, mu, sig){
  xo = (x-mu)/sig
  crps = sig*(xo*(2*pnorm(xo)-1) + 2*dnorm(xo) - 1/sqrt(pi))
  return(crps)
}
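## ---------------------------------------------------------------------------
## Illustrative sketch (not run): small checks showing the index conventions
## documented above for ismember()/match.input(), and a direct use of CRPS().
## ---------------------------------------------------------------------------
if (FALSE) {
  A <- matrix(c(1, 2, 3, 4), ncol = 1)
  B <- matrix(c(2, 4, 5), ncol = 1)
  ismember(A, B)     # IIA: rows of A found in B; IA: their positions in B
  match.input(A, B)  # A: rows of A absent from B; IA/IB: matching positions

  # CRPS of a standard normal predictive distribution at three held-out values
  x <- c(-1, 0, 2)
  CRPS(x, mu = rep(0, 3), sig = rep(1, 3))
}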
/scratch/gouwar.j/cran-all/cranData/ARCokrig/R/utils.R
# European Commission
# ARDECO database
# R package "ardeco" exposing ARDECO data to be used in R
#
# Function: ardeco_get_dataset_data
# Input: variable (mandatory) = variable code
#        unit: (optional) unit of measure of the dataset. If NULL, return data for all units
#        sector: (optional) if there is only one sector, this is coded 'Total', otherwise
#                it is one of the NACE sectors. If NULL, return data for all sectors
#        version: (optional) if defined, return only the versions belonging to the provided set.
#                 If NULL, return all versions
#        nutscode: (optional) if defined, return only the nuts codes matching the provided regex.
#                  If NULL, return all nuts codes
#        year: (optional) if defined, return only the listed years.
#              If NULL, return all years
#        level: (optional) if defined, return only the nuts codes belonging to the provided nuts level set.
#               The nuts level is an integer code:
#               - 0-3: nuts level from nuts0 to nuts3
#               - 4: metro regions
#               - 9: EU average (e.g. EU27_2020)
#               If NULL, return all levels
# Output: a dataframe collecting the data related to the requested dataset,
#         detailed by year, nuts code, unit and sector.
#         The fields of the dataframe are the following:
#         - variable: code of the variable
#         - sector: if there is only one sector, this is coded 'Total', otherwise
#                   it is a NACE sector
#         - unit: unit of measure of the value
#         - level: the nuts level of nutscode
#         - version: nuts version of the nutscode
#         - nutscode: code of the territory (NUTS code) of the value
#         - year: year of reference of the value
#         - value: value of the variable for a specific unit, sector, version, level, nutscode and year
#
# Description: returns the values for each version, level, year and nutscode related to
#              the requested dataset specified by variable code, unit and sector.
#              The variable code is a mandatory parameter; all the other input parameters are optional.
#              If only the variable is defined, the function returns all data related to the requested variable.
# #' @export ardeco_get_dataset_data <- function(variable, unit=NULL, sector=NULL, version=NULL, nutscode=NULL, year=NULL, level=NULL) { # define variables exposed by external functions # binding variables to read dataset list with lastBatchList dt_unit <- dt_sector <- dt_version <- lastBatchList <- NULL # binding variables: api jsonstat data DATE <- LEVEL <- TERRITORY_ID <- NULL # root of the URL to access to graphQL API for ARDECO link <- 'https://urban.jrc.ec.europa.eu/ardeco-api-v2/graphql' conn <- GraphqlClient$new(url=link) ### 1) read all datasets for variable with batch_id ############### START: ardeco_get_dataset_list code requiring also lastBatch:id ####### # root of the URL to access to graphQL API for ARDECO link <- 'https://urban.jrc.ec.europa.eu/ardeco-api-v2/graphql' conn <- GraphqlClient$new(url=link) # build the graphql query to recover the list of dataset for # the requested variable query <- paste('query { datasetList (variableCode: "',variable,'") { variableCode unit sector lastBatchList(release: true) { nutsVersion, id } } }',sep="") new <- Query$new()$query('link', query) # submit the GraphQL API request result <- conn$exec(new$link) %>% fromJSON(flatten = F) # convert the result in formatted list dataset_list <- result$data$datasetList %>% as_tibble() dataset_list <- dataset_list %>% unnest(lastBatchList) # rename column name of dataset_list with suffix dt (dt_var, dt_unit, dt_sector) lookup <- c(dt_var="variableCode", dt_unit="unit", dt_sector="sector", dt_version="nutsVersion", dt_id="id") dataset_list = rename(dataset_list, all_of(lookup)) ############### END: ardeco_get_dataset_list code requiring also lastBatch:id ####### # if dataset_list is empty, return if (nrow(dataset_list) == 0) { return(paste("Variable", variable, "doesn't exist or return no data", sep=" ")) } ### 2) filter datasets list according to unit value if ( !is.null(unit)) { dataset_list <- dataset_list %>% filter(dt_unit %in% unit) } # if dataset_list is empty, return if (nrow(dataset_list) == 0) { return(paste("Unit", unit, "doesn't exist in variable", variable, sep=" ")) } ### 3) filter datasets list according to sector value if ( !is.null(sector)) { dataset_list <- dataset_list %>% filter(dt_sector %in% sector) } # if dataset_list is empty, return if (nrow(dataset_list) == 0) { return(paste("Sector", sector, "doesn't exist in variable", variable, sep=" ")) } ### 4) filter datasets list according to version value if ( !is.null(version)) { dataset_list <- dataset_list %>% filter(dt_version %in% version) } # if dataset_list is empty, return if (nrow(dataset_list) == 0) { return(paste("version", version, "doesn't exist for the selected variable/unit/sector", sep=" ")) } ### 5) read data for all selected dataset and filter by YEAR and NUTSCODE # For each identified dataset, recover the data using the lastBatchId for (i in 1:nrow(dataset_list)) { ## 5.1) read data using API message(paste("Recovering dataset: var", variable,"unit:", dataset_list[i,c("dt_unit")], "sector:",dataset_list[i,c("dt_sector")], "version:",dataset_list[i,c("dt_version")])) # submit REST API to recover dataset data using the batch_id # - Build the API rest request call <- paste('https://urban.jrc.ec.europa.eu/ardeco-api-v2/rest/batch/', dataset_list[i,c("dt_id")],sep="") # submit rest API request converting data from JSONstat to R data frame (function fromJSONstat) json_data <- fromJSONstat(call, naming="id") # add 'version' column to read dataframe (version is not returned by API) json_data['version'] <- 
      dataset_list[i,c("dt_version")]

    # Store recovered data into a unique data.frame (dataset_data).
    # Assign (first round) or add (rbind) into dataset_data
    if (i == 1) {
      dataset_data <- json_data
    } else {
      dataset_data <- rbind(dataset_data, json_data)
    }

    ## 5.2) Apply filter by nuts
    if ( !is.null(nutscode)) {
      #dataset_data <- data.frame(filter(dataset_data, grepl(nutscode, TERRITORY_ID, ignore.case = TRUE)))
      dataset_data <- dataset_data %>% filter(grepl(nutscode, TERRITORY_ID, ignore.case = TRUE))
      #dataset_data <- dataset_data %>% filter(TERRITORY_ID %in% nutscode)
    }

    ## 5.3) Apply filter by year
    if ( !is.null(year)) {
      dataset_data <- dataset_data %>% filter(DATE %in% year)
    }
  }

  # add column LEVEL to the output data.frame
  # level definition:
  #   nuts0-3: level 0-3
  #   metro:   level 4
  #   EU aggregates (e.g. EU27_2020): level 9
  if (nrow(dataset_data) > 0) {
    dataset_data$LEVEL <- nchar(dataset_data$TERRITORY_ID) - 2
    # EU aggregates such as EU27_2020 (codes longer than a nuts3 code) are first
    # flagged with -1, then mapped to level 9 after metro regions get level 4
    if (nrow(subset(dataset_data, LEVEL > 6)) > 0) {
      dataset_data[dataset_data$LEVEL>6,]$LEVEL <- -1
    }
    if (nrow(subset(dataset_data, LEVEL > 3)) > 0) {
      dataset_data[dataset_data$LEVEL>3,]$LEVEL <- 4
    }
    if (nrow(subset(dataset_data, LEVEL < 0)) > 0) {
      dataset_data[dataset_data$LEVEL<0,]$LEVEL <- 9
    }
  }

  ### 6) Apply filter by LEVEL
  if ( nrow(dataset_data) > 0 & !is.null(level)) {
    dataset_data <- dataset_data %>% filter(LEVEL %in% level)
  }

  ### 7) formatting output data.frame
  if (nrow(dataset_data) > 0) {
    # rename the columns for output dataset_data
    lookup <- c(year="DATE", sector="SECTOR", nutscode="TERRITORY_ID", unit="UNIT", variable="VARIABLE", level="LEVEL")
    dataset_data = rename(dataset_data, all_of(lookup))

    # reorder the columns of output dataset_data
    dataset_data <- dataset_data[, c("variable", "sector", "unit", "version", "level", "nutscode", "year", "value")]
  } else {
    # empty result: build a 0-row data.frame with the 8 output columns
    dataset_data <- data.frame(matrix(ncol=8, nrow=0))
    colnames(dataset_data) <- c("variable", "sector", "unit", "version", "level", "nutscode", "year", "value")
  }

  ### 8) return requested data
  return(dataset_data)
}
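
## ---------------------------------------------------------------------------
## Usage sketch, not part of the package: querying the live ARDECO API through
## the function defined above. It needs internet access, the result depends on
## the current content of the service, and the variable code "SNPTD" and the
## filter values are assumptions used only for illustration.
.demo_ardeco_get_dataset_data <- function() {
  # NUTS-2 regions of Italy ("^IT" is a regex on the nuts code) for two years
  pop_it <- ardeco_get_dataset_data(variable = "SNPTD",
                                    nutscode = "^IT",
                                    level    = 2,
                                    year     = c(2019, 2020))
  # columns: variable, sector, unit, version, level, nutscode, year, value
  head(pop_it)
}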
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDECO/R/get_dataset_data.R
# European Commission
# ARDECO database
# R package "ardeco" exposing ARDECO data to be used in R
#
# Function: ardeco_get_dataset_list
# Input: var_code = variable code
# Output: list of datasets for the requested variable
#         - dt_var: variable code
#         - dt_unit: unit of measure for the dataset
#         - dt_sector: if there is only one sector, this is coded 'Total', otherwise
#                      it's a NACE sector
#         - dt_version: nuts version of the last released batch
#
# Description: returns the list of the datasets related to a variable (input parameter).
#              For each dataset the variableCode, the unit of measure and
#              the sector are returned.
#              For variables with only one sector, the sector is usually coded
#              "Total".
#
#' @export
ardeco_get_dataset_list <- function(var_code) {

  # binding variables to read dataset list with lastBatchList
  lastBatchList <- NULL

  # root of the URL to access the graphQL API for ARDECO
  link <- 'https://urban.jrc.ec.europa.eu/ardeco-api-v2/graphql'
  conn <- GraphqlClient$new(url=link)

  # build the graphql query to recover the list of datasets for
  # the requested variable
  query <- paste('query { datasetList (variableCode: "',var_code,'") { variableCode unit sector lastBatchList(release: true) { nutsVersion } } }',sep="")
  new <- Query$new()$query('link', query)

  # submit the GraphQL API request
  result <- conn$exec(new$link) %>% fromJSON(flatten = F)

  # convert the result into a formatted list
  dataset_list <- result$data$datasetList %>% as_tibble()
  dataset_list <- dataset_list %>% unnest(lastBatchList)

  # rename the columns of dataset_list with prefix dt (dt_var, dt_unit, dt_sector, dt_version)
  lookup <- c(dt_var="variableCode", dt_unit="unit", dt_sector="sector", dt_version="nutsVersion")
  dataset_list = rename(dataset_list, all_of(lookup))

  # return the formatted data
  return(dataset_list)
}
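
## ---------------------------------------------------------------------------
## Usage sketch, not part of the package: listing the unit/sector/nuts-version
## combinations available for one variable. Needs internet access; the variable
## code "SNPTD" is an assumption used only for illustration.
.demo_ardeco_get_dataset_list <- function() {
  datasets <- ardeco_get_dataset_list("SNPTD")
  # one row per dataset, with columns dt_var, dt_unit, dt_sector, dt_version
  datasets
}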
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDECO/R/get_dataset_list.R
# European Commission
# ARDECO database
# R package "ardeco" exposing ARDECO data to be used in R
#
# Function: get_variable_list
# Input: none
# Output: list of variables
#         - code: variable code
#         - description: variable description
#         - datasets: list of datasets defined for the variable
#           - unit: unit of measure for the dataset
#           - sector: sector of the dataset
#
# Description: returns the list of the variables collected in ARDECO, exposing
#              the code, the description and the set of datasets defined for that variable.
#              For each dataset the unit of measure and the sector are returned.
#              For variables with only one sector, the sector is usually coded
#              "Total".
#
#' @export
ardeco_get_variable_list <- function() {

  # set code variable to NULL (for CRAN check)
  code <- NULL

  # root of the URL to access the graphQL API for ARDECO
  link <- 'https://urban.jrc.ec.europa.eu/ardeco-api-v2/graphql'
  conn <- GraphqlClient$new(url=link)

  # build the graphql query to recover the list of variables
  # and the related descriptions
  query <- 'query{variableList{code, description}}'
  new <- Query$new()$query('link', query)

  # submit the GraphQL API request
  result <- conn$exec(new$link) %>% fromJSON(flatten = F)

  # convert the result into a formatted list
  variable_list <- result$data$variableList %>% as_tibble()

  # remove variables which must not be public (deflators, rates, others...)
  '%notin%' <- Negate('%in%')
  variable_list <- subset(variable_list, code %notin% c("RHVGDP", "XGVA_CLV2015", "XGVAGR_N2", "XGDPGR_N2",
                                                        "XGVAGR_N3", "PVGD", "PVGT", "PVG1", "PVG2", "PVG4",
                                                        "PVG5", "PIGT", "RUTYH", "SOKCT", "SUKCT", "SOKCZ",
                                                        "SUKCZ", "ROKND", "ROKNZ", "SUKCT", "RNECN", "RNUTN",
                                                        "RUVNH", "RUYNH"))

  # return the formatted data
  return(variable_list)
}
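
## ---------------------------------------------------------------------------
## Usage sketch, not part of the package: retrieving the public variable
## catalogue, typically the first step before calling ardeco_get_dataset_list()
## or ardeco_get_dataset_data(). Needs internet access.
.demo_ardeco_get_variable_list <- function() {
  vars <- ardeco_get_variable_list()
  # two columns: code and description
  head(vars)
}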
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDECO/R/get_variable_list.R
#' ARDL: ARDL, ECM and Bounds-Test for Cointegration
"_PACKAGE"
#'
#' @keywords internal
#' @docType package
#' @name ARDL-package
#'
#' @importFrom dplyr %>%
#' @importFrom zoo merge.zoo
#'
# no visible binding for global variable '.' (solution line)
globalVariables(c("."))

.onAttach <- function(libname, pkgname) {
  packageStartupMessage(format(utils::citation("ARDL"), bibtex = FALSE)[[1]], "\n\n",
                        format(utils::citation("ARDL"), bibtex = FALSE)[[2]], "\n\n",
                        format(utils::citation("ARDL"), bibtex = FALSE)[[3]])
}
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDL/R/ardl-package.R
#' ARDL model regression #' #' A simple way to construct complex ARDL specifications providing just the #' model order additional to the model formula. It uses #' \code{\link[dynlm]{dynlm}} under the hood. \code{ardl} is a generic function #' and the default method constructs an 'ardl' model while the other method #' takes a model of \code{\link[base]{class}} 'uecm' and converts in into an #' 'ardl'. #' #' The \code{formula} should contain only variables that exist in the data #' provided through \code{data} plus some additional functions supported by #' \code{\link[dynlm]{dynlm}} (i.e., \code{trend()}). #' #' You can also specify fixed variables that are not supposed to be lagged (e.g. #' dummies etc.) simply by placing them after \code{|}. For example, \code{y ~ #' x1 + x2 | z1 + z2} where \code{z1} and \code{z2} are the fixed variables and #' should not be considered in \code{order}. Note that the \code{|} notion #' should not be confused with the same notion in \code{dynlm} where it #' introduces instrumental variables. #' #' @param formula A "formula" describing the linear model. Details for model #' specification are given under 'Details'. #' @param data A time series object (e.g., "ts", "zoo" or "zooreg") or a data #' frame containing the variables in the model. In the case of a data frame, #' it is coerced into a \code{\link[stats]{ts}} object with \code{start = 1}, #' \code{end = nrow(data)} and \code{frequency = 1}. If not found in data, the #' variables are NOT taken from any environment. #' @param order A specification of the order of the ARDL model. A numeric vector #' of the same length as the total number of variables (excluding the fixed #' ones, see 'Details'). It should only contain positive integers or 0. An #' integer could be provided if all variables are of the same order. #' @param start Start of the time period which should be used for fitting the #' model. #' @param end End of the time period which should be used for fitting the model. #' @param ... Additional arguments to be passed to the low level regression #' fitting functions. #' #' @return \code{ardl} returns an object of \code{\link[base]{class}} #' \code{c("dynlm", "lm", "ardl")}. In addition, attributes 'order', 'data', #' 'parsed_formula' and 'full_formula' are provided. 
#' #' @section Mathematical Formula: #' The general form of an \eqn{ARDL(p,q_{1},\dots,q_{k})}{ARDL(p,q1,...,qk)} is: #' \deqn{y_{t} = c_{0} + c_{1}t + \sum_{i=1}^{p}b_{y,i}y_{t-i} + #' \sum_{j=1}^{k}\sum_{l=0}^{q_{j}}b_{j,l}x_{j,t-l} + \epsilon_{t}} #' #' @seealso \code{\link{uecm}}, \code{\link{recm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords models ts #' @export #' @examples #' data(denmark) #' #' ## Estimate an ARDL(3,1,3,2) model ------------------------------------- #' #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' summary(ardl_3132) #' #' ## Add dummies or other variables that should stay fixed --------------- #' #' d_74Q1_75Q3 <- ifelse(time(denmark) >= 1974 & time(denmark) <= 1975.5, 1, 0) #' #' # the date can also be setted as below #' d_74Q1_75Q3_ <- ifelse(time(denmark) >= "1974 Q1" & time(denmark) <= "1975 Q3", 1, 0) #' identical(d_74Q1_75Q3, d_74Q1_75Q3_) #' den <- cbind(denmark, d_74Q1_75Q3) #' ardl_3132_d <- ardl(LRM ~ LRY + IBO + IDE | d_74Q1_75Q3, #' data = den, order = c(3,1,3,2)) #' summary(ardl_3132_d) #' compare <- data.frame(AIC = c(AIC(ardl_3132), AIC(ardl_3132_d)), #' BIC = c(BIC(ardl_3132), BIC(ardl_3132_d))) #' rownames(compare) <- c("no dummy", "with dummy") #' compare #' #' ## Estimate an ARDL(3,1,3,2) model with a linear trend ----------------- #' #' ardl_3132_tr <- ardl(LRM ~ LRY + IBO + IDE + trend(LRM), #' data = denmark, order = c(3,1,3,2)) #' #' # Alternative time trend specifications: #' # time(LRM) 1974 + (0, 1, ..., 55)/4 time(data) #' # trend(LRM) (1, 2, ..., 55)/4 (1:n)/freq #' # trend(LRM, scale = FALSE) (1, 2, ..., 55) 1:n #' #' ## Subsample ARDL regression (start after 1975 Q4) --------------------- #' #' ardl_3132_sub <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' order = c(3,1,3,2), start = "1975 Q4") #' #' # the date can also be setted as below #' ardl_3132_sub2 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' order = c(3,1,3,2), start = c(1975,4)) #' identical(ardl_3132_sub, ardl_3132_sub2) #' summary(ardl_3132_sub) #' #' ## Ease of use --------------------------------------------------------- #' #' # The model specification of the ardl_3132 model can be created as easy as order=c(3,1,3,2) #' # or else, it could be done using the dynlm package as: #' library(dynlm) #' m <- dynlm(LRM ~ L(LRM, 1) + L(LRM, 2) + L(LRM, 3) + LRY + L(LRY, 1) + IBO + L(IBO, 1) + #' L(IBO, 2) + L(IBO, 3) + IDE + L(IDE, 1) + L(IDE, 2), data = denmark) #' identical(m$coefficients, ardl_3132$coefficients) #' #' # The full formula can be extracted from the ARDL model, and this is equal to #' ardl_3132$full_formula #' m2 <- dynlm(ardl_3132$full_formula, data = ardl_3132$data) #' identical(m$coefficients, m2$coefficients) #' #' ## Post-estimation testing --------------------------------------------- #' #' # See examples in the help file of the uecm() function #' ardl <- function(...) { UseMethod("ardl") } #' @rdname ardl #' #' @param object An object of \code{\link[base]{class}} 'uecm'. #' #' @export #' ardl.uecm <- function(object, ...) 
{ parsed_formula <- object$parsed_formula order <- object$order data <- object$data start <- start(object) end <- end(object) unbuild_formula <- paste0(paste0(parsed_formula$y_part$var, "~"), paste0(parsed_formula$x_part$var, collapse = "+"), if (length(parsed_formula$w_part$var) == 0) { # Case I "" } else if (parsed_formula$w_part$var[1] == "- 1") { # Cases II & III paste0(parsed_formula$w_part$var) } else { # Cases IV & V paste0("+", parsed_formula$w_part$var) }, ifelse(length(parsed_formula$fixed_part$var) != 0, paste0("|", paste0(parsed_formula$fixed_part$var, collapse = "+")), "")) return(ardl(stats::formula(unbuild_formula), data = data, order = order, start = start, end = end)) } #' @rdname ardl #' #' @export #' ardl.default <- function(formula, data, order, start = NULL, end = NULL, ...) { if (!any(c("ts", "zoo", "zooreg") %in% class(data))) { data <- stats::ts(data, start = 1, end = nrow(data), frequency = 1) } parsed_formula <- parse_formula(formula = formula, colnames_data = colnames(data)) order <- parse_order(orders = order, order_name = "order", var_names = parsed_formula$z_part$var, kz = parsed_formula$kz) ardl_formula <- build_ardl_formula(parsed_formula = parsed_formula, order = order) full_formula <- formula(ardl_formula$full) ardl_model <- dynlm::dynlm(full_formula, data = data, start = start, end = end, ...) # for model compatibility in the global env attr(ardl_model$terms, ".Environment") <- .GlobalEnv attr(attr(ardl_model$model, "terms"), ".Environment") <- .GlobalEnv attr(full_formula, ".Environment") <- .GlobalEnv ardl_model$order <- order ardl_model$data <- data ardl_model$parsed_formula <- parsed_formula ardl_model$full_formula <- full_formula attr(ardl_model, "class") <- c(class(ardl_model), "ardl") return(ardl_model) }
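
## ---------------------------------------------------------------------------
## Illustrative sketch, not part of the package: the ardl.uecm() method above
## rebuilds the levels ARDL specification from an already estimated UECM, so
## converting back and forth reproduces the same regression. The helper name is
## ours; it only combines exported functions (ardl(), uecm()) on the bundled
## denmark data.
.demo_ardl_uecm_roundtrip <- function() {
  data(denmark, envir = environment())
  ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3, 1, 3, 2))
  uecm_3132 <- uecm(ardl_3132)   # ARDL -> unrestricted ECM
  ardl_back <- ardl(uecm_3132)   # UECM -> ARDL, dispatched to ardl.uecm()
  identical(coef(ardl_3132), coef(ardl_back))  # expected TRUE
}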
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDL/R/ardl.R
#' Automatic ARDL model selection #' #' It searches for the best ARDL order specification, according to the selected #' criterion, taking into account the constraints provided. #' #' @param formula A "formula" describing the linear model. Details for model #' specification are given under 'Details' in the help file of the #' \code{\link{ardl}} function. #' @param max_order It sets the maximum order for each variable where the search #' is taking place. A numeric vector of the same length as the total number of #' variables (excluding the fixed ones, see 'Details' in the help file of the #' \code{\link{ardl}} function). It should only contain positive integers. An #' integer could be provided if the maximum order for all variables is the #' same. #' @param fixed_order It allows setting a fixed order for some variables. The #' algorithm will not search for any other order than this. A numeric vector #' of the same length as the total number of variables (excluding the fixed #' ones). It should contain positive integers or 0 to set as a constraint. A #' -1 should be provided for any variable that should not be constrained. #' \code{fixed_order} overrides the corresponding \code{max_order} and #' \code{starting_order}. #' @param starting_order Specifies the order for each variable from which each #' search will start. It is a numeric vector of the same length as the total #' number of variables (excluding the fixed ones). It should contain positive #' integers or 0 or only one integer could be provided if the starting order #' for all variables is the same. Default is set to NULL. If unspecified #' (\code{NULL}) and \code{grid = FALSE}, then all possible \eqn{ARDL(p)} #' models are calculated (constraints are taken into account), where \eqn{p} #' is the minimum value in \code{max_order}. Note that where #' \code{starting_order} is provided, its first element will be the minimum #' value of \eqn{p} that the searching algorithm will consider (think of it like #' a 'minimum p order' restriction) (see 'Searching algorithm' below). If #' \code{grid = TRUE}, only the first argument (\eqn{p}) will have an effect. #' @param selection A character string specifying the selection criterion #' according to which the candidate models will be ranked. Default is #' \code{\link[stats]{AIC}}. Any other selection criterion can be used (a user #' specified or a function from another package) as long as it can be applied #' as \code{selection(model)}. The preferred model is the one with the smaller #' value of the selection criterion. If the selection criterion works the #' other way around (the bigger the better), \code{selection_minmax = "max"} #' should also be supplied (see 'Examples' below). #' @param selection_minmax A character string that indicates whether the #' criterion in \code{selection} is supposed to be minimized (default) or #' maximized. #' @param grid If \code{FALSE} (default), the stepwise searching regression #' algorithm will search for the best model by adding and subtracting terms #' corresponding to different ARDL orders. If \code{TRUE}, the whole set of #' all possible ARDL models (accounting for constraints) will be evaluated. #' Note that this method can be very time-consuming in case that #' \code{max_order} is big and there are many independent variables that #' create a very big number of possible combinations. #' @param search_type A character string describing the search type. 
If #' "horizontal" (default), the searching algorithm increases or decreases by 1 #' the order of each variable in each iteration. When the order of the last #' variable has been accessed, it begins again from the first variable until #' it converges. If "vertical", the searching algorithm increases or decreases #' by 1 the order of a variable until it converges. Then it continues the same #' for the next variable. The two options result to very similar top orders. #' The default ("horizontal"), sometimes is a little more accurate, but the #' "vertical" is almost 2 times faster. Not applicable if \code{grid = TRUE}. #' @inheritParams ardl #' #' @return \code{auto_ardl} returns a list which contains: #' \item{\code{best_model}}{An object of class \code{c("dynlm", "lm", "ardl")}} #' \item{\code{best_order}}{A numeric vector with the order of the best model selected} #' \item{\code{top_orders}}{A data.frame with the orders of the top 20 models} #' #' @section Searching algorithm: The algorithm performs the optimization process #' starting from multiple starting points concerning the autoregressive order #' \eqn{p}. The searching algorithm will perform a complete search, each time #' starting from a different starting order. These orders are presented in the #' tables below, for \code{grid = FALSE} and different values of #' \code{starting_order}. #' #' \code{starting_order = NULL}: #' \tabular{ccccccc}{ #' ARDL(p) \tab -> \tab p \tab q1 \tab q2 \tab ... \tab qk\cr #' ARDL(1) \tab -> \tab 1 \tab 1 \tab 1 \tab ... \tab 1\cr #' ARDL(2) \tab -> \tab 2 \tab 2 \tab 2 \tab ... \tab 2\cr #' : \tab -> \tab : \tab : \tab : \tab : \tab :\cr #' ARDL(P) \tab -> \tab P \tab P \tab P \tab ... \tab P #' } #' #' \code{starting_order = c(3, 0, 1, 2)}: #' \tabular{cccc}{ #' p \tab q1 \tab q2 \tab q3\cr #' 3 \tab 0 \tab 1 \tab 2\cr #' 4 \tab 0 \tab 1 \tab 2\cr #' : \tab : \tab : \tab :\cr #' P \tab 0 \tab 1 \tab 2 #' } #' #' @seealso \code{\link{ardl}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords optimize models ts #' @export #' @examples #' data(denmark) #' #' ## Find the best ARDL order -------------------------------------------- #' #' # Up to 5 for the autoregressive order (p) and 4 for the rest (q1, q2, q3) #' #' # Using the defaults search_type = "horizontal", grid = FALSE and selection = "AIC" #' # ("Not run" indications only for testing purposes) #' \dontrun{ #' model1 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4)) #' model1$top_orders #' #' ## Same, with search_type = "vertical" ------------------------------- #' #' model1_h <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), search_type = "vertical") #' model1_h$top_orders #' #' ## Find the global optimum ARDL order ---------------------------------- #' #' # It may take more than 10 seconds #' model_grid <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), grid = TRUE) #' #' ## Different selection criteria ---------------------------------------- #' #' # Using BIC as selection criterion instead of AIC #' model1_b <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), selection = "BIC") #' model1_b$top_orders #' #' # Using other criteria like adjusted R squared (the bigger the better) #' adjr2 <- function(x) { summary(x)$adj.r.squared } #' model1_adjr2 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), selection = "adjr2", #' selection_minmax = "max") #' model1_adjr2$top_orders #' #' # 
Using functions from other packages as selection criteria #' if (requireNamespace("qpcR", quietly = TRUE)) { #' #' library(qpcR) #' model1_aicc <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), selection = "AICc") #' model1_aicc$top_orders #' adjr2 <- function(x){ Rsq.ad(x) } #' model1_adjr2 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), selection = "adjr2", #' selection_minmax = "max") #' model1_adjr2$top_orders #' #' ## DIfferent starting order -------------------------------------------- #' #' # The searching algorithm will start from the following starting orders: #' # p q1 q2 q3 #' # 1 1 3 2 #' # 2 1 3 2 #' # 3 1 3 2 #' # 4 1 3 2 #' # 5 1 3 2 #' #' model1_so <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), starting_order = c(1,1,3,2)) #' #' # Starting from p=3 (don't search for p=1 and p=2) #' # Starting orders: #' # p q1 q2 q3 #' # 3 1 3 2 #' # 4 1 3 2 #' # 5 1 3 2 #' #' model1_so_3 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), starting_order = c(3,1,3,2)) #' #' # If starting_order = NULL, the starting orders for each iteration will be: #' # p q1 q2 q3 #' # 1 1 1 1 #' # 2 2 2 2 #' # 3 3 3 3 #' # 4 4 4 4 #' # 5 5 5 5 #' } #' #' ## Add constraints ----------------------------------------------------- #' #' # Restrict only the order of IBO to be 2 #' model1_ibo2 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), fixed_order = c(-1,-1,2,-1)) #' model1_ibo2$top_orders #' #' # Restrict the order of LRM to be 3 and the order of IBO to be 2 #' model1_lrm3_ibo2 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), fixed_order = c(3,-1,2,-1)) #' model1_lrm3_ibo2$top_orders #' #' ## Set the starting date for the regression (data starts at "1974 Q1") - #' #' # Set regression starting date to "1976 Q1" #' model1_76q1 <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark, #' max_order = c(5,4,4,4), start = "1976 Q1") #' start(model1_76q1$best_model) #' } auto_ardl <- function(formula, data, max_order, fixed_order = -1, starting_order = NULL, selection = "AIC", selection_minmax = c("min", "max"), grid = FALSE, search_type = c("horizontal", "vertical"), start = NULL, end = NULL, ...) { if (!any(c("ts", "zoo", "zooreg") %in% class(data))) { data <- stats::ts(data, start = 1, end = nrow(data), frequency = 1) } if (missing(max_order) == TRUE) {stop("'max_order' is a mandatory argument.", call. = FALSE)} if (missing(selection) == TRUE) {selection <- "AIC"} search_type <- match.arg(search_type) selection_minmax <- match.arg(selection_minmax) parsed_formula <- parse_formula(formula = formula, colnames_data = colnames(data)) max_order <- parse_order(orders = max_order, order_name = "max_order", var_names = parsed_formula$z_part$var, kz = parsed_formula$kz) fixed_order <- parse_order(orders = fixed_order, order_name = "fixed_order", var_names = parsed_formula$z_part$var, kz = parsed_formula$kz, restriction = -1) if (!missing(starting_order)) { starting_order_null <- FALSE if (starting_order[1] < 1) { stop("In 'starting_order', the starting order of p (first argument) can't be less than 1.", call. = FALSE)} starting_order <- parse_order(orders = starting_order, order_name = "starting_order", var_names = parsed_formula$z_part$var, kz = parsed_formula$kz) if (any(starting_order > max_order)) {stop("'starting_order' can't be greater than 'max_order'.", call. 
= FALSE)} } else { starting_order_null <- TRUE } if (any(fixed_order > max_order)) {stop("'fixed_order' can't be greater than 'max_order'.", call. = FALSE)} start_sample <- start end_sample <- end # acceptable orders for each p and q order_list <- lapply(seq_len(length(max_order)), function(i) { # because order of y can't be 0 if (i == 1) { if (fixed_order[i] == (-1)) { if (is.null(starting_order)) { 1:max_order[i] } else { starting_order[1]:max_order[i] } } else { fixed_order[i] } } else { if (fixed_order[i] == (-1)) { 0:max_order[i] } else { fixed_order[i] } } }) # starting_order considering fixed_order if (is.null(starting_order)) { if (grid == FALSE) { starting_order <- ifelse(fixed_order[-1] == -1, 0, fixed_order[-1]) starting_order <- c(ifelse(fixed_order[1] == -1, 1, fixed_order[1]), starting_order) # build orders for VAR order_var <- lapply(1:min(max_order), function(i) rep(i, parsed_formula$kz)) if (any(fixed_order != -1)) { # build orders for var with rectricted terms for (j in 1:length(order_var)) { order_var[[j]][which(fixed_order != -1)] <- fixed_order[which(fixed_order != -1)] } } } } else { starting_order <- ifelse(fixed_order == -1, starting_order, fixed_order) } if (grid == TRUE) { # create the full search grid order_grid <- expand.grid(order_list, KEEP.OUT.ATTRS = FALSE) # run all models and estimate "selection" (using eval to avoid if for each "selection") best_selection <- lapply(seq_len(nrow(order_grid)), function(i) { eval(parse(text = paste(selection, "( ardl(formula = formula, data = data, order = unlist(order_grid[", i, ", ]), start = start_sample, end = end_sample, ...) )"))) }) %>% unlist() if (selection_minmax == "max") best_selection <- (-1)*best_selection # keep top 20 orders top_orders <- dplyr::bind_cols(order = order_grid, selection = best_selection) %>% dplyr::arrange(selection) %>% dplyr::slice(1:20) } else { top_orders <- c() #initialization if (starting_order_null) { for_each_p <- 1:length(order_var) #$#$ or try order_list[[1]] } else { for_each_p <- order_list[[1]] } for (i in for_each_p) { # for each parse if (starting_order_null) { order1 <- order_var[[i]] # access each var model } else { order1 <- starting_order order1[1] <- i } m1 <- ardl(formula = formula, data = data, order = order1, start = start_sample, end = end_sample, ...) selection1 <- eval(parse(text = paste(selection, "(m1)"))) if (selection_minmax == "max") selection1 <- (-1)*selection1 top_orders <- rbind(top_orders, c(order1, selection1)) if (search_type == "vertical") { for (j in 1:parsed_formula$kx) { if (length(order_list[[j + 1]]) != 1) { # if fixed order, don't search ardl_converge <- FALSE while ((ardl_converge == FALSE) & (order1[j + 1] < dplyr::last(order_list[[j + 1]]))) { order2_back <- order1 order2_forth <- order1 if (order1[j + 1] == 0) { # so that when q=0 it doesn't search for q<0. # order2_back is the order1 order2_forth[j + 1] <- order1[j + 1] + 1 } else { order2_back[j + 1] <- order1[j + 1] - 1 order2_forth[j + 1] <- order1[j + 1] + 1 } order_back_ready <- unlist(lapply(1:nrow(top_orders), FUN = function(z) sum(order2_back == top_orders[z,-ncol(top_orders)]))) order_forth_ready <- unlist(lapply(1:nrow(top_orders), FUN = function(z) sum(order2_forth == top_orders[z,-ncol(top_orders)]))) if (parsed_formula$kz %in% order_back_ready) { selection_m2_back <- top_orders[which(order_back_ready == parsed_formula$kz)[1], ncol(top_orders)] } else { m2_back <- ardl(formula = formula, data = data, order = order2_back, start = start_sample, end = end_sample, ...) 
selection_m2_back <- eval(parse(text = paste(selection, "(m2_back)"))) if (selection_minmax == "max") selection_m2_back <- (-1)*selection_m2_back } if (parsed_formula$kz %in% order_forth_ready) { selection_m2_forth <- top_orders[which(order_forth_ready == parsed_formula$kz)[1], ncol(top_orders)] } else { m2_forth <- ardl(formula = formula, data = data, order = order2_forth, start = start_sample, end = end_sample, ...) selection_m2_forth <- eval(parse(text = paste(selection, "(m2_forth)"))) if (selection_minmax == "max") selection_m2_forth <- (-1)*selection_m2_forth } selection2 <- min(selection_m2_back, selection_m2_forth)[1] order2 <- list(order2_back, order2_forth)[which(c(selection_m2_back, selection_m2_forth) == min(selection_m2_back, selection_m2_forth))][[1]] top_orders <- rbind(top_orders, c(order1, selection1), c(order2, selection2)) if (selection2 < selection1) { selection1 <- selection2 order1 <- order2 } else { ardl_converge <- TRUE } } } } } else { # horizontal ardl_converge <- FALSE failed_orders <- data.frame(matrix(0, 1, parsed_formula$kz)) # initialization while (ardl_converge == FALSE) { for(j in 1:parsed_formula$kx) { if ((length(order_list[[j + 1]]) != 1) & (order1[j + 1] < max_order[j + 1])) { # if fixed order or max_order reached, don't search order2_back <- order1 order2_forth <- order1 if (order1[j + 1] == 0) { # order2_back is the order1 order2_forth[j + 1] <- order1[j + 1] + 1 } else { order2_back[j + 1] <- order1[j + 1] - 1 order2_forth[j + 1] <- order1[j + 1] + 1 } order_back_ready <- unlist(lapply(1:nrow(top_orders), FUN = function(z) sum(order2_back == top_orders[z,-ncol(top_orders)]))) order_forth_ready <- unlist(lapply(1:nrow(top_orders), FUN = function(z) sum(order2_forth == top_orders[z,-ncol(top_orders)]))) if (parsed_formula$kz %in% order_back_ready) { selection_m2_back <- top_orders[which(order_back_ready == parsed_formula$kz)[1], ncol(top_orders)] } else { m2_back <- ardl(formula = formula, data = data, order = order2_back, start = start_sample, end = end_sample, ...) selection_m2_back <- eval(parse(text = paste(selection, "(m2_back)"))) if (selection_minmax == "max") selection_m2_back <- (-1)*selection_m2_back } if (parsed_formula$kz %in% order_forth_ready) { selection_m2_forth <- top_orders[which(order_forth_ready == parsed_formula$kz)[1], ncol(top_orders)] } else { m2_forth <- ardl(formula = formula, data = data, order = order2_forth, start = start_sample, end = end_sample, ...) 
selection_m2_forth <- eval(parse(text = paste(selection, "(m2_forth)"))) if (selection_minmax == "max") selection_m2_forth <- (-1)*selection_m2_forth } selection2 <- min(selection_m2_back, selection_m2_forth)[1] order2 <- list(order2_back, order2_forth)[which(c(selection_m2_back, selection_m2_forth) == min(selection_m2_back, selection_m2_forth))][[1]] top_orders <- rbind(top_orders, c(order1, selection1), c(order2, selection2)) if (selection2 < selection1) { selection1 <- selection2 order1 <- order2 } else { # to converge when it reaches an order that has already failed if (nrow(dplyr::distinct(rbind(failed_orders, order2), .keep_all = TRUE)) == nrow(failed_orders)) { ardl_converge <- TRUE } else { failed_orders <- rbind(failed_orders, order2) } } } else if (!(order1[j + 1] < max_order[j + 1])) { # to avoid infinite loop when all orders are maxed (the case where best mode is the bigest) # Either all fixed or maxed (except the 1st) either order > max (in case of "both" where max = max + 1) if ((all(((order1 == fixed_order) | (order1 == max_order))[-1] == TRUE)) | (order1[j + 1] > max_order[j + 1])) { ardl_converge <- TRUE } } } } } } # keep top 20 orders top_orders <- dplyr::as_tibble(data.frame(matrix(top_orders[,-ncol(top_orders)], ncol = ncol(top_orders)-1), selection = top_orders[,ncol(top_orders)]) %>% dplyr::distinct(.keep_all = TRUE) %>% dplyr::arrange(selection) %>% dplyr::slice(1:20)) } # choose best order best_order <- top_orders[1, 1:(ncol(top_orders) - 1)] %>% as.numeric() names(best_order) <- parsed_formula$z_part$var # choose best model best_model <- ardl(formula = formula, data = data, order = best_order, start = start_sample, end = end_sample, ...) # prepare objects before exporting ---------------------------------------- # rename top_orders columns temp <- names(top_orders) names(temp) <- c(parsed_formula$z_part$var, selection) top_orders <- top_orders %>% dplyr::rename(!!temp) # correct sign of the selection criterion if (selection_minmax == "max") { top_orders[,ncol(top_orders)] <- -top_orders[,ncol(top_orders)] } # return list return_list <- list(best_model = best_model, best_order = best_order, top_orders = data.frame(top_orders)) return(return_list) }
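
## ---------------------------------------------------------------------------
## Illustrative sketch, not part of the package: because 'selection' is applied
## simply as selection(model), any scalar criterion that is visible to the
## searching algorithm can be plugged in. Here a Hannan-Quinn style criterion
## is built from stats::AIC (smaller is better, so the default
## selection_minmax = "min" applies). Both helper names are ours, chosen only
## for illustration.
.demo_hq <- function(model) {
  # Hannan-Quinn: -2*logLik + 2*log(log(n))*npar, obtained via the AIC penalty k
  n <- length(stats::residuals(model))
  stats::AIC(model, k = 2 * log(log(n)))
}
.demo_auto_ardl_custom_selection <- function() {
  data(denmark, envir = environment())
  model_hq <- auto_ardl(LRM ~ LRY + IBO + IDE, data = denmark,
                        max_order = c(5, 4, 4, 4), selection = ".demo_hq")
  model_hq$top_orders
}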
# Source file: /scratch/gouwar.j/cran-all/cranData/ARDL/R/auto_ardl.R
#' Bounds Wald-test for no cointegration #' #' \code{bounds_f_test} performs the Wald bounds-test for no cointegration #' \cite{Pesaran et al. (2001)}. It is a Wald test on the parameters of a UECM #' (Unrestricted Error Correction Model) expressed either as a Chisq-statistic #' or as an F-statistic. #' #' @param alpha A numeric value between 0 and 1 indicating the significance #' level of the critical value bounds. If \code{NULL} (default), no critical #' value bounds for a specific level of significance are provide, only the #' p-value. See section 'alpha, bounds and p-value' below for details. #' @param pvalue A logical indicating whether you want the p-value to be #' provided. The default is \code{TRUE}. See section 'alpha, bounds and #' p-value' below for details. #' @param exact A logical indicating whether you want asymptotic (T = 1000) or #' exact sample size critical value bounds and p-value. The default is #' \code{FALSE} for asymptotic. See section 'alpha, bounds and p-value' below #' for details. #' @param R An integer indicating how many iterations will be used if #' \code{exact = TRUE}. Default is 40000. #' @param test A character vector indicating whether you want the Wald test to #' be expressed as 'F' or as 'Chisq' statistic. Default is "F". #' @param vcov_matrix The estimated covariance matrix of the random variable #' that the test uses to estimate the test statistic. The default is #' \code{vcov(object)} (when \code{vcov_matrix = NULL}), but other estimations #' of the covariance matrix of the regression's estimated coefficients can #' also be used (e.g., using \code{\link[sandwich]{vcovHC}} or #' \code{\link[sandwich]{vcovHAC}}). Only applicable if the input object is of #' class "uecm". #' @inheritParams recm #' #' @return A list with class "htest" containing the following components: #' \item{\code{method}}{a character string indicating what type of test was #' performed.} #' \item{\code{alternative}}{a character string describing the alternative #' hypothesis.} #' \item{\code{statistic}}{the value of the test statistic.} #' \item{\code{null.value}}{the value of the population parameters \code{k} #' (the number of independent variables) and \code{T} (the number of #' observations) specified by the null hypothesis.} #' \item{\code{data.name}}{a character string giving the name(s) of the data.} #' \item{\code{parameters}}{numeric vector containing the critical value #' bounds.} #' \item{\code{p.value}}{the p-value of the test.} #' \item{\code{PSS2001parameters}}{numeric vector containing the critical #' value bounds as presented by \cite{Pesaran et al. (2001)}. 
See section #' 'alpha, bounds and p-value' below for details.} #' \item{\code{tab}}{data.frame containing the statistic, the critical value #' bounds, the alpha level of significance and the p-value.} #' #' @section Hypothesis testing: #' \deqn{\Delta y_{t} = c_{0} + c_{1}t + #' \pi_{y}y_{t-1} + \sum_{j=1}^{k}\pi_{j}x_{j,t-1} + #' \sum_{i=1}^{p-1}\psi_{y,i}\Delta y_{t-i} + #' \sum_{j=1}^{k}\sum_{l=1}^{q_{j}-1} \psi_{j,l}\Delta x_{j,t-l} + #' \sum_{j=1}^{k}\omega_{j}\Delta x_{j,t} + \epsilon_{t}} #' #' \describe{ #' \item{Cases 1, 3, 5:}{} #' } #' \deqn{\mathbf{H_{0}:} \pi_{y} = \pi_{1} = \dots = \pi_{k} = 0} #' \deqn{\mathbf{H_{1}:} \pi_{y} \neq \pi_{1} \neq \dots \neq \pi_{k} \neq 0} #' #' \describe{ #' \item{Case 2:}{} #' } #' \deqn{\mathbf{H_{0}:} \pi_{y} = \pi_{1} = \dots = \pi_{k} = c_{0} = 0} #' \deqn{\mathbf{H_{1}:} \pi_{y} \neq \pi_{1} \neq \dots \neq \pi_{k} \neq c_{0} \neq 0} #' #' \describe{ #' \item{Case 4:}{} #' } #' \deqn{\mathbf{H_{0}:} \pi_{y} = \pi_{1} = \dots = \pi_{k} = c_{1} = 0} #' \deqn{\mathbf{H_{1}:} \pi_{y} \neq \pi_{1} \neq \dots \neq \pi_{k} \neq c_{1} \neq 0} #' #' @section alpha, bounds and p-value: In this section it is explained how the #' critical value bounds and p-values are obtained. #' \itemize{ #' \item If \code{exact = FALSE}, then the asymptotic (T = 1000) critical #' value bounds and p-value are provided. #' \item Only the asymptotic critical value bounds and p-values, and only #' for k <= 10 are precalculated, everything else has to be computed. #' \item Precalculated critical value bounds and p-values were simulated #' using \code{set.seed(2020)} and \code{R = 70000}. #' \item Precalculated critical value bounds exist only for \code{alpha} #' being one of the 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.15 or 0.2, #' everything else has to be computed. #' \item If \code{alpha} is one of the 0.1, 0.05, 0.025 or 0.01 (and #' \code{exact = FALSE} and k <= 10), \code{PSS2001parameters} shows #' the critical value bounds presented in \cite{Pesaran et al. (2001)} #' (less precise). 
#' } #' #' @inheritSection recm Cases #' @inheritSection recm References #' @seealso \code{\link{bounds_t_test}} \code{\link{ardl}} \code{\link{uecm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords htest ts #' @export #' @examples #' data(denmark) #' #' ## How to use cases under different models (regarding deterministic terms) #' #' ## Construct an ARDL(3,1,3,2) model with different deterministic terms - #' #' # Without constant #' ardl_3132_n <- ardl(LRM ~ LRY + IBO + IDE -1, data = denmark, order = c(3,1,3,2)) #' #' # With constant #' ardl_3132_c <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' #' # With constant and trend #' ardl_3132_ct <- ardl(LRM ~ LRY + IBO + IDE + trend(LRM), data = denmark, order = c(3,1,3,2)) #' #' ## F-bounds test for no level relationship (no cointegration) ---------- #' #' # For the model without a constant #' bounds_f_test(ardl_3132_n, case = 1) #' # or #' bounds_f_test(ardl_3132_n, case = "n") #' #' # For the model with a constant #' # Including the constant term in the long-run relationship (restricted constant) #' bounds_f_test(ardl_3132_c, case = 2) #' # or #' bounds_f_test(ardl_3132_c, case = "rc") #' #' # Including the constant term in the short-run relationship (unrestricted constant) #' bounds_f_test(ardl_3132_c, case = "uc") #' # or #' bounds_f_test(ardl_3132_c, case = 3) #' #' # For the model with constant and trend #' # Including the constant term in the short-run and the trend in the long-run relationship #' # (unrestricted constant and restricted trend) #' bounds_f_test(ardl_3132_ct, case = "ucrt") #' # or #' bounds_f_test(ardl_3132_ct, case = 4) #' #' # For the model with constant and trend #' # Including the constant term and the trend in the short-run relationship #' # (unrestricted constant and unrestricted trend) #' bounds_f_test(ardl_3132_ct, case = "ucut") #' # or #' bounds_f_test(ardl_3132_ct, case = 5) #' #' ## Note that you can't restrict a deterministic term that doesn't exist #' #' # For example, the following tests will produce an error: #' \dontrun{ #' bounds_f_test(ardl_3132_c, case = 1) #' bounds_f_test(ardl_3132_ct, case = 3) #' bounds_f_test(ardl_3132_c, case = 4) #' } #' #' ## Asymptotic p-value and critical value bounds (assuming T = 1000) ---- #' #' # Include critical value bounds for a certain level of significance #' #' # F-statistic is larger than the I(1) bound (for a=0.05) as expected (p-value < 0.05) #' bft <- bounds_f_test(ardl_3132_c, case = 2, alpha = 0.05) #' bft #' bft$tab #' #' # Traditional but less precise critical value bounds, as presented in Pesaran et al. 
(2001) #' bft$PSS2001parameters #' #' # F-statistic is slightly larger than the I(1) bound (for a=0.005) #' # as p-value is slightly smaller than 0.005 #' bounds_f_test(ardl_3132_c, case = 2, alpha = 0.005) #' #' ## Exact sample size p-value and critical value bounds ----------------- #' #' # Setting a seed is suggested to allow the replication of results #' # 'R' can be increased for more accurate resutls #' #' # F-statistic is smaller than the I(1) bound (for a=0.01) as expected (p-value > 0.01) #' # Note that the exact sample p-value (0.01285) is very different than the asymptotic (0.004418) #' # It can take more than 30 seconds #' \dontrun{ #' set.seed(2020) #' bounds_f_test(ardl_3132_c, case = 2, alpha = 0.01, exact = TRUE) #' } #' #' ## "F" and "Chisq" statistics ------------------------------------------ #' #' # The p-value is the same, the test-statistic and critical value bounds are different but analogous #' bounds_f_test(ardl_3132_c, case = 2, alpha = 0.01) #' bounds_f_test(ardl_3132_c, case = 2, alpha = 0.01, test = "Chisq") bounds_f_test <- function(object, case, alpha = NULL, pvalue = TRUE, exact = FALSE, R = 40000, test = c("F", "Chisq"), vcov_matrix = NULL) { # no visible binding for global variable NOTE solution k <- I0 <- fI0 <- fI1 <- NULL; rm(k, I0, fI0, fI1) if (isTRUE(all.equal(c("dynlm", "lm", "ardl"), class(object)))) { object <- uecm(object) vcov_matrix <- stats::vcov(object) } if (is.null(vcov_matrix)) { vcov_matrix <- stats::vcov(object) } alpha_long_seq <- c(seq(0, 0.1, by = 0.001), seq(0.11, 1, by = 0.01)) alpha_short_seq <- c(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2) if (is.null(alpha) & (pvalue == FALSE)) { stop("Specify an 'alpha' level, set 'pvalue' = TRUE or both", call. = FALSE) } if ((!is.null(alpha) & (length(alpha) != 1)) | (!is.null(alpha) & !any(dplyr::near(alpha, alpha_long_seq)))) { stop("alpha must be either 'NULL' or one of the numbers produced by the following code:\n", "c(seq(0.001, 0.1, by = 0.001), seq(0.11, 0.99, by = 0.01))", call. 
= FALSE) } case <- parse_case(parsed_formula = object$parsed_formula, case = case) test <- match.arg(test) kx <- object$parsed_formula$kx T <- length(object$residuals) dname <- deparse(object$full_formula) if (exact == FALSE) { nullvalue <- c(kx, 1000) } else { nullvalue <- c(kx, T) } names(nullvalue) <- c("k", "T") indep_vars <- object$parsed_formula$x_part$var restricted_coef <- sapply(1:length(indep_vars), function(i) { ifelse(object$order[i + 1] == 0, indep_vars[i], paste0("L(", indep_vars[i], ", 1)")) }) restricted_names <- c(paste0("L(", object$parsed_formula$y_part$var, ", 1)"), restricted_coef) restricted_coef <- which(names(stats::coef(object)) %in% restricted_names) if (case == 2) { restricted_names <- c(restricted_names, names(stats::coef(object))[1]) restricted_coef <- which(names(stats::coef(object)) %in% restricted_names) } else if (case == 4) { restricted_names <- c(restricted_names, names(stats::coef(object))[2]) restricted_coef <- which(names(stats::coef(object)) %in% restricted_names) } chi2_statistic <- aod::wald.test(b = stats::coef(object), Sigma = vcov_matrix, Terms = restricted_coef)$result if (test == "F") { statistic <- chi2_statistic$chi2[1] / chi2_statistic$chi2[2] } else { statistic <- chi2_statistic$chi2[1] } names(statistic) <- test if ((exact == FALSE) & (kx <= 10) & any(c(is.null(alpha), alpha %in% alpha_short_seq))) { caselatin <- ifelse(case == 1, "i", ifelse(case == 2, "ii", ifelse(case == 3, "iii", ifelse(case == 4, "iv", "v")))) if (!is.null(alpha)) { if (alpha %in% c(0.1, 0.05, 0.025, 0.01)) { bounds_pss2001 <- eval(parse(text = paste0("crit_val_bounds_pss2001$f$", caselatin))) %>% dplyr::filter(alpha %in% !!alpha, k == kx) %>% dplyr::select(I0, I1) if (test != "F") { if (case %in% c(1, 3, 5)) { bounds_pss2001 <- bounds_pss2001*(kx+1) } else { bounds_pss2001 <- bounds_pss2001*(kx+2) } } } if (alpha %in% alpha_short_seq) { bounds <- eval(parse(text = paste0("crit_val_bounds$I0$", caselatin))) %>% dplyr::filter(alpha == !!alpha, k == kx) %>% dplyr::select(fI0) %>% dplyr::rename(I0 = fI0) bounds <- cbind(bounds, eval(parse(text = paste0("crit_val_bounds$I1$", caselatin))) %>% dplyr::filter(alpha == !!alpha, k == kx) %>% dplyr::select(fI1) %>% dplyr::rename(I1 = fI1)) if (test != "F") { if (case %in% c(1, 3, 5)) { bounds <- bounds*(kx+1) } else { bounds <- bounds*(kx+2) } } } } if (pvalue == TRUE) { critvalbounds <- eval(parse(text = paste0("crit_val_bounds$I1$", caselatin))) %>% dplyr::mutate(I1 = if (test == "F") { fI1 } else if (case %in% c(1, 3, 5)){ fI1*(kx+1) } else { fI1*(kx+2) }) if (statistic %in% critvalbounds$I1) { p_value <- critvalbounds %>% dplyr::filter(k == kx) %>% dplyr::filter(I1 == statistic) %>% dplyr::select(alpha) %>% unlist() } else { # linear interpolation if (max(dplyr::filter(critvalbounds, k == kx)$I1) < statistic) { p_value <- 0.000001 } else if (min(dplyr::filter(critvalbounds, k == kx)$I1) > statistic) { p_value <- 0.999999 } else { p_value <- critvalbounds %>% dplyr::filter(k == kx) %>% dplyr::filter(I1 %in% c(max(I1[which(I1 < statistic)]), min(I1[which(I1 > statistic)]))) %>% dplyr::mutate(p_value = min(alpha) + (max(alpha)-min(alpha)) * ((max(I1) - statistic) / (max(I1)-min(I1)))) %>% dplyr::select(p_value) %>% unlist() %>% .[1] } } } } else { T_asy_or_exact <- ifelse(exact == FALSE, 1000, T) wb <- f_bounds_sim(case = case, k = kx, alpha = alpha_long_seq, T = T_asy_or_exact, R = R) if (!is.null(alpha)) { if (test == "F") { bounds <- wb$f_bounds[which(dplyr::near(alpha, alpha_long_seq)),] } else { bounds <- 
wb$chisq_bounds[which(dplyr::near(alpha, alpha_long_seq)),] } } if (pvalue == TRUE) { len <- length(alpha_long_seq) if (test == "F") { I1 <- wb$f_bounds$I1[1:len] } else { I1 <- wb$chisq_bounds$I1[1:len] } if (statistic %in% I1) { p_value <- alpha_long_seq[statistic == I1] } else { # linear interpolation if (max(I1) < statistic) { p_value <- 0.000001 } else if (min(I1) > statistic) { p_value <- 0.999999 } else { interp_cond <- I1 %in% c(max(I1[which(I1 < statistic)]), min(I1[which(I1 > statistic)])) interp_I1 <- I1[interp_cond] interp_alpha <- alpha_long_seq[interp_cond] p_value <- min(interp_alpha) + (max(interp_alpha)-min(interp_alpha)) * ((max(interp_I1) - statistic) / (max(interp_I1)-min(interp_I1))) } } } } method <- paste0("Bounds ", test, "-test (Wald) for no cointegration") alternative <- "Possible cointegration" rval <- list(method = method, alternative = alternative, statistic = statistic, null.value = nullvalue, data.name = dname) tab <- data.frame(statistic = statistic) if (!is.null(alpha)) { parameters <- c(bounds$I0, bounds$I1) names(parameters) <- c("Lower-bound I(0)", "Upper-bound I(1)") tab <- cbind(tab, "Lower-bound I(0)" = parameters[1], "Upper-bound I(1)" = parameters[2], alpha = alpha) rval$parameters <- parameters if ((alpha %in% c(0.1, 0.05, 0.025, 0.01)) & exact == FALSE) { PSS2001parameters <- c(bounds_pss2001$I0, bounds_pss2001$I1) names(PSS2001parameters) <- c("Lower-bound I(0)", "Upper-bound I(1)") rval$PSS2001parameters <- PSS2001parameters } } if (pvalue == TRUE) { tab <- cbind(tab, p.value = p_value) rval$p.value <- p_value } rval$tab <- tab class(rval) <- "htest" return(rval) } #' Bounds t-test for no cointegration #' #' \code{bounds_t_test} performs the t-bounds test for no cointegration #' \cite{Pesaran et al. (2001)}. It is a t-test on the parameters of a UECM #' (Unrestricted Error Correction Model). #' #' @param case An integer (1, 3 or 5) or a character string specifying whether #' the 'intercept' and/or the 'trend' have to participate in the #' short-run relationship (see section 'Cases' below). Note that the t-bounds #' test can't be applied for cases 2 and 4. 
#' @inheritParams bounds_f_test #' @inherit bounds_f_test return #' #' @section Hypothesis testing: \deqn{\Delta y_{t} = c_{0} + c_{1}t + #' \pi_{y}y_{t-1} + \sum_{j=1}^{k}\pi_{j}x_{j,t-1} + #' \sum_{i=1}^{p-1}\psi_{y,i}\Delta y_{t-i} + #' \sum_{j=1}^{k}\sum_{l=1}^{q_{j}-1} \psi_{j,l}\Delta x_{j,t-l} + #' \sum_{j=1}^{k}\omega_{j}\Delta x_{j,t} + \epsilon_{t}} #' \deqn{\mathbf{H_{0}:} \pi_{y} = 0} #' \deqn{\mathbf{H_{1}:} \pi_{y} \neq 0} #' #' @inheritSection bounds_f_test alpha, bounds and p-value #' @inheritSection bounds_f_test Cases #' @inheritSection bounds_f_test References #' @seealso \code{\link{bounds_f_test}} \code{\link{ardl}} \code{\link{uecm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords htest ts #' @export #' @examples #' data(denmark) #' #' ## How to use cases under different models (regarding deterministic terms) #' #' ## Construct an ARDL(3,1,3,2) model with different deterministic terms - #' #' # Without constant #' ardl_3132_n <- ardl(LRM ~ LRY + IBO + IDE -1, data = denmark, order = c(3,1,3,2)) #' #' # With constant #' ardl_3132_c <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' #' # With constant and trend #' ardl_3132_ct <- ardl(LRM ~ LRY + IBO + IDE + trend(LRM), data = denmark, order = c(3,1,3,2)) #' #' ## t-bounds test for no level relationship (no cointegration) ---------- #' #' # For the model without a constant #' bounds_t_test(ardl_3132_n, case = 1) #' # or #' bounds_t_test(ardl_3132_n, case = "n") #' #' # For the model with a constant #' # Including the constant term in the short-run relationship (unrestricted constant) #' bounds_t_test(ardl_3132_c, case = "uc") #' # or #' bounds_t_test(ardl_3132_c, case = 3) #' #' # For the model with constant and trend #' # Including the constant term and the trend in the short-run relationship #' # (unrestricted constant and unrestricted trend) #' bounds_t_test(ardl_3132_ct, case = "ucut") #' # or #' bounds_t_test(ardl_3132_ct, case = 5) #' #' ## Note that you can't use bounds t-test for cases 2 and 4, or use a wrong model #' #' # For example, the following tests will produce an error: #' \dontrun{ #' bounds_t_test(ardl_3132_n, case = 2) #' bounds_t_test(ardl_3132_c, case = 4) #' bounds_t_test(ardl_3132_ct, case = 3) #' } #' #' ## Asymptotic p-value and critical value bounds (assuming T = 1000) ---- #' #' # Include critical value bounds for a certain level of significance #' #' # t-statistic is larger than the I(1) bound (for a=0.05) as expected (p-value < 0.05) #' btt <- bounds_t_test(ardl_3132_c, case = 3, alpha = 0.05) #' btt #' btt$tab #' #' # Traditional but less precise critical value bounds, as presented in Pesaran et al. 
(2001) #' btt$PSS2001parameters #' #' # t-statistic doesn't exceed the I(1) bound (for a=0.005) as p-value is greater than 0.005 #' bounds_t_test(ardl_3132_c, case = 3, alpha = 0.005) #' #' ## Exact sample size p-value and critical value bounds ----------------- #' #' # Setting a seed is suggested to allow the replication of results #' # 'R' can be increased for more accurate resutls #' #' # t-statistic is smaller than the I(1) bound (for a=0.01) as expected (p-value > 0.01) #' # Note that the exact sample p-value (0.009874) is very different than the asymptotic (0.005538) #' # It can take more than 90 seconds #' \dontrun{ #' set.seed(2020) #' bounds_t_test(ardl_3132_c, case = 3, alpha = 0.01, exact = TRUE) #' } bounds_t_test <- function(object, case, alpha = NULL, pvalue = TRUE, exact = FALSE, R = 40000, vcov_matrix = NULL) { # no visible binding for global variable NOTE solution k <- I0 <- tI0 <- tI1 <- NULL; rm(k, I0, tI0, tI1) if (isTRUE(all.equal(c("dynlm", "lm", "ardl"), class(object)))) { object <- uecm(object) vcov_matrix <- stats::vcov(object) } if (is.null(vcov_matrix)) { vcov_matrix <- stats::vcov(object) } alpha_long_seq <- c(seq(0, 0.1, by = 0.001), seq(0.11, 1, by = 0.01)) alpha_short_seq <- c(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2) if (is.null(alpha) & (pvalue == FALSE)) { stop("Specify an 'alpha' level, set 'pvalue' = TRUE or both", call. = FALSE) } if ((!is.null(alpha) & (length(alpha) != 1)) | (!is.null(alpha) & !any(dplyr::near(alpha, alpha_long_seq)))) { stop("alpha must be either 'NULL' or one of the numbers produced by the following code:\n", "c(seq(0.001, 0.1, by = 0.001), seq(0.11, 0.99, by = 0.01))", call. = FALSE) } case <- parse_case(parsed_formula = object$parsed_formula, case = case) if (!(case %in% c(1, 3, 5))) { stop("The t-bounds test applies only when 'case' is either 1, 3 or 5", call. 
= FALSE) } kx <- object$parsed_formula$kx T <- length(object$residuals) dname <- deparse(object$full_formula) if (exact == FALSE) { nullvalue <- c(kx, 1000) } else { nullvalue <- c(kx, T) } names(nullvalue) <- c("k", "T") indep_vars <- object$parsed_formula$x_part$var restricted_coef <- sapply(1:length(indep_vars), function(i) { ifelse(object$order[i + 1] == 0, indep_vars[i], paste0("L(", indep_vars[i], ", 1)")) }) restricted_names <- c(paste0("L(", object$parsed_formula$y_part$var, ", 1)"), restricted_coef) restricted_coef <- which(names(stats::coef(object)) %in% restricted_names) statistic <- lmtest::coeftest(object, vcov = vcov_matrix)[restricted_coef[1], "t value"] names(statistic) <- "t" if ((exact == FALSE) & (kx <= 10) & any(c(is.null(alpha), alpha %in% alpha_short_seq))) { caselatin <- ifelse(case == 1, "i", ifelse(case == 3, "iii", "v")) if (!is.null(alpha)) { if (alpha %in% c(0.1, 0.05, 0.025, 0.01)) { bounds_pss2001 <- eval(parse(text = paste0("crit_val_bounds_pss2001$t$", caselatin))) %>% dplyr::filter(alpha %in% !!alpha, k == kx) %>% dplyr::select(I0, I1) } if (alpha %in% alpha_short_seq) { bounds <- eval(parse(text = paste0("crit_val_bounds$I0$", caselatin))) %>% dplyr::filter(alpha == !!alpha, k == kx) %>% dplyr::select(tI0) %>% dplyr::rename(I0 = tI0) bounds <- cbind(bounds, eval(parse(text = paste0("crit_val_bounds$I1$", caselatin))) %>% dplyr::filter(alpha == !!alpha, k == kx) %>% dplyr::select(tI1) %>% dplyr::rename(I1 = tI1)) } } if (pvalue == TRUE) { critvalbounds <- eval(parse(text = paste0("crit_val_bounds$I1$", caselatin))) if (statistic %in% critvalbounds$tI1) { p_value <- critvalbounds %>% dplyr::filter(k == kx) %>% dplyr::filter(tI1 == statistic) %>% dplyr::select(alpha) %>% unlist() } else { if (max(dplyr::filter(critvalbounds, k == kx)$tI1) < statistic) { p_value <- 0.999999 } else if (min(dplyr::filter(critvalbounds, k == kx)$tI1) > statistic) { p_value <- 0.000001 } else { p_value <- critvalbounds %>% dplyr::filter(k == kx) %>% dplyr::filter(tI1 %in% c(max(tI1[which(tI1 < statistic)]), min(tI1[which(tI1 > statistic)]))) %>% dplyr::mutate(p_value = max(alpha) - (max(alpha)-min(alpha)) * ((max(tI1) - statistic) / (max(tI1)-min(tI1)))) %>% dplyr::select(p_value) %>% unlist() %>% .[1] } } } } else { T_asy_or_exact <- ifelse(exact == FALSE, 1000, T) tb <- t_bounds_sim(case = case, k = kx, alpha = alpha_long_seq, T = T_asy_or_exact, R = R) if (!is.null(alpha)) { bounds <- tb[which(dplyr::near(alpha, alpha_long_seq)),] } if (pvalue == TRUE) { len <- length(alpha_long_seq) I1 <- tb$I1[1:len] if (statistic %in% I1) { p_value <- alpha_long_seq[statistic == I1] } else { # linear interpolation if (max(I1) < statistic) { p_value <- 0.999999 } else if (min(I1) > statistic) { p_value <- 0.000001 } else { interp_cond <- I1 %in% c(max(I1[which(I1 < statistic)]), min(I1[which(I1 > statistic)])) interp_I1 <- I1[interp_cond] interp_alpha <- alpha_long_seq[interp_cond] p_value <- max(interp_alpha) - (max(interp_alpha)-min(interp_alpha)) * ((max(interp_I1) - statistic) / (max(interp_I1)-min(interp_I1))) } } } } method <- "Bounds t-test for no cointegration" alternative <- "Possible cointegration" rval <- list(method = method, alternative = alternative, statistic = statistic, null.value = nullvalue, data.name = dname) tab <- data.frame(statistic = statistic) if (!is.null(alpha)) { parameters <- c(bounds$I0, bounds$I1) names(parameters) <- c("Lower-bound I(0)", "Upper-bound I(1)") tab <- cbind(tab, "Lower-bound I(0)" = parameters[1], "Upper-bound I(1)" = parameters[2], alpha = 
alpha) rval$parameters <- parameters if ((alpha %in% c(0.1, 0.05, 0.025, 0.01)) & exact == FALSE) { PSS2001parameters <- c(bounds_pss2001$I0, bounds_pss2001$I1) names(PSS2001parameters) <- c("Lower-bound I(0)", "Upper-bound I(1)") rval$PSS2001parameters <- PSS2001parameters } } if (pvalue == TRUE) { tab <- cbind(tab, p.value = p_value) rval$p.value <- p_value } rval$tab <- tab class(rval) <- "htest" return(rval) }
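# Illustration (not part of the package): a minimal sketch of the p-value
# interpolation used in bounds_t_test() above. 'alphas' and 'I1' are
# hypothetical vectors of significance levels and the matching simulated I(1)
# critical values; the observed statistic is placed between the two bracketing
# critical values and the p-value is interpolated linearly, as in the function.
interp_pvalue <- function(statistic, alphas, I1) {
  if (max(I1) < statistic) return(0.999999)  # less extreme than every tabulated value
  if (min(I1) > statistic) return(0.000001)  # more extreme than every tabulated value
  lo <- max(I1[I1 < statistic])              # critical value just below the statistic
  hi <- min(I1[I1 > statistic])              # critical value just above the statistic
  a_lo <- alphas[I1 == lo][1]
  a_hi <- alphas[I1 == hi][1]
  max(a_hi, a_lo) - (max(a_hi, a_lo) - min(a_hi, a_lo)) * ((hi - statistic) / (hi - lo))
}
interp_pvalue(-3.2, alphas = c(0.01, 0.05, 0.10), I1 = c(-3.8, -3.4, -3.1))  # ~0.083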
/scratch/gouwar.j/cran-all/cranData/ARDL/R/bounds_test.R
#' Cointegrating equation (long-run level relationship) #' #' Creates the cointegrating equation (long-run level relationship) providing an #' 'ardl', 'uecm' or 'recm' model. #' #' @param object An object of \code{\link[base]{class}} 'ardl', 'uecm' or #' 'recm'. #' @param case An integer from 1-5 or a character string specifying whether the #' 'intercept' and/or the 'trend' have to participate in the long-run level #' relationship (cointegrating equation) (see section 'Cases' below). If the #' input object is of class 'recm', \code{case} is not needed as the model is #' already under a certain case. #' @param ... Currently unused argument. #' #' @return \code{coint_eq} returns an numeric vector containing the #' cointegrating equation. #' #' @inheritSection recm Cases #' @inheritSection recm References #' @seealso \code{\link{plot_lr}} \code{\link{ardl}} \code{\link{uecm}} \code{\link{recm}} #' \code{\link{bounds_f_test}} \code{\link{bounds_t_test}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords ts #' @export #' @examples #' data(denmark) #' library(zoo) # for cbind.zoo() #' #' ## Estimate the Cointegrating Equation of an ARDL(3,1,3,2) model ------- #' #' # From an ARDL model (under case 2, restricted constant) #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' ce2_ardl <- coint_eq(ardl_3132, case = 2) #' #' # From an UECM (under case 2, restricted constant) #' uecm_3132 <- uecm(ardl_3132) #' ce2_uecm <- coint_eq(uecm_3132, case = 2) #' #' # From a RECM (under case 2, restricted constant) #' # Notice that if a RECM has already been estimated under a certain case, #' # the 'coint_eq()' can't be under different case, so no 'case' argument needed. #' recm_3132 <- recm(uecm_3132, case = 2) #' # The RECM is already under case 2, so the 'case' argument is no needed #' ce2_recm <- coint_eq(recm_3132) #' #' identical(ce2_ardl, ce2_uecm, ce2_recm) #' #' ## Check for a degenerate level relationship --------------------------- #' #' # The bounds F-test under both cases reject the Null Hypothesis of no level relationship. #' bounds_f_test(ardl_3132, case = 2) #' bounds_f_test(ardl_3132, case = 3) #' #' # The bounds t-test also rejects the NUll Hypothesis of no level relationship. #' bounds_t_test(ardl_3132, case = 3) #' #' # But when the constant enters the long-run equation (case 3) #' # this becomes a degenerate relationship. #' ce3_ardl <- coint_eq(ardl_3132, case = 3) #' #' plot_lr(ardl_3132, coint_eq = ce2_ardl, show.legend = TRUE) #' #' plot_lr(ardl_3132, coint_eq = ce3_ardl, show.legend = TRUE) #' plot_lr(ardl_3132, coint_eq = ce3_ardl, facets = TRUE, show.legend = TRUE) coint_eq <- function(object, case) { UseMethod("coint_eq") } #' @rdname coint_eq #' @export #' coint_eq.recm <- function(object, ...) { object$data[,object$parsed_formula$y_part$var] - stats::lag(object$data[,"ect"], 1) } #' @rdname coint_eq #' @export #' coint_eq.default <- function(object, case) { if ("recm_indicator" %in% class(object)) { recm_indicator <- TRUE class(object) <- class(object)[-4] } else { recm_indicator <- FALSE } if (isTRUE(all.equal(c("dynlm", "lm", "ardl"), class(object)))) { object <- uecm(object) } else if (!(isTRUE(all.equal(c("dynlm", "lm", "uecm"), class(object))))) { stop(paste0("no applicable for an object of class \"", paste0(class(object), collapse = '" "'), "\""), call. 
= FALSE) } parsed_formula <- object$parsed_formula case <- parse_case(parsed_formula = parsed_formula, case = case) order <- object$order data <- object$data lr_multipliers <- multipliers(object = object, type = "lr") lr_mult_names <- as.character(lr_multipliers$Term) lr_multipliers <- lr_multipliers$Estimate if (case == 1) { lr_multipliers <- c(0, 0, lr_multipliers) trend_var <- rep(0, nrow(data)) } else if (case == 2) { lr_multipliers <- c(lr_multipliers[1], 0, lr_multipliers[2:length(lr_multipliers)]) trend_var <- rep(0, nrow(data)) } else if (case == 3) { lr_multipliers <- c(0, 0, lr_multipliers[-1]) trend_var <- rep(0, nrow(data)) } else if (case == 4) { lr_multipliers <- c(0, lr_multipliers[-1]) is_trend_in_lr <- "trend" %in% stringr::str_sub(stringr::str_replace_all(lr_mult_names[2], " ", ""), 1, 5) is_time_in_lr <- "time(" %in% stringr::str_sub(stringr::str_replace_all(lr_mult_names[2], " ", ""), 1, 5) if (is_trend_in_lr == TRUE) { trimed_trend <- stringr::str_replace_all(lr_mult_names[2], " ", "") short_match <- stringr::str_sub(trimed_trend, nchar(trimed_trend) - 7, nchar(trimed_trend)) long_match <- stringr::str_sub(trimed_trend, nchar(trimed_trend) - 11, nchar(trimed_trend)) if (any(c("scale=FALSE)", "scale=F)") %in% c(short_match, long_match))) { trend_var <- 1:nrow(data) } else { trend_var <- (1:nrow(data)) / stats::frequency(data) } } else if (is_time_in_lr == TRUE) { trend_var <- stats::time(data) %>% as.numeric() } } else if (case == 5) { lr_multipliers <- c(0, 0, lr_multipliers[-(1:2)]) trend_var <- rep(0, nrow(data)) } unit_vector <- rep(1, nrow(data)) # create the design matrix to compute the coint_eq (cointegrating equation in levels) design_matrix <- data.frame(unit_vector, trend_var, dplyr::as_tibble(data) %>% dplyr::select(parsed_formula$x_part$var)) # create the coint_eq components (multipliers * variable) design_matrix <- lapply(1:ncol(design_matrix), function(i) design_matrix[ ,i] * lr_multipliers[i]) %>% as.data.frame() # add the dependent variable design_matrix <- data.frame(y = data[, parsed_formula$y_part$var], design_matrix) # compute the coint_eq coint_eq <- design_matrix %>% dplyr::mutate(coint_eq = rowSums(.[2:ncol(design_matrix)])) %>% dplyr::select(coint_eq) %>% .[[1]] # to inherit the proper time-series class and properties res <- cbind(data, coint_eq) res <- res[, "coint_eq"] if (recm_indicator) { coint_eq <- stats::ts(coint_eq, start = stats::start(data), frequency = stats::frequency(data)) res <- list(coint_eq = coint_eq, design_matrix = design_matrix, data= data, parsed_formula = parsed_formula, case = case, order = order) } return(res) }
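# Illustration (not part of the package): a rough numeric sketch of the quantity
# coint_eq() assembles above, i.e. the long-run level combination of the
# regressors (with the intercept/trend included or zeroed according to the
# case). The values of 'mu' and 'theta' below are made up; coint_eq() derives
# them from an estimated ARDL/UECM via multipliers().
set.seed(1)
x1 <- cumsum(stats::rnorm(50)); x2 <- cumsum(stats::rnorm(50))
y  <- 0.5 + 0.8 * x1 - 0.3 * x2 + stats::rnorm(50, sd = 0.1)
mu <- 0.5; theta <- c(0.8, -0.3)
lr_level <- mu + theta[1] * x1 + theta[2] * x2   # the cointegrating (long-run level) series
head(y - lr_level)                               # deviations from the long-run equilibrium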
/scratch/gouwar.j/cran-all/cranData/ARDL/R/coint_eq.R
#' F-test of regression's overall significance #' #' \code{f_test_custom} performs an overall significance F-test on a regression. #' It is used along with \code{\link[stats]{.lm.fit}} to get the F-statistic as #' this is about 10 times faster than extracting it from a regression using #' \code{\link[stats]{lm}}. #' #' @param dep_var A numeric vector or a matrix with one column representing the #' dependent variable. #' @param indep_vars A matrix representing the independent variables. #' @param model_res A numeric vector representing the regression's residuals. #' @param const A logical indicating whether the constant term should be #' restricted too. #' #' @return \code{f_test_custom} returns a list containing the F-statistic and #' the numerator's degrees of freedom. #' @seealso \code{\link{vcov_custom}} \code{\link{f_bounds_sim}} #' \code{\link{t_bounds_sim}}independent #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' f_test_custom <- function(dep_var, indep_vars, model_res, const = TRUE){ df2 <- nrow(indep_vars) - ncol(indep_vars) dep_var_mean <- mean(dep_var) mss <- sum(model_res^2) if( const == TRUE ) { df1 <- ncol(indep_vars) - 1 #number of restricted coefficients mss0 <- sum((dep_var - dep_var_mean)^2) } else { df1 <- ncol(indep_vars) #number of restricted coefficients mss0 <- sum((dep_var)^2) } return(list(f = ((mss0 - mss)/df1) / (mss/df2), df1 = df1)) } #' Variance-Covariance matrix of a regression #' #' \code{vcov_custom} creates the Variance-Covariance matrix of a regression. It #' is used instead of the \code{\link[stats]{vcov}} because the latter doesn't #' work with \code{\link[stats]{.lm.fit}}. #' #' @param indep_vars A matrix representing the independent variables. #' @param model_res A numeric vector representing the regression's residuals. #' #' @return \code{vcov_custom} returns a Variance-Covariance matrix. #' @seealso \code{\link{f_test_custom}} \code{\link{f_bounds_sim}} #' \code{\link{t_bounds_sim}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' vcov_custom <- function(indep_vars, model_res){ xx <- t(indep_vars) %*% indep_vars return(solve(xx) * sum(model_res ^ 2) / (nrow(indep_vars) - ncol(indep_vars))) } #' Critical value bounds stochastic simulation for Wald bounds-test for no #' cointegration #' #' \code{f_bounds_sim} simulates the critical value bounds for the Wald #' bounds-test for no cointegration \cite{Pesaran et al. (2001)} expressed both #' as F-statistics and as Chisq-statistics. #' #' @param case An integer from 1-5 specifying whether the 'intercept' and/or the #' trend' have to participate in the long-run/cointegrating #' relationship/equation (see section 'Cases' in \code{\link{bounds_f_test}}). #' @param k The number of independent variables. #' @param alpha A numeric vector between 0 and 1 indicating the significance #' level of the critical value bounds. Multiple values can be used. #' @param T An integer indicating the number of observations. #' @param R An integer indicating how many iterations will be used. Default is #' 40000. #' #' @return \code{f_bounds_sim} returns a list containing two data frames. One #' with the critical value bounds for the F-statistic and one with the #' critical value bounds for the Chisq-statistic. 
#' @seealso \code{\link{t_bounds_sim}} \code{\link{bounds_f_test}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal ts #' f_bounds_sim <- function(case, k, alpha, T, R = 40000) { chi2_crit_val0 <- c() chi2_crit_val1 <- c() f_crit_val0 <- c() f_crit_val1 <- c() for(i in 1:R) { # set the independent covariates y1 <- cumsum(stats::rnorm(T)) dep_var <- as.matrix(diff(y1, 1)) y1lag1 <- as.matrix(y1[1:(T-1)]) if (k != 0) { x <- matrix(stats::rnorm(T * k), T, k) x1 <- apply(x, 2, cumsum) xlag1 <- x[1:(T - 1), ] x1lag1 <- x1[1:(T - 1), ] } # set the deterministic linear trend if (case %in% c(1, 2, 3)) { # dataset already set } else if (case %in% c(4, 5)) { if (k == 0) { xlag1 <- c(t = 1:(T - 1)) x1lag1 <- c(t = 1:(T - 1)) } else { xlag1 <- cbind(t = c(1:(T - 1)), xlag1) x1lag1 <- cbind(t = c(1:(T - 1)), x1lag1) } } xlag1 <- as.matrix(xlag1) x1lag1 <- as.matrix(x1lag1) if (case == 1) { if (k == 0) { indep_vars <- y1lag1 model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) # I(0) and I(1) are identical f_test <- f_test_custom(dep_var, indep_vars, model0$residuals, const = FALSE) f_crit_val0[i] <- f_crit_val1[i] <- f_test$f # F and Chisq are identical for k=0 & no c,t chi2_crit_val0[i] <- chi2_crit_val1[i] <- f_test$f / f_test$df1 } else { indep_vars <- as.matrix(cbind(xlag1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) f_test <- f_test_custom(dep_var, indep_vars, model0$residuals, const = FALSE) f_crit_val0[i] <- f_test$f chi2_crit_val0[i] <-f_test$f * f_test$df1 indep_vars <- as.matrix(cbind(x1lag1, y1lag1)) model1 <- stats::.lm.fit(y = dep_var, x = indep_vars) f_test <- f_test_custom(dep_var, indep_vars, model1$residuals, const = FALSE) f_crit_val1[i] <- f_test$f chi2_crit_val1[i] <- f_test$f * f_test$df1 } } else if (case %in% c(2, 5)) { if (k == 0) { if (case == 2) { indep_vars <- as.matrix(cbind(1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) # I(0) and I(1) are identical w0 <- w1 <- aod::wald.test(b = stats::coef(model0), Sigma = vcov_custom(indep_vars, model0$residuals), Terms = 1:(k + 2))$result } else if (case == 5) { indep_vars <- as.matrix(cbind(1, xlag1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) # I(0) and I(1) are identical w0 <- w1 <- aod::wald.test(b = stats::coef(model0), Sigma = vcov_custom(indep_vars, model0$residuals), Terms = 3:(k + 3))$result } } else { indep_vars0 <- as.matrix(cbind(1, xlag1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars0) indep_vars1 <- as.matrix(cbind(1, x1lag1, y1lag1)) model1 <- stats::.lm.fit(y = dep_var, x = indep_vars1) if (case == 2) { w0 <- aod::wald.test(b = stats::coef(model0), Sigma = vcov_custom(indep_vars0, model0$residuals), Terms = 1:(k + 2))$result w1 <- aod::wald.test(b = stats::coef(model1), Sigma = vcov_custom(indep_vars1, model1$residuals), Terms = 1:(k + 2))$result } else if (case == 5) { w0 <- aod::wald.test(b = stats::coef(model0), Sigma = vcov_custom(indep_vars0, model0$residuals), Terms = 3:(k + 3))$result w1 <- aod::wald.test(b = stats::coef(model1), Sigma = vcov_custom(indep_vars1, model1$residuals), Terms = 3:(k + 3))$result } } chi2_crit_val0[i] <- w0[[1]][1] chi2_crit_val1[i] <- w1[[1]][1] f_crit_val0[i] <- w0[[1]][1] / w0[[1]][2] f_crit_val1[i] <- w1[[1]][1] / w1[[1]][2] } else if (case %in% c(3, 4)) { if (k == 0) { if (case == 3) { indep_vars <- as.matrix(cbind(1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) # I(0) and I(1) are identical } else if (case == 4) { indep_vars <- as.matrix(cbind(1, xlag1, 
y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars) # I(0) and I(1) are identical } F0 <- F1 <- f_test_custom(dep_var, indep_vars, model0$residuals, const = TRUE) } else { indep_vars0 <- as.matrix(cbind(1, xlag1, y1lag1)) model0 <- stats::.lm.fit(y = dep_var, x = indep_vars0) indep_vars1 <- as.matrix(cbind(1, x1lag1, y1lag1)) model1 <- stats::.lm.fit(y = dep_var, x = indep_vars1) F0 <- f_test_custom(dep_var, indep_vars0, model0$residuals, const = TRUE) F1 <- f_test_custom(dep_var, indep_vars1, model1$residuals, const = TRUE) } chi2_crit_val0[i] <- F0$f * F0$df1 chi2_crit_val1[i] <- F1$f * F1$df1 f_crit_val0[i] <- F0$f f_crit_val1[i] <- F1$f } } chi2_LB <- stats::quantile(chi2_crit_val0, 1 - alpha) chi2_UB <- stats::quantile(chi2_crit_val1, 1 - alpha) chi2_bounds <- cbind(chi2_LB, chi2_UB) f_LB <- stats::quantile(f_crit_val0, 1 - alpha) f_UB <- stats::quantile(f_crit_val1, 1 - alpha) f_bounds <- cbind(f_LB, f_UB) chi2_mean0 <- mean(chi2_crit_val0) chi2_mean1 <- mean(chi2_crit_val1) chi2_bounds <- rbind(chi2_bounds, c(chi2_mean0, chi2_mean1)) chi2_var0 <- stats::var(chi2_crit_val0) chi2_var1 <- stats::var(chi2_crit_val1) chi2_bounds <- rbind(chi2_bounds, c(chi2_var0, chi2_var1)) f_mean0 <- mean(f_crit_val0) f_mean1 <- mean(f_crit_val1) f_bounds <- rbind(f_bounds, c(f_mean0, f_mean1)) f_var0 <- stats::var(f_crit_val0) f_var1 <- stats::var(f_crit_val1) f_bounds <- rbind(f_bounds, c(f_var0, f_var1)) chi2_bounds <- as.data.frame(chi2_bounds) f_bounds <- as.data.frame(f_bounds) colnames(chi2_bounds) <- c('I0', 'I1') colnames(f_bounds) <- c('I0', 'I1') rownames(chi2_bounds) <- c(paste0("a", alpha), 'Mean', 'Variance') rownames(f_bounds) <- c(paste0("a", alpha), 'Mean', 'Variance') return(list(f_bounds = f_bounds, chisq_bounds = chi2_bounds)) } #' Critical value bounds stochastic simulation for t-bounds test for no #' cointegration #' #' \code{t_bounds_sim} simulates the critical value bounds for the t-bounds test #' for no cointegration \cite{Pesaran et al. (2001)}. #' #' @param case An integer (1, 3 or 5) specifying whether the 'intercept' and/or #' the trend' have to participate in the short-run relationship (see section #' 'Cases' in \code{\link{bounds_t_test}}). #' @param k The number of independent variables. #' @param alpha A numeric vector between 0 and 1 indicating the significance #' level of the critical value bounds. Multiple values can be used. #' @param T An integer indicating the number of observations. #' @param R An integer indicating how many iterations will be used. Default is #' 40000. #' #' @return \code{t_bounds_sim} returns a data frame with the critical value #' bounds for the t-statistic. #' @seealso \code{\link{f_bounds_sim}} \code{\link{bounds_t_test}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal ts #' t_bounds_sim <- function(case, k, alpha, T, R = 40000) { t_crit_val0 <- c() t_crit_val1 <- c() if (case %in% c(2,4)) { stop("The t-statistic bounds test is not applicable under case II and case IV", call. 
= FALSE) } else if (case %in% c(1)) { restriction_place <- k+1 } else if (case %in% c(3)) { restriction_place <- k+2 } else if (case %in% c(5)) { restriction_place <- k+3 } for(i in 1:R) { # set the independent covariates y1 <- cumsum(stats::rnorm(T)) dy1 <- diff(y1, 1) y1lag1 <- y1[1:(T-1)] if (k != 0) { x <- matrix(stats::rnorm(T * k), T, k) x1 <- apply(x, 2, cumsum) xlag1 <- x[1:(T - 1), ] x1lag1 <- x1[1:(T - 1), ] } # set the deterministic linear trend if (case %in% c(1,3)) { # dataset already set } else if (case %in% c(5)) { if (k == 0) { xlag1 <- c(t = 1:(T - 1)) x1lag1 <- c(t = 1:(T - 1)) } else { xlag1 <- cbind(t = c(1:(T - 1)), xlag1) x1lag1 <- cbind(t = c(1:(T - 1)), x1lag1) } } if (case == 1) { if (k == 0) { t_crit_val0[i] <- summary(stats::lm(dy1 ~ y1lag1 -1))$coefficients[restriction_place, 3] # I(0) and I(1) are identical t_crit_val1[i] <- t_crit_val0[i] } else { t_crit_val0[i] <- summary(stats::lm(dy1 ~ xlag1 + y1lag1 -1))$coefficients[restriction_place, 3] t_crit_val1[i] <- summary(stats::lm(dy1 ~ x1lag1 + y1lag1 -1))$coefficients[restriction_place, 3] } } else if (case %in% c(5)) { if (k == 0) { t_crit_val0[i] <- summary(stats::lm(dy1 ~ xlag1 + y1lag1))$coefficients[restriction_place, 3] # I(0) and I(1) are identical t_crit_val1[i] <- t_crit_val0[i] } else { t_crit_val0[i] <- summary(stats::lm(dy1 ~ xlag1 + y1lag1))$coefficients[restriction_place, 3] t_crit_val1[i] <- summary(stats::lm(dy1 ~ x1lag1 + y1lag1))$coefficients[restriction_place, 3] } } else if (case %in% c(3)) { if (k == 0) { t_crit_val0[i] <- summary(stats::lm(dy1 ~ y1lag1))$coefficients[restriction_place, 3] # I(0) and I(1) are identical t_crit_val1[i] <- t_crit_val0[i] } else { t_crit_val0[i] <- summary(stats::lm(dy1 ~ xlag1 + y1lag1))$coefficients[restriction_place, 3] t_crit_val1[i] <- summary(stats::lm(dy1 ~ x1lag1 + y1lag1))$coefficients[restriction_place, 3] } } } t_LB <- stats::quantile(t_crit_val0, alpha) t_UB <- stats::quantile(t_crit_val1, alpha) t_bounds <- cbind(t_LB, t_UB) t_mean0 <- mean(t_crit_val0) t_mean1 <- mean(t_crit_val1) t_bounds <- rbind(t_bounds, c(t_mean0, t_mean1)) t_var0 <- stats::var(t_crit_val0) t_var1 <- stats::var(t_crit_val1) t_bounds <- rbind(t_bounds, c(t_var0, t_var1)) t_bounds <- as.data.frame(t_bounds) colnames(t_bounds) <- c('I0', 'I1') rownames(t_bounds) <- c(paste0("a", alpha), 'Mean', 'Variance') return(t_bounds) }
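# Illustration (not part of the package): a condensed sketch of the simulation
# idea behind f_bounds_sim() and t_bounds_sim() above for the simplest setting
# (case 3, k = 0): simulate a random walk, regress its first difference on its
# lagged level, keep the t-statistic, and read critical values off the
# empirical quantiles. 'R_small' is deliberately tiny; the functions above
# default to R = 40000 replications.
set.seed(42)
R_small <- 500; T_obs <- 80
t_stats <- replicate(R_small, {
  y  <- cumsum(stats::rnorm(T_obs))
  dy <- diff(y)
  summary(stats::lm(dy ~ y[1:(T_obs - 1)]))$coefficients[2, 3]
})
stats::quantile(t_stats, c(0.01, 0.05, 0.10))  # simulated lower-tail critical values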
/scratch/gouwar.j/cran-all/cranData/ARDL/R/crit_val_bounds_sim.R
#' The Danish data on money income prices and interest rates #' #' This data set contains the series used by S. Johansen and K. Juselius for #' estimating a money demand function of Denmark. #' #' An object of class "zooreg" "zoo". #' #' @format A time-series object with 55 rows and 5 variables. Time period from 1974:Q1 #' until 1987:Q3. #' \describe{ #' \item{LRM}{logarithm of real money, M2} #' \item{LRY}{logarithm of real income} #' \item{LPY}{logarithm of price deflator} #' \item{IBO}{bond rate} #' \item{IDE}{bank deposit rate} #' } #' @source \url{https://onlinelibrary.wiley.com/doi/10.1111/j.1468-0084.1990.mp52002003.x} #' @references Johansen, S. and Juselius, K. (1990), Maximum Likelihood #' Estimation and Inference on Cointegration -- with Applications to the Demand #' for Money, \emph{Oxford Bulletin of Economics and Statistics}, \bold{52, 2}, #' 169--210. #' @keywords datasets "denmark" #' The UK earnings equation data from Pesaran et al. (2001) #' #' This data set contains the series used by Pesaran et al. (2001) for estimating the UK earnings equation. #' The clean format of the data retrieved from the Data Archive of Natsiopoulos and Tzeremes (2022). #' #' An object of class "zooreg" "zoo". #' #' @format A time-series object with 112 rows and 7 variables. Time period from 1970:Q1 #' until 1997:Q4. #' \describe{ #' \item{w}{real wage} #' \item{Prod}{labor productivity} #' \item{UR}{unemployment rate} #' \item{Wedge}{wedge effect} #' \item{Union}{union power} #' \item{D7475}{income policies 1974:Q1-1975:Q4} #' \item{D7579}{income policies 1975:Q1-1979:Q4} #' } #' @source \url{http://qed.econ.queensu.ca/jae/datasets/pesaran001/} #' \url{http://qed.econ.queensu.ca/jae/datasets/natsiopoulos001/} #' @references M. Hashem Pesaran, Richard J. Smith, and Yongcheol Shin, (2001), "Bounds Testing #' Approaches to the Analysis of Level Relationships", \emph{Journal of Applied #' Econometrics}, \bold{16, 3}, 289--326. #' #' Kleanthis Natsiopoulos and Nickolaos G. Tzeremes, (2022), "ARDL bounds test for #' Cointegration: Replicating the Pesaran et al. (2001) Results for the #' UK Earnings Equation Using R", #' \emph{Journal of Applied Econometrics}, \bold{37, 5}, 1079--1090. #' \doi{10.1002/jae.2919} #' @keywords datasets "PSS2001" #' The UK earnings equation data from Natsiopoulos and Tzeremes (2022) #' #' This data set contains the series used by Natsiopoulos and Tzeremes (2022) for re-estimating the UK earnings equation. #' The clean format of the data retrieved from the Data Archive of Natsiopoulos and Tzeremes (2022). #' #' An object of class "zooreg" "zoo". #' #' @format A time-series object with 196 rows and 9 variables. Time period from 1971:Q1 #' until 2019:Q4. #' \describe{ #' \item{time}{time variable} #' \item{w}{real wage} #' \item{Prod}{labor productivity} #' \item{UR}{unemployment rate} #' \item{Wedge}{wedge effect} #' \item{Union}{union power} #' \item{D7475}{income policies 1974:Q1-1975:Q4} #' \item{D7579}{income policies 1975:Q1-1979:Q4} #' \item{UnionR}{union membership} #' } #' @source \url{http://qed.econ.queensu.ca/jae/datasets/natsiopoulos001/} #' @references Kleanthis Natsiopoulos and Nickolaos G. Tzeremes, (2022), "ARDL bounds test for #' Cointegration: Replicating the Pesaran et al. (2001) Results for the UK Earnings Equation Using R", #' \emph{Journal of Applied Econometrics}, \bold{37, 5}, 1079--1090. #' \doi{10.1002/jae.2919} #' @keywords datasets "NT2022"
/scratch/gouwar.j/cran-all/cranData/ARDL/R/data.R
#' Delta method #' #' An internal generic function, customized for approximating the standard #' errors of the estimated multipliers. #' #' The function invokes two different \code{\link[utils]{methods}}, one for #' objects of \code{\link[base]{class}} 'ardl' and one for objects of #' \code{class} 'uecm'. This is because of the different (but equivalent) #' transformation functions that are used for each class/model ('ardl' and #' 'uecm') to estimate the multipliers. #' #' @inheritParams multipliers #' #' @return \code{delta_method} returns a numeric vector of the same length as #' the number of the independent variables (excluding the fixed ones) in the #' model. #' #' @seealso \code{\link{multipliers}}, \code{\link{ardl}}, \code{\link{uecm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' delta_method <- function(object, vcov_matrix = NULL) { UseMethod("delta_method") } #' @rdname delta_method #' delta_method.ardl <- function(object, vcov_matrix = NULL) { if (is.null(vcov_matrix)) vcov_matrix <- stats::vcov(object) estmean <- stats::coef(object) estvar <- vcov_matrix w_part <- object$parsed_formula$w_part$var kx <- object$parsed_formula$kx p <- object$order[1] q <- object$order[-1] m <- 1 : (sum(object$order) + length(object$order) - 1) # case 2, 3 # keep first this case to avoid w_part[1] in case when length(w_part)==0 if ((length(w_part) == 0) | (length(w_part) == 2) ) { # 2nd statement for the case with only trend place_y <- 2:(p+1) m <- c(m, m[length(m)] + 1) bins_w <- 1 # case 1 } else if ((w_part[1] == "- 1") & length(w_part) == 1) { place_y <- 1:p bins_w <- c() # case 4, 5 } else if (length(w_part) == 1) { place_y <- 3:(p+2) m <- 1:(m[length(m)] + 2) bins_w <- c(1, 1) } place_x <- m[!(m %in% place_y)] bin_breaks <- c(0, cumsum(c(bins_w, q + 1))) pi_yx.x <- lapply(1:(length(bin_breaks) - 1), function(i) { paste0("~ (", paste0("x", place_x[(bin_breaks[i] + 1) : (bin_breaks[i + 1])], collapse = " + "), ")") }) pi_yy <- paste0(" / (1 - (", paste0("x", place_y, collapse = " + "), "))") restrictions <- paste0(pi_yx.x, pi_yy) lr_se <- sapply(1:length(restrictions), function(i) { msm::deltamethod(stats::formula(restrictions[i]), estmean, estvar) }) return(lr_se) } #' @rdname delta_method #' delta_method.uecm <- function(object, vcov_matrix = NULL) { if (is.null(vcov_matrix)) vcov_matrix <- stats::vcov(object) estmean <- stats::coef(object) estvar <- vcov_matrix w_part <- object$parsed_formula$w_part$var kx <- object$parsed_formula$kx # case 2, 3 # keep first this case to avoid w_part[1] in case when length(w_part)==0 if ((length(w_part) == 0) | (length(w_part) == 2) ) { # 2nd statement for the case with only trend restrictions <- paste0("~ x", c(1, 3:(kx + 2)), " / x2") # case 1 } else if ((w_part[1] == "- 1") & length(w_part) == 1) { restrictions <- paste0("~ x", c(2:(kx + 1)), " / x1") # case 4, 5 } else if (length(w_part) == 1) { restrictions <- paste0("~ x", c(1, 2, 4:(kx + 3)), " / x3") } lr_se <- sapply(1:length(restrictions), function(i) { msm::deltamethod(stats::formula(restrictions[i]), estmean, estvar) }) return(lr_se) }
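# Illustration (not part of the package): a self-contained example of the
# msm::deltamethod() call pattern used above. The formula "~ -x2 / x1" expresses
# a ratio of the 2nd to the 1st estimated coefficient (the same pattern the
# functions above use for the long-run multipliers); the data and the particular
# ratio here are arbitrary and only meant to show the mechanics.
set.seed(123)
x <- stats::rnorm(100)
y <- 1 + 2 * x + stats::rnorm(100)
fit <- stats::lm(y ~ x)
ratio_se <- msm::deltamethod(~ -x2 / x1, stats::coef(fit), stats::vcov(fit))
ratio_se  # approximate standard error of -(slope / intercept)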
/scratch/gouwar.j/cran-all/cranData/ARDL/R/delta_method.R
#' ARDL formula specification builder #' #' It creates the ARDL specification according to the given "formula" and their #' corresponding "orders". #' #' @param parsed_formula A list containing the formula parts as returned from #' \code{\link{parse_formula}}. #' @param order A numeric vector with the ARDL order as returned from #' \code{\link[=parse_order]{parse_order(restriction = FALSE)}}. #' #' @return \code{build_ardl_formula} returns a list containing the full formula #' and the independent and dependent parts of the formula separated. The full #' formula is ready to be used as input in the \code{dynlm} function. #' #' @seealso \code{\link{build_uecm_formula}}, \code{\link{build_recm_formula}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' build_ardl_formula <- function(parsed_formula, order) { ardl_formula <- list() ardl_formula$dep_part <- paste0(parsed_formula$y_part$var, " ~ ") ardl_formula$indep_part$w$var <- c() if (length(parsed_formula$w_part$var) > 0) { ardl_formula$indep_part$w <- paste0(parsed_formula$w_part$var, " + ", collapse = "") } ardl_formula$indep_part$y <- c() ardl_formula$indep_part$y <- paste0("L(", parsed_formula$y_part$var, ", ", 1:order[1], ")", " + ", collapse = "") ardl_formula$indep_part$x <- c() for(i in 1:parsed_formula$kx) { if (order[i + 1] != 0) { temp <- paste0(parsed_formula$x_part$var[i], " + ", paste0("L(", parsed_formula$x_part$var[i], ", ", 1:order[i + 1], ")", " + ", collapse = ""), collapse = "") } else { temp <- paste0(parsed_formula$x_part$var[i], " + ") } ardl_formula$indep_part$x <- paste(ardl_formula$indep_part$x, temp, collapse = "") } rm(temp) if (parsed_formula$kfixed > 0) { ardl_formula$indep_part$fixed <- paste0(parsed_formula$fixed_part$var, " + ", collapse = "") } else { ardl_formula$indep_part$fixed <- "" } ardl_formula$full <- paste0(ardl_formula$dep_part, ardl_formula$indep_part$w, ardl_formula$indep_part$y, ardl_formula$indep_part$x, ardl_formula$indep_part$fixed, collapse = "") %>% stringr::str_trim(side = "both") ardl_formula$full <- ifelse(stringr::str_sub(ardl_formula$full, start = stringr::str_count(ardl_formula$full), end = stringr::str_count(ardl_formula$full)) == "+", stringr::str_sub(ardl_formula$full, start = 1, end = stringr::str_count(ardl_formula$full) -1 ), ardl_formula$full) %>% stringr::str_trim(side = "both") return_list <- list(dep_part = ardl_formula$dep_part, indep_part = ardl_formula$indep_part, full = ardl_formula$full) return(return_list) } #' UECM formula specification builder #' #' It creates the UECM (Unrestricted Error Correction Model) specification #' according to the given "formula" and the corresponding "order" of the #' underlying ARDL. #' #' @param order A numeric vector with the underlying ARDL order as returned from #' \code{\link[=parse_order]{parse_order(restriction = FALSE)}}. #' @inheritParams build_ardl_formula #' #' @return \code{build_uecm_formula} returns a list containing the full formula #' and the independent and dependent parts of the formula separated. The full #' formula is ready to be used as input in the \code{dynlm} function. 
#' #' @seealso \code{\link{build_ardl_formula}}, \code{\link{build_recm_formula}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' build_uecm_formula <- function(parsed_formula, order) { uecm_formula <- list() uecm_formula$dep_part <- paste0("d(", parsed_formula$y_part$var, ")", " ~ ") uecm_formula$indep_part$w <- c() if (length(parsed_formula$w_part$var) > 0) { uecm_formula$indep_part$w <- paste0(parsed_formula$w_part$var, " + ", collapse = "") } uecm_formula$indep_part$levels$y <- c() uecm_formula$indep_part$levels$y <- paste0("L(", parsed_formula$y_part$var, ", ", 1, ")", " + ", collapse = "") uecm_formula$indep_part$levels$x <- c() uecm_formula$indep_part$diff$x <- c() for(i in 1:parsed_formula$kx){ if (order[i + 1] == 0) { temp <- paste0(parsed_formula$x_part$var[i], " + ") temp1 <- "" } else if (order[i + 1] == 1) { temp <- paste0("L(", parsed_formula$x_part$var[i], ", ", 1, ")", " + ", collapse = "") temp1 <- paste0("d(", parsed_formula$x_part$var[i], ") + ", collapse = "") } else { temp <- paste0("L(", parsed_formula$x_part$var[i], ", ", 1, ")", " + ", collapse = "") temp1 <- paste0("d(", parsed_formula$x_part$var[i], ") + ", paste0("d(L(", parsed_formula$x_part$var[i], ", ", 1:(order[i + 1] - 1), "))", " + ", collapse = ""), collapse = "") } uecm_formula$indep_part$levels$x <- paste(uecm_formula$indep_part$levels$x, temp, collapse = "") uecm_formula$indep_part$diff$x <- paste(uecm_formula$indep_part$diff$x, temp1, collapse = "") } rm(temp); rm(temp1) uecm_formula$indep_part$diff$y <- c() if (order[1] == 1) { uecm_formula$indep_part$diff$y <- "" } else { # AR(0) models are discarded anyway uecm_formula$indep_part$diff$y <- paste0("d(L(", parsed_formula$y_part$var, ", ", 1:(order[1] - 1), "))", " + ", collapse = "") } if (parsed_formula$kfixed > 0) { uecm_formula$indep_part$fixed <- paste0(parsed_formula$fixed_part$var, " + ", collapse = "") } else { uecm_formula$indep_part$fixed <- "" } uecm_formula$full <- paste0(uecm_formula$dep_part, uecm_formula$indep_part$w, uecm_formula$indep_part$levels$y, uecm_formula$indep_part$levels$x, uecm_formula$indep_part$diff$y, uecm_formula$indep_part$diff$x, uecm_formula$indep_part$fixed, collapse = "") %>% stringr::str_trim(side = "both") uecm_formula$full <- ifelse(stringr::str_sub(uecm_formula$full, start = stringr::str_count(uecm_formula$full), end = stringr::str_count(uecm_formula$full)) == "+", stringr::str_sub(uecm_formula$full, start = 1, end = stringr::str_count(uecm_formula$full) -1 ), uecm_formula$full) %>% stringr::str_trim(side = "both") return_list <- list(dep_part = uecm_formula$dep_part, indep_part = uecm_formula$indep_part, full = uecm_formula$full) return(return_list) } #' RECM formula specification builder #' #' It creates the RECM (Restricted Error Correction Model) specification #' according to the given "formula" and the corresponding "order" of the #' underlying ARDL. #' #' @param case An integer from 1-5 or a character string specifying whether the #' 'intercept' or the 'trend' have to participate in the #' long-run/cointegrating relationship/equation (see 'Details'). #' @inheritParams build_uecm_formula #' #' @return \code{build_recm_formula} returns a list containing the full formula #' and the independent and dependent parts of the formula separated. The full #' formula is ready to be used as input in the \code{dynlm} function providing #' also the 'ect' (error correction term) to the data. 
#' #' @seealso \code{\link{build_ardl_formula}}, \code{\link{build_uecm_formula}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' build_recm_formula <- function(parsed_formula, order, case) { recm_formula <- list() recm_formula$dep_part <- paste0("d(", parsed_formula$y_part$var, ")", " ~ ") recm_formula$indep_part$w <- c() recm_formula$indep_part$w <- paste0(parsed_formula$w_part$var, " + ", collapse = "") if (case %in% c(1, 2)) { recm_formula$indep_part$w <- "- 1 + " } else if (case %in% c(3, 4)) { recm_formula$indep_part$w <- "" } else if (case == 5) { recm_formula$indep_part$w <- paste0(parsed_formula$w_part$var, " + ", collapse = "") } recm_formula$indep_part$diff$x <- c() for(i in 1:parsed_formula$kx){ if (order[i + 1] == 0) { temp <- "" } else if (order[i + 1] == 1) { temp <- paste0("d(", parsed_formula$x_part$var[i], ") + ", collapse = "") } else { temp <- paste0("d(", parsed_formula$x_part$var[i], ") + ", paste0("d(L(", parsed_formula$x_part$var[i], ", ", 1:(order[i + 1] - 1), "))", " + ", collapse = ""), collapse = "") } recm_formula$indep_part$diff$x <- paste(recm_formula$indep_part$diff$x, temp, collapse = "") } rm(temp) recm_formula$indep_part$diff$y <- c() if (order[1] == 1) { recm_formula$indep_part$diff$y <- "" } else { # AR(0) models are discarded anyway recm_formula$indep_part$diff$y <- paste0("d(L(", parsed_formula$y_part$var, ", ", 1:(order[1] - 1), "))", " + ", collapse = "") } if (parsed_formula$kfixed > 0) { recm_formula$indep_part$fixed <- paste0(parsed_formula$fixed_part$var, " + ", collapse = "") } else { recm_formula$indep_part$fixed <- "" } recm_formula$indep_part$ect <- "ect" recm_formula$full <- paste0(recm_formula$dep_part, recm_formula$indep_part$w, recm_formula$indep_part$diff$y, recm_formula$indep_part$diff$x, recm_formula$indep_part$fixed, recm_formula$indep_part$ect, collapse = "") %>% stringr::str_trim(side = "both") recm_formula$full <- ifelse(stringr::str_sub(recm_formula$full, start = stringr::str_count(recm_formula$full), end = stringr::str_count(recm_formula$full)) == "+", stringr::str_sub(recm_formula$full, start = 1, end = stringr::str_count(recm_formula$full) -1 ), recm_formula$full) %>% stringr::str_trim(side = "both") return_list <- list(dep_part = recm_formula$dep_part, indep_part = recm_formula$indep_part, full = recm_formula$full) return(return_list) }
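# Illustration (not part of the package): a stripped-down sketch of the string
# assembly the builders above perform. For a hypothetical ARDL(2, 1) in y and x,
# the pasted pieces look like the line below; the real builders additionally
# handle the intercept/trend and fixed parts and trim any trailing "+".
p <- 2; q <- 1
y_lags <- paste0("L(y, ", 1:p, ")", collapse = " + ")
x_lags <- paste0("x + ", paste0("L(x, ", 1:q, ")", collapse = " + "))
paste("y ~", y_lags, "+", x_lags)  # "y ~ L(y, 1) + L(y, 2) + x + L(x, 1)"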
/scratch/gouwar.j/cran-all/cranData/ARDL/R/formula_builders.R
AIC_pss <- function(model){
  # maximized log-likelihood value of the model
  LLp <- stats::logLik(model)
  # number of freely estimated coefficients
  sp <- length(model$coefficients)
  LLp - sp
}
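# Illustration (not part of the package): the criterion above is a penalized
# log-likelihood (logLik minus the number of estimated coefficients), so larger
# values are presumably preferred, unlike stats::AIC() where smaller is better.
# The lm() fits below are arbitrary and serve only to show the comparison.
set.seed(7)
d  <- data.frame(y = stats::rnorm(60), x1 = stats::rnorm(60), x2 = stats::rnorm(60))
m1 <- stats::lm(y ~ x1, data = d)
m2 <- stats::lm(y ~ x1 + x2, data = d)
c(m1 = as.numeric(stats::logLik(m1)) - length(stats::coef(m1)),
  m2 = as.numeric(stats::logLik(m2)) - length(stats::coef(m2)))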
/scratch/gouwar.j/cran-all/cranData/ARDL/R/helper-functions.R
#' Multipliers estimation #' #' \code{multipliers} is a generic function used to estimate short-run (impact), #' delay, interim and long-run (total) multipliers, accompanied by their #' corresponding standard errors, t-statistics and p-values. #' #' The function invokes two different \code{\link[utils]{methods}}, one for #' objects of \code{\link[base]{class}} 'ardl' and one for objects of #' \code{class} 'uecm'. This is because of the different (but equivalent) #' transformation functions that are used for each class/model ('ardl' and #' 'uecm') to estimate the multipliers. #' #' \code{type = 0} is equivalent to \code{type = "sr"}. #' #' Note that the interim multipliers are the cumulative sum of the delays, and #' that the sum of the interim multipliers (for long enough periods) and thus #' a distant enough interim multiplier match the long-run multipliers. #' #' The delay (interim) multiplier can be interpreted as the effect on the #' dependent variable in period t+s, resulting from an instant (sustained) shock #' to an independent variable in period t. #' #' The delta method is used for approximating the standard errors (and thus the #' t-statistics and p-values) of the estimated long-run and delay multipliers. #' #' @param object An object of \code{\link[base]{class}} 'ardl' or 'uecm'. #' @param type A character string describing the type of multipliers. Use "lr" #' for long-run (total) multipliers (default), "sr" or 0 for short-run (impact) #' multipliers or an integer between 1 and 200 for delay and interim multipliers. #' @param vcov_matrix The estimated covariance matrix of the random variable #' that the transformation function uses to estimate the standard errors (and #' so the t-statistics and p-values) of the multipliers. The default is #' \code{vcov(object)} (when \code{vcov_matrix = NULL}), but other estimations #' of the covariance matrix of the regression's estimated coefficients can #' also be used (e.g., using \code{\link[sandwich]{vcovHC}} or #' \code{\link[sandwich]{vcovHAC}}). #' @param se A logical indicating whether you want standard errors for delay #' multipliers to be provided. The default is FALSE. Note that this parameter #' does not refer to the standard errors for the long-run and short-run #' multipliers, for which are always calculated. IMPORTANT: Calculating standard #' errors for long periods of delays may cause your computer to run out of #' memory and terminate your R session, losing important unsaved work. As a rule #' of thumb, try not to exceed \code{type = 19} when \code{se = TRUE}. #' #' @return \code{multipliers} returns (for long and short run multipliers) a #' data.frame containing the independent variables (including possibly #' existing intercept or trend and excluding the fixed variables) and their #' corresponding standard errors, t-statistics and p-values. For delay and #' interim multipliers it returns a list with a data.frame for each variable, #' containing the delay and interim multipliers for each period. 
#' #' @section Mathematical Formula: #' #' \strong{Short-Run Multipliers:} #' \describe{ #' \item{As derived from an ARDL:}{} #' } #' \deqn{\frac{\partial y_{t}}{\partial x_{j,t}} = b_{j,0} \;\;\;\;\; j \in \{1,\dots,k\}} #' #' \describe{ #' \item{As derived from an Unrestricted ECM:}{} #' } #' \deqn{\frac{\partial y_{t}}{\partial x_{j,t}} = \omega_{j} \;\;\;\;\; j \in \{1,\dots,k\}} #' #' \describe{ #' \item{Constant and Linear Trend:}{} #' } #' \deqn{c_{0}} #' \deqn{c_{1}} #' #' \strong{Delay & Interim Multipliers:} #' \describe{ #' \item{As derived from an ARDL:}{} #' } #' \deqn{Delay_{x_{j},s} = \frac{\partial y_{t+s}}{\partial x_{j,t}} = b_{j,s} + \sum_{i=1}^{min\{p,s\}} b_{y,i} \frac{\partial y_{t+(s-i)}}{\partial x_{j,t}} \;\;\;\;\; b_{j,s} = 0 \;\; \forall \;\; s > q} #' \deqn{Interim_{x_{j},s} = \sum_{i=0}^{s} Delay_{x_{j},s}} #' #' \describe{ #' \item{Constant and Linear Trend:}{} #' } #' \deqn{Delay_{intercept,s} = c_{0} + \sum_{i=1}^{min\{p,s\}} b_{y,i} Delay_{intercept,s-i} \;\;\;\;\; c_{0} = 0 \;\; \forall \;\; s \neq 0} #' \deqn{Interim_{intercept,s} = \sum_{i=0}^{s} Delay_{intercept,s}} #' \deqn{Delay_{trend,s} = c_{1} + \sum_{i=1}^{min\{p,s\}} b_{y,i} Delay_{trend,s-i} \;\;\;\;\; c_{1} = 0 \;\; \forall \;\; s \neq 0} #' \deqn{Interim_{trend,s} = \sum_{i=0}^{s} Delay_{trend,s}} #' #' \strong{Long-Run Multipliers:} #' \describe{ #' \item{As derived from an ARDL:}{} #' } #' \deqn{\frac{\partial y_{t+\infty}}{\partial x_{j,t}} = \theta_{j} = \frac{\sum_{l=0}^{q_{j}}b_{j,l}}{1-\sum_{i=1}^{p}b_{y,i}} \;\;\;\;\; j \in \{1,\dots,k\}} #' \describe{ #' \item{Constant and Linear Trend:}{} #' } #' \deqn{\mu = \frac{c_{0}}{1-\sum_{i=1}^{p}b_{y,i}}} #' \deqn{\delta = \frac{c_{1}}{1-\sum_{i=1}^{p}b_{y,i}}} #' #' \describe{ #' \item{As derived from an Unrestricted ECM:}{} #' } #' \deqn{\frac{\partial y_{t+\infty}}{\partial x_{j,t}} = \theta_{j} = \frac{\pi_{j}}{-\pi_{y}} \;\;\;\;\; j \in \{1,\dots,k\}} #' \describe{ #' \item{Constant and Linear Trend:}{} #' } #' \deqn{\mu = \frac{c_{0}}{-\pi_{y}}} #' \deqn{\delta = \frac{c_{1}}{-\pi_{y}}} #' #' @seealso \code{\link{ardl}}, \code{\link{uecm}}, \code{\link{plot_delay}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords math #' @export #' @examples #' data(denmark) #' #' ## Estimate the long-run multipliers of an ARDL(3,1,3,2) model --------- #' #' # From an ARDL model #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' mult_ardl <- multipliers(ardl_3132) #' mult_ardl #' #' # From an UECM #' uecm_3132 <- uecm(ardl_3132) #' mult_uecm <- multipliers(uecm_3132) #' mult_uecm #' #' all.equal(mult_ardl, mult_uecm) #' #' #' ## Estimate the short-run multipliers of an ARDL(3,1,3,2) model -------- #' #' mult_sr <- multipliers(uecm_3132, type = "sr") #' mult_0 <- multipliers(uecm_3132, type = 0) #' all.equal(mult_sr, mult_0) #' #' #' ## Estimate the delay & interim multipliers of an ARDL(3,1,3,2) model -- #' #' mult_lr <- multipliers(uecm_3132, type = "lr") #' mult_inter80 <- multipliers(uecm_3132, type = 80) #' #' mult_lr #' sum(mult_inter80$`(Intercept)`$Delay) #' mult_inter80$`(Intercept)`$Interim[nrow(mult_inter80$`(Intercept)`)] #' sum(mult_inter80$LRY$Delay) #' mult_inter80$LRY$Interim[nrow(mult_inter80$LRY)] #' sum(mult_inter80$IBO$Delay) #' mult_inter80$IBO$Interim[nrow(mult_inter80$IBO)] #' sum(mult_inter80$IDE$Delay) #' mult_inter80$IDE$Interim[nrow(mult_inter80$IDE)] #' plot(mult_inter80$LRY$Delay, type='l') #' plot(mult_inter80$LRY$Interim, type='l') #' #' mult_inter12 <- 
multipliers(uecm_3132, type = 12, se = TRUE) #' plot_delay(mult_inter12, interval = 0.95) multipliers <- function(object, type = "lr", vcov_matrix = NULL, se = FALSE) { UseMethod("multipliers") } #' @rdname multipliers #' @export #' multipliers.ardl <- function(object, type = "lr", vcov_matrix = NULL, se = FALSE) { # no visible binding for global variable NOTE solution group_id <- coeff <- sums <- NULL; rm(group_id, coeff, sums) if (!(type %in% c("lr", "sr", 0:200))) { stop("'type' should be one of 'lr', 'sr' or a number between 0 and 200", call. = FALSE) } if (is.null(vcov_matrix)) vcov_matrix <- stats::vcov(object) kw <- object$parsed_formula$kw kx <- object$parsed_formula$kx kfixed <- object$parsed_formula$kfixed objcoef <- object$coefficients orders_x <- object$order[-1] from <- kw + 1 to <- kw + object$order[1] if (type == "lr") { # create table without the y in levels and fixed x_table <- dplyr::tibble(name = names(objcoef), coeff = objcoef) %>% dplyr::slice(-(from:to)) %>% dplyr::slice(1:(dplyr::n() - object$parsed_formula$kfixed)) # create table only with y in levels y_table <- dplyr::tibble(objcoef) %>% dplyr::slice(from:to) # create groups to sum by group x_table <- x_table %>% dplyr::mutate(group_id = if (kw != 0) { c(1:kw, rep(from:(from + object$parsed_formula$kx - 1), orders_x + 1)) } else { c(rep(from:(from + object$parsed_formula$kx - 1), orders_x + 1)) }) # create the sums of levels of x and trends temp <- x_table %>% dplyr::group_by(group_id) %>% dplyr::summarise(sums = sum(coeff)) %>% dplyr::select(sums) # calculate coefficients of multipliers multipliers_coef <- (temp / (1 - sum(y_table)))[ ,1] if (kw != 0) { names(multipliers_coef) <- c(names(objcoef)[1:kw], object$parsed_formula$x_part$var) } else { names(multipliers_coef) <- object$parsed_formula$x_part$var } multipliers_se <- delta_method(object, vcov_matrix = vcov_matrix) multipliers <- data.frame(multipliers_coef, multipliers_se, multipliers_coef/multipliers_se, 2 * stats::pt(-abs(multipliers_coef/multipliers_se), df = stats::df.residual(object))) # df = n - # of estimated coefficients multipliers <- data.frame(rownames(multipliers), multipliers) names(multipliers) <- c("Term", "Estimate", "Std. 
Error", "t value", "Pr(>|t|)") rownames(multipliers) <- 1:nrow(multipliers) return(multipliers) } else { b0 <- 1 for (i in 1:(length(orders_x) -1)) { b0 <- c(b0, b0[length(b0)] + orders_x[i] +1) } b0 <- if (kw!=0) c(1:kw, b0+kw) else b0 # create table without the y in levels and fixed sr_mult <- as.data.frame(summary(object)$coefficients) %>% dplyr::slice(-(from:to)) %>% dplyr::slice(b0) sr_mult <- cbind(Term = rownames(sr_mult), sr_mult) rownames(sr_mult) <- 1:nrow(sr_mult) if (type %in% 1:200) { # anything except 0:200 would have been stopped in the earlier check interim = type delays_table <- as.data.frame(summary(object)$coefficients) %>% dplyr::slice(-(from:to)) if (kfixed != 0) { delays_table <- delays_table %>% dplyr::slice(-((nrow(delays_table)-kfixed+1):nrow(delays_table))) } delay <- list() int_mult <- list() if (se) xpressions <- list() orders_wx <- c(rep(0, kw), orders_x) if (kw != 0) { delay_names <- c(rownames(delays_table)[1:kw], object$parsed_formula$x_part$var) } else { delay_names <- object$parsed_formula$x_part$var } y_table <- dplyr::tibble(objcoef) %>% dplyr::slice(from:to) for (k in 1:(kx+kw)) { delay[[k]] <- delays_table$Estimate[1:(orders_wx[k]+1)] names(delay)[k] <- delay_names[k] int_mult[[k]] <- data.frame() names(int_mult)[[k]] <- delay_names[k] if (se) { xpressions[[k]] <- data.frame() names(xpressions)[[k]] <- delay_names[k] } for (ss in 0:interim) { weights_n <- min(object$order[1], ss) direct <- ifelse(ss <= orders_wx[k], delay[[k]][ss+1], 0) if (se) { skip_w_y <- ifelse((kw != 0) & k %in% 1:kw, 0 + k-1, kw + object$order[1]) if (direct == 0) { direct_xpression <- NULL } else { direct_xpression <- paste0("x", skip_w_y + ifelse(!k %in% 1:kw, sum(orders_wx[kw:(k-1)]) + k-kw-1, 0) + (ss + 1)) } } if (ss == 0) { int_mult[[k]] <- data.frame(Period = ss, Delay = direct) if (se) xpressions[[k]] <- data.frame(Period = ss, xpression = direct_xpression) } else { int_mult[[k]] <- rbind(int_mult[[k]], data.frame(Period = ss, Delay = direct + sum(y_table[1:weights_n,] * rev(int_mult[[k]][(ss-(weights_n-1)):ss,"Delay"])))) if (se) { xpressions[[k]] <- rbind(xpressions[[k]], data.frame(Period = ss, xpression = paste0(c(direct_xpression, paste0("x", (1:weights_n)+kw, "*(", rev(xpressions[[k]][(ss-(weights_n-1)):ss,"xpression"]), ")")), collapse = "+" ))) } } } delays_table <- delays_table %>% dplyr::slice(-(1:(orders_wx[k] + 1))) } if (se) xpressions <- lapply(xpressions, FUN = function(x) {data.frame(Period = x$Period, xpression = paste0("~ ", x$xpression))}) if (se) xpressions <- lapply(xpressions, FUN = function(x) {msm::deltamethod(lapply(x$xpression, stats::formula), stats::coef(object), vcov_matrix)}) for (i in 1:length(int_mult)) { if (se) int_mult[[i]] <- cbind(int_mult[[i]], "Std. Error Delay" = xpressions[[i]]) rownames(int_mult[[i]]) <- NULL } int_mult <- lapply(int_mult, FUN = function(x) {cbind(x, Interim = cumsum(x$Delay))}) return(int_mult) } else { return(sr_mult) } } } #' @rdname multipliers #' @export #' multipliers.uecm <- function(object, type = "lr", vcov_matrix = NULL, se = FALSE) { if (!(type %in% c("lr", "sr", 0:200))) { stop("'type' should be one of 'lr', 'sr' or a number between 0 and 200", call. 
= FALSE) } if (is.null(vcov_matrix)) vcov_matrix <- stats::vcov(object) if (type %in% 1:200) { return(multipliers(object = ardl(object), type = type, vcov_matrix = vcov_matrix, se = se)) } kw <- object$parsed_formula$kw objcoef <- object$coefficients orders_x <- object$order[-1] objnames_ws <- gsub(" ", "", names(objcoef), fixed = TRUE) objxvars <- object$parsed_formula$x_part$var if (type == "lr") { if (kw != 0) { multipliers_coef <- c( -objcoef[1:kw] / objcoef[kw + 1], -objcoef[(kw + 2):(kw + object$parsed_formula$kx + 1)] / objcoef[kw + 1] ) } else { multipliers_coef <- -objcoef[2:(object$parsed_formula$kx + 1)] / objcoef[1] } multipliers_coef_se <- delta_method(object, vcov_matrix = vcov_matrix) multipliers <- data.frame(multipliers_coef, multipliers_coef_se, multipliers_coef/multipliers_coef_se, 2 * stats::pt(-abs(multipliers_coef/multipliers_coef_se), stats::df.residual(object))) # df = n - # of estimated coefficients } else { # use gsub because sometimes the spacing goes weird Xt_1 <- c() Xt_1[orders_x != 0] <- gsub(" ", "", paste0("d(", objxvars, ")"), fixed = TRUE)[orders_x != 0] Xt_1[orders_x == 0] <- gsub(" ", "", objxvars[orders_x == 0], fixed = TRUE) if (kw != 0) { srm <- c(1:kw, which(objnames_ws %in% Xt_1)) multipliers <- summary(object)$coefficients[srm,] } else { srm <- objnames_ws %in% Xt_1 multipliers <- summary(object)$coefficients[srm,] } } if (kw != 0) { multipliers <- data.frame(c(names(objcoef)[1:kw], objxvars), multipliers) } else { multipliers <- data.frame(objxvars, multipliers) } names(multipliers) <- c("Term", "Estimate", "Std. Error", "t value", "Pr(>|t|)") rownames(multipliers) <- 1:nrow(multipliers) return(multipliers) }
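# Illustration (not part of the package): the core transformation in
# multipliers.uecm() above, shown with made-up coefficient values. Each long-run
# multiplier is the levels coefficient of a regressor divided by the negative of
# the levels coefficient of the lagged dependent variable.
pi_y <- -0.45                                    # coefficient on L(y, 1) in the UECM
pi_x <- c(LRY = 0.58, IBO = -0.30, IDE = 0.11)   # coefficients on the L(x_j, 1) terms
-pi_x / pi_y                                     # long-run multipliers theta_j = -pi_j / pi_y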
/scratch/gouwar.j/cran-all/cranData/ARDL/R/multipliers.R
#' Formula parser #' #' It parses the formula and separates the dependent, independent and fixed #' variables and also the constant and linear trends (if present). #' #' The notation we follow (e.g., using y, x, z, w etc.) is according to #' \cite{Pesaran et al. (2001)}. #' #' The \code{formula} should contain only variables that exist in the data #' provided through \code{data} plus some additional functions supported by #' \code{\link[dynlm]{dynlm}} (i.e., \code{trend()}). #' #' You can also specify fixed variables that are not supposed to be lagged (e.g. #' dummies etc.) simply by placing them after \code{|}. For example, \code{y ~ #' x1 + x2 | z1 + z2} where \code{z1} and \code{z2} are the fixed variables and #' should not be considered in \code{order}. Note that the \code{|} notion #' should not be confused with the same notion in \code{dynlm} where it #' introduces instrumental variables. #' #' @param formula A "formula" describing the linear model. Details for model #' specification are given under 'Details'. #' @param colnames_data A character vector containing the colnames of the data #' used in the formula (usually via \code{colnames(data)}). #' #' @return A list containing other lists with the names of the dependent, #' independent and fixed variables, the constant and linear trends and the #' number of variables in each category. #' #' @section References: Pesaran, M. H., Shin, Y., & Smith, R. J. (2001). Bounds #' testing approaches to the analysis of level relationships. \emph{Journal of #' Applied Econometrics}, 16(3), 289-326 #' #' @seealso \code{\link{parse_order}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' parse_formula <- function(formula, colnames_data) { # turn formula (from class = formula) into character formula <- Reduce(paste, deparse(formula)) # split at fixed_part at | formula <- stringr::str_split(formula, pattern = "[|]")[[1]] w_part <- list(var = "") if (length(formula) == 1) { fixed_part <- list(var = "") } else if (length(formula) > 2) { stop("Formula can not contain more than one '|' symbols.", call. = FALSE) } else { # split fixed_part at + and trim white spaces fixed_part <- stringr::str_split(formula[2], pattern = "[+]")[[1]] %>% stringr::str_trim(side = "both") %>% list(var = .) if (fixed_part$var[1] == "") stop("The '|' symbol must be followed by at least one variable name.", call. = FALSE) # check if linear/non-constant intercept exists in fixed_part temp <- stringr::str_sub(stringr::str_replace_all(fixed_part$var, " ", ""), 1, 6) %in% "trend(" | stringr::str_sub(stringr::str_replace_all(fixed_part$var, " ", ""), 1, 5) %in% "time(" if (sum(temp) != 0) { w_part$var <- fixed_part$var[temp] if (sum(temp) == length(fixed_part$var)) { fixed_part$var <- "" } else { fixed_part$var <- fixed_part$var[!(temp)] } } rm(temp) } formula <- stringr::str_split(formula[1], pattern = "[~]")[[1]] # split at ~ if (length(formula) == 1) { stop("The formula must contain the '~' symbol.", call. = FALSE) } else if (length(formula) > 2){ stop("The formula must contain exactly one '~' symbol.", call. = FALSE) } # split y_part at + (if any) and trim white spaces y_part <- stringr::str_split(formula[1], pattern = "[+]")[[1]] %>% stringr::str_trim(side = "both") %>% list(var = .) if ((length(y_part$var) > 1) | y_part$var[1] == "") { stop("The formula must contain exactly one dependent variable.", call. 
= FALSE) } # identify if there is an interpect exclusion formula = stringr::str_split(formula[2], pattern = "[-]")[[1]] %>% stringr::str_trim(side = "both") if (length(formula) == 2) { if (stringr::str_sub(formula[2], start = 1, end = 1) == "1") { if (w_part$var[1] != "") { # constant intercept exclusion w_part$var <- c("- 1", w_part$var) } else { w_part$var <- "- 1" } } else { stop("The formula can only accept the term '-1' for the exclusion of constant intercept.", call. = FALSE) } } else if (length(formula) > 2) { stop("The formula can only accept one '-1' constant intercept exclusion term.", call. = FALSE) } # split x_part at + and trim white spaces if (length(formula) != 1) { formula = paste0(formula[1], stringr::str_sub(formula[2], start = 2, end = stringr::str_count(formula[2]))) } x_part <- stringr::str_split(formula, pattern = "[+]")[[1]] %>% stringr::str_trim(side = "both") %>% list(var = .) if (x_part$var[1] == "") { warnings("The formula contains only an AR (Autoregressive) part.") } # check if linear/non-constant intercept exists in x_part temp <- stringr::str_sub(stringr::str_replace_all(x_part$var, " ", ""), 1, 6) %in% "trend(" | stringr::str_sub(stringr::str_replace_all(x_part$var, " ", ""), 1, 5) %in% "time(" if (sum(temp) != 0) { if (w_part$var[1] != "") { w_part$var <- c(w_part$var, x_part$var[temp]) } else { w_part$var <- x_part$var[temp] } if (sum(temp) == length(x_part$var)) { x_part$var <- "" } else { x_part$var <- x_part$var[!(temp)] } } rm(temp) # create the union set of variables if (y_part$var[1] == "") y_part$var <- c() if (x_part$var[1] == "") x_part$var <- c() if (fixed_part$var[1] == "") fixed_part$var <- c() if (w_part$var[1] == "") w_part$var <- c() z_part <- list(var = c(y_part$var, x_part$var)) kz <- length(z_part$var) kx <- length(x_part$var) temp <- stringr::str_sub(stringr::str_replace_all(w_part$var, " ", ""), 1, 5) kw <- if ((length(w_part$var) == 0) | (any(c("trend", "time(") %in% temp) & ("-1" %in% temp))) { 1 } else if (any(c("trend", "time(") %in% temp) & !("-1" %in% temp)) { 2 } else if (!any(c("trend", "time(") %in% temp) & ("-1" %in% temp)) { 0 } rm(temp) kfixed <- length(fixed_part$var) # check for correct function inputs # check if variables exist if (sum(!(c(z_part$var, fixed_part$var) %in% colnames_data)) >= 1) { stop(c("Variable(s) ", paste0("'", c(z_part$var, fixed_part$var)[!(c(z_part$var, fixed_part$var) %in% colnames_data)], sep = "', "), "not found in 'data'"), call. = FALSE) } return_list <- list(y_part = y_part, x_part = x_part, z_part = z_part, w_part = w_part, fixed_part = fixed_part, kz = kz, kx = kx, kw = kw, kfixed = kfixed) return(return_list) } #' Order parser #' #' It parses the order and checks the integrity of the order input. #' #' @param orders A numeric vector of the same length as the total number of #' variables (excluding the fixed ones). If the input is \code{order} or #' \code{max_order} it should only contain positive integers or 0. If the #' input is \code{fixed_order} it should also contain the value '-1' #' indicating that a specific order should not be fixed. An integer could be #' provided if all variables are of the same order (or all '-1' in the case of #' \code{fixed_order}). #' @param order_name The name of the function argument that is passed into #' \code{order}. #' @param var_names The names of the variables corresponding to the orders. #' @param kz An integer. The number of dependent and independent variables. 
#' @param restriction When the input in \code{orders} is either \code{order} or #' \code{max_order} it should be \code{FALSE} (default). When the input is #' \code{fixed_order} it should be '-1' indicating that the input in #' \code{orders} is a restriction for the 'order' of the model (either upper #' bound or fixed order). #' #' @return A numeric vector of the same length as the total number of variables #' (excluding the fixed ones). #' #' @seealso \code{\link{parse_formula}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' parse_order <- function(orders, order_name, var_names, kz, restriction = FALSE) { if (restriction == FALSE) { restriction = 0 } # check if orders length is correct if (!(length(orders) %in% c(1, kz))) { stop("The length of '", order_name, "' is not correct", call. = FALSE) } # check for AR(0) models if (orders[1] == 0) { stop("AR(0) models are not allowed. The order of the dependent variable should be a positive integer", call. = FALSE) } # check for negatives (or other restriction e.g. -1) and decimals in orders if ((any(orders < restriction)) | (sum(orders %% 1) != 0)) { if (restriction == 0) { stop("'", order_name, "' can only contain positive integers or 0", call. = FALSE) } else if (restriction == (-1)) { stop("'", order_name, "' can only contain positive integers, 0 or '-1'", call. = FALSE) } } if (length(orders) == 1) { orders <- rep(orders[1], kz) } names(orders) <- var_names return(orders) } #' Case parser #' #' It parses the 'case' and checks the integrity of the 'case' input and the #' compatibility with the formula. #' #' @inherit recm details #' #' @param parsed_formula A list containing the formula parts as returned from #' \code{\link{parse_formula}}. #' @inheritParams recm #' #' @return An integer from 1-5 representing the case. #' #' @inheritSection recm References #' @seealso \code{\link{parse_formula}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords internal #' parse_case <- function(parsed_formula, case) { if ((length(case) != 1) | !(case[1]) %in% c(1:5, 'n', 'rc', 'uc', 'ucrt', 'ucut')) { stop("'case' must be a number between 1 and 5 or one of the 'n', 'rc', 'uc', 'ucrt' or 'ucut'", call. = FALSE) } else if (case == "n") { case = 1 } else if (case == "rc") { case = 2 } else if (case == "uc") { case = 3 } else if (case == "ucrt") { case = 4 } else if (case == "ucut") { case = 5 } w_check <- stringr::str_sub(stringr::str_replace_all(parsed_formula$w_part$var, " ", ""), 1, 5) if ((case == 1) & (any(c("trend", "time(") %in% w_check ) | !("-1" %in% w_check))) { stop("Trying to impose case 1 (no constant, no linear trend) but the underlying ARDL model includes at least one of them", call. = FALSE) } else if ((case %in% c(2,3)) & any(c("-1", "trend", "time(") %in% w_check)) { stop("Trying to impose case ", ifelse(case == 2, paste0(case, " (restricted constant, no linear trend)"), paste0(case, " (unrestricted constant, no linear trend)")), " but the underlying ARDL model either doesn't include a constant or includes a trend", call. = FALSE) } else if ((case %in% c(4,5)) & (!any(c("-1", "trend", "time(") %in% w_check))) { stop("Trying to impose case ", ifelse(case == 4, paste0(case, " (unrestricted constant, restricted linear trend)"), paste0(case, " (unrestricted constant, unrestricted linear trend)")), " but the underlying ARDL model doesn't include one or both of them", call. = FALSE) } return(case) }
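# Illustration (not part of the package): a stripped-down sketch of the
# splitting strategy parse_formula() uses above: fixed variables are separated
# at "|", the dependent and independent parts at "~", and the regressors at "+".
# All of the trend/intercept handling and validity checks are omitted here.
f <- "LRM ~ LRY + IBO + IDE | D7475"
parts   <- stringr::str_trim(stringr::str_split(f, pattern = "[|]")[[1]])
lhs_rhs <- stringr::str_trim(stringr::str_split(parts[1], pattern = "[~]")[[1]])
list(y     = lhs_rhs[1],
     x     = stringr::str_trim(stringr::str_split(lhs_rhs[2], pattern = "[+]")[[1]]),
     fixed = stringr::str_trim(stringr::str_split(parts[2], pattern = "[+]")[[1]]))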
/scratch/gouwar.j/cran-all/cranData/ARDL/R/parsers.R
#' Create plots for the delay multipliers #' #' Creates plots for the delay multipliers and their uncertainty intervals based #' on their estimated standard errors. This is a basic #' \code{\link[ggplot2]{ggplot}} with a few customizable parameters. #' #' @param multipliers A list returned from \code{\link{multipliers}}, #' in which \code{type} is a positive integer to return delay multipliers. #' @param facets_ncol If a positive integer, it indicates the number of the #' columns in the facet. If FALSE, each plot is created separately. The default #' is 2. #' @param interval If FALSE (default), no uncertainty intervals are drawn. If a #' positive integer, the intervals are this number times the standard error. If #' a number between 0 and 1 (e.g. 0.95), the equivalent confidence interval is #' drawn (e.g. 95\% CI). In case of the confidence intervals, they are based on #' the Gaussian distribution. #' @param interval_color The color of the uncertainty intervals. Default is #' "blue". #' @param show.legend A logical indicating whether the interval legend is shown. #' Default is FALSE. #' @param xlab,ylab Names displayed at the x and y axes respectively. Default is #' "Period" and "Delay" respectively. #' @param ... Currently unused argument. #' #' @return \code{plot_delay} returns a number of \code{\link[ggplot2]{ggplot}} #' objects. #' #' @seealso \code{\link{multipliers}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords iplots #' @export #' @examples #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' delay_mult <- multipliers(ardl_3132, type = 12, se = TRUE) #' #' ## Simply plot the delay multipliers ----------------------------------- #' #' plot_delay(delay_mult) #' #' ## Rearrange them ------------------------------------------------------ #' #' plot_delay(delay_mult, facets_ncol = 1) #' #' ## Add 1 standard deviation uncertainty intervals ---------------------- #' #' plot_delay(delay_mult, interval = 1) #' #' ## Add 95% confidence intervals, change color and add legend ----------- #' #' plot_delay(delay_mult, interval = 0.95, interval_color = "darkgrey", #' show.legend = TRUE) plot_delay <- function(multipliers, facets_ncol = 2, interval = FALSE, interval_color = "blue", show.legend = FALSE, xlab = "Period", ylab = "Delay", ...) { plot_data <- function(mult, .data) { plot <- ggplot2::ggplot(mult, ggplot2::aes(x = .data[["Period"]], y = .data[["Delay"]])) + ggplot2::geom_line() + ggplot2::labs(x = xlab, y = ylab) + ggplot2::theme_minimal() if (!isFALSE(interval)) { adjustment <- ifelse((interval %% 1) == 0, interval, stats::qnorm(1-(1-interval)/2)) plot <- plot + ggplot2::geom_ribbon(ggplot2::aes(ymin = .data[["Delay"]] - adjustment*.data[["Std. Error Delay"]], ymax = .data[["Delay"]] + adjustment*.data[["Std. Error Delay"]], fill = "Std. Error Delay"), alpha = 0.3, show.legend = show.legend) + ggplot2::scale_fill_manual(name = "", values = interval_color, labels = ifelse((interval %% 1) == 0, paste0(interval, " SE"), paste0(100*interval, "%", " CI"))) + ggplot2::theme(legend.position = "bottom") } plot } plots <- lapply(names(multipliers), function(name) { plot_data(multipliers[[name]]) + ggplot2::labs(title = name) + ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5)) }) if (!isFALSE(facets_ncol)) { gridExtra::grid.arrange(grobs = plots, ncol = facets_ncol) } else { plots } }
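## How the `interval` argument above is interpreted (illustrative sketch):
## an integer is used directly as a multiple of the standard error, while a
## value in (0, 1) is converted to the corresponding Gaussian quantile.
# interval <- 0.95
# stats::qnorm(1 - (1 - interval) / 2)   # ~1.96 standard errors, i.e. a 95% CI half-width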
/scratch/gouwar.j/cran-all/cranData/ARDL/R/plot_delay.R
#' Create plot for the long-run (cointegrating) equation #' #' Creates a plot for the long-run relationship in comparison with the dependent #' variable, and the fitted values of the model. This is a basic #' \code{\link[ggplot2]{ggplot}} with a few customizable parameters. #' #' @param object An object of \code{\link[base]{class}} `ardl`. #' @param coint_eq The objected returned from \code{\link{coint_eq}}. #' @param facets A logical indicating whether the long-run relationship appears #' in a separate plot. Default is FALSE. #' @param show_fitted A logical indicating whether the fitted values are shown. #' Default is FALSE. #' @param show.legend A logical indicating whether the legend is shown. #' Default is FALSE. #' @param xlab Name displayed at the x axis. Default is "Time". #' @param ... Currently unused argument. #' #' @return \code{plot_lr} returns a \code{\link[ggplot2]{ggplot}} object. #' #' @seealso \code{\link{coint_eq}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords iplots #' @export #' #' @examples #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' ce2 <- coint_eq(ardl_3132, case = 2) #' #' plot_lr(ardl_3132, coint_eq = ce2) #' #' ## Compare fitted values and place long-run relationship separately ---- #' #' ce3 <- coint_eq(ardl_3132, case = 3) #' plot_lr(ardl_3132, coint_eq = ce3, facets = TRUE, show_fitted = TRUE, #' show.legend = TRUE) #' plot_lr <- function(object, coint_eq, facets = FALSE, show_fitted = FALSE, show.legend = FALSE, xlab = "Time", ...) { plot_data <- zoo::cbind.zoo(object$data[,1], coint_eq, object$fitted.values) lr_name <- "long-run" fit_name <- "fitted" names(plot_data) <- c(object$parsed_formula$y_part$var, lr_name, fit_name) plot_function <- function(object, coint_eq, facets, show_fitted, show.legend, xlab, .data) { facet1 <- ggplot2::ggplot(plot_data, ggplot2::aes(x = zoo::index(plot_data))) + ggplot2::geom_line(ggplot2::aes(y = .data[[object$parsed_formula$y_part$var]], color = object$parsed_formula$y_part$var), show.legend = show.legend) + ggplot2::labs(x = xlab, y = object$parsed_formula$y_part$var, color = "") + ggplot2::theme_minimal() + ggplot2::theme(legend.position = "bottom") if (!facets) { facet1 <- facet1 + ggplot2::geom_line(ggplot2::aes(y = .data[[lr_name]], color = lr_name), show.legend = show.legend) if (show_fitted) { facet1 <- facet1 + ggplot2::geom_line(ggplot2::aes(y = .data[[fit_name]], color = fit_name), na.rm = TRUE, show.legend = show.legend) + ggplot2::scale_color_manual(guide = "legend", values = c("black", "red", "blue"), breaks = c(object$parsed_formula$y_part$var, lr_name, fit_name), labels = c(object$parsed_formula$y_part$var, lr_name, fit_name)) } else { facet1 <- facet1 + ggplot2::scale_color_manual(guide = "legend", values = c("black", "red"), breaks = c(object$parsed_formula$y_part$var, lr_name), labels = c(object$parsed_formula$y_part$var, lr_name)) } } else { if (show_fitted) { facet1 <- facet1 + ggplot2::geom_line(ggplot2::aes(y = .data[[fit_name]], color = fit_name), na.rm = TRUE, show.legend = show.legend) + ggplot2::scale_color_manual(guide = "legend", labels = c(object$parsed_formula$y_part$var, fit_name), breaks = c(object$parsed_formula$y_part$var, fit_name), values = c("black", "blue")) } else { facet1 <- facet1 + ggplot2::scale_color_manual(guide = "legend", values = "black", breaks = object$parsed_formula$y_part$var, labels = object$parsed_formula$y_part$var) } } if (facets) { facet2 <- ggplot2::ggplot(plot_data, ggplot2::aes(x = 
zoo::index(plot_data))) + ggplot2::geom_line(ggplot2::aes(y = .data[[lr_name]], color = lr_name), show.legend = show.legend) + ggplot2::labs(x = xlab, y = object$parsed_formula$y_part$var, color = "") + ggplot2::scale_color_manual(values = "red", labels = lr_name) + ggplot2::theme_minimal() + ggplot2::theme(legend.position = "bottom") gridExtra::grid.arrange(facet1, facet2, ncol = 1) } else { facet1 } } plot_function(object = object, coint_eq = coint_eq, facets = facets, show_fitted = show_fitted, show.legend = show.legend, xlab = xlab) }
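## The series plotted above are simply the dependent variable, the cointegrating
## equation and the fitted values aligned on a common time index (sketch, reusing
## the objects from the examples above):
# ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2))
# ce2 <- coint_eq(ardl_3132, case = 2)
# head(zoo::cbind.zoo(ardl_3132$data[, 1], ce2, ardl_3132$fitted.values))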
/scratch/gouwar.j/cran-all/cranData/ARDL/R/plot_lr.R
#' Restricted ECM regression #' #' Creates the Restricted Error Correction Model (RECM). This is the conditional #' RECM, which is the RECM of the underlying ARDL. #' #' Note that the statistical significance of 'ect' in a RECM should not be #' tested using the corresponding t-statistic (or the p-value) because it #' doesn't follow a standard t-distribution. Instead, the #' \code{\link{bounds_t_test}} should be used. #' #' @param object An object of \code{\link[base]{class}} 'ardl' or 'uecm'. #' @param case An integer from 1-5 or a character string specifying whether the #' 'intercept' and/or the 'trend' have to participate in the short-run or the #' long-run relationship (cointegrating equation) (see section 'Cases' below). #' #' @return \code{recm} returns an object of \code{\link[base]{class}} #' \code{c("dynlm", "lm", "recm")}. In addition, attributes 'order', 'data', #' 'parsed_formula' and 'full_formula' are provided. #' #' @section Mathematical Formula: The formula of a Restricted ECM conditional to #' an \eqn{ARDL(p,q_{1},\dots,q_{k})}{ARDL(p,q1,...,qk)} is: \deqn{\Delta #' y_{t} = c_{0} + c_{1}t + \sum_{i=1}^{p-1}\psi_{y,i}\Delta y_{t-i} + #' \sum_{j=1}^{k}\sum_{l=1}^{q_{j}-1} \psi_{j,l}\Delta x_{j,t-l} + #' \sum_{j=1}^{k}\omega_{j}\Delta x_{j,t} + \pi_{y}ECT_{t} + \epsilon_{t}} #' \deqn{\psi_{j,l} = 0 \;\; \forall \;\; q_{j} = 1, \psi_{j,l} = \omega_{j} = #' 0 \;\; \forall \;\; q_{j} = 0} #' \describe{ #' \item{Under Case 1:}{\itemize{ #' \item \eqn{c_{0}=c_{1}=0} #' \item \eqn{ECT = y_{t-1} - (\sum_{j=1}^{k} \theta_{j} x_{j,t-1})}}} #' \item{Under Case 2:}{\itemize{ #' \item \eqn{c_{0}=c_{1}=0} #' \item \eqn{ECT = y_{t-1} - (\mu + \sum_{j=1}^{k}\theta_{j} x_{j,t-1})}}} #' \item{Under Case 3:}{\itemize{ #' \item \eqn{c_{1}=0} #' \item \eqn{ECT = y_{t-1} - (\sum_{j=1}^{k} \theta_{j} x_{j,t-1})}}} #' \item{Under Case 4:}{\itemize{ #' \item \eqn{c_{1}=0} #' \item \eqn{ECT = y_{t-1} - (\delta(t-1)+ \sum_{j=1}^{k} \theta_{j} x_{j,t-1})}}} #' \item{Under Case 5:}{\itemize{ #' \item \eqn{ECT = y_{t-1} - (\sum_{j=1}^{k} \theta_{j} x_{j,t-1})}}} #' } #' {In all cases,} \eqn{x_{j,t-1}} {in} \eqn{ECT} {is replaced by} \eqn{x_{j,t} \;\;\;\;\; \forall \;\; q_{j} = 0} #' #' @section Cases: According to \cite{Pesaran et al. (2001)}, we distinguish the #' long-run relationship (cointegrating equation) (and thus the bounds-test and #' the Restricted ECMs) between 5 different cases. These differ in terms of #' whether the 'intercept' and/or the 'trend' are restricted to participate in #' the long-run relationship or they are unrestricted and so they participate in #' the short-run relationship. #' #' \describe{ #' \item{Case 1:}{\itemize{ #' \item No \emph{intercept} and no \emph{trend}. #' \item \code{case} inputs: 1 or "n" where "n" stands for none.}} #' \item{Case 2:}{\itemize{ #' \item Restricted \emph{intercept} and no \emph{trend}. #' \item \code{case} inputs: 2 or "rc" where "rc" stands for restricted #' constant.}} #' \item{Case 3:}{\itemize{ #' \item Unrestricted \emph{intercept} and no \emph{trend}. #' \item \code{case} inputs: 3 or "uc" where "uc" stands for unrestricted #' constant.}} #' \item{Case 4:}{\itemize{ #' \item Unrestricted \emph{intercept} and restricted \emph{trend}. #' \item \code{case} inputs: 4 or "ucrt" where "ucrt" stands for #' unrestricted constant and restricted trend.}} #' \item{Case 5:}{\itemize{ #' \item Unrestricted \emph{intercept} and unrestricted \emph{trend}. 
#' \item \code{case} inputs: 5 or "ucut" where "ucut" stands for #' unrestricted constant and unrestricted trend.}} #' } #' #' Note that you can't restrict (or leave unrestricted) a parameter that doesn't #' exist in the input model. For example, you can't compute \code{recm(object, #' case=3)} if the object is an ARDL (or UECM) model with no intercept. The same #' way, you can't compute \code{bounds_f_test(object, case=5)} if the object is #' an ARDL (or UECM) model with no linear trend. #' #' @section References: Pesaran, M. H., Shin, Y., & Smith, R. J. (2001). Bounds #' testing approaches to the analysis of level relationships. \emph{Journal of #' Applied Econometrics}, 16(3), 289-326 #' #' @seealso \code{\link{ardl}} \code{\link{uecm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords models ts #' @export #' @examples #' data(denmark) #' #' ## Estimate the RECM, conditional to it's underlying ARDL(3,1,3,2) ----- #' #' # Indirectly from an ARDL #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' recm_3132 <- recm(ardl_3132, case = 2) #' #' # Indirectly from an UECM #' uecm_3132 <- uecm(ardl_3132) #' recm_3132_ <- recm(uecm_3132, case = 2) #' identical(recm_3132, recm_3132_) #' summary(recm_3132) #' #' ## Error Correction Term (ect) & Speed of Adjustment ------------------- #' #' # The coefficient of the ect, #' # shows the Speed of Adjustment towards equilibrium. #' # Note that this can be also be obtained from an UECM, #' # through the coefficient of the term L(y, 1) (where y is the dependent variable). #' tail(recm_3132$coefficients, 1) #' uecm_3132$coefficients[2] #' #' ## Post-estimation testing --------------------------------------------- #' #' # See examples in the help file of the uecm() function #' recm <- function(object, case) { # no visible binding for global variable NOTE solution y <- NULL; rm(y) class(object)[4] <- "recm_indicator" coint_eq_list <- coint_eq(object = object, case = case) design_matrix <- coint_eq_list$design_matrix coint_eq <- coint_eq_list$coint_eq data <- coint_eq_list$data parsed_formula <- coint_eq_list$parsed_formula case <- coint_eq_list$case order <- coint_eq_list$order recm_formula <- build_recm_formula(parsed_formula = parsed_formula, order = order, case = case) if (0 %in% order) { design_matrix <- stats::ts(design_matrix, start = stats::start(data), frequency = stats::frequency(data)) x_0_order <- colnames(design_matrix)[-c(1:3)][order[-1] == 0] x_q_order <- colnames(design_matrix)[-1][!(colnames(design_matrix)[-1] %in% x_0_order)] coint_eq <- rowSums(zoo::cbind.zoo(stats::lag(design_matrix[,x_q_order], -1), design_matrix[,x_0_order])) coint_eq <- stats::ts(coint_eq, start = stats::start(data), frequency = stats::frequency(data)) data_full <- zoo::cbind.zoo(data, stats::lag(design_matrix[,"y"], -1) - coint_eq) } else { # error term, u u <- design_matrix %>% dplyr::mutate(u = y - coint_eq) %>% dplyr::select(u) %>% unlist() %>% stats::ts(., start = stats::start(data), frequency = stats::frequency(data)) # create a new data table with the lagged error term u, naming the error correction term (ect) data_full <- zoo::cbind.zoo(data, stats::lag(u, -1)) } colnames(data_full) <- c(colnames(data), "ect") data <- data_full; rm(data_full) full_formula <- stats::formula(recm_formula$full) start <- start(object) end <- end(object) recm_model <- dynlm::dynlm(full_formula, data = data, start = start, end = end) # for model compatibility in the global env attr(recm_model$terms, ".Environment") <- 
.GlobalEnv attr(attr(recm_model$model, "terms"), ".Environment") <- .GlobalEnv attr(full_formula, ".Environment") <- .GlobalEnv attr(recm_model, "class") <- c(class(recm_model), "recm") recm_model$order <- order recm_model$data <- data recm_model$parsed_formula <- parsed_formula recm_model$full_formula <- full_formula return(recm_model) }
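## As noted in the details above, the t-statistic of 'ect' in the RECM should not
## be judged against a standard t-distribution. A sketch of the recommended route;
## the choice of case here is an assumption made only for illustration:
# ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2))
# recm_3132 <- recm(ardl_3132, case = 2)
# bounds_t_test(ardl_3132, case = 3)   # bounds t-test on the underlying model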
/scratch/gouwar.j/cran-all/cranData/ARDL/R/recm.R
#' Convert dynlm model (ardl, uecm, recm) to lm model #' #' Takes a \code{\link[dynlm]{dynlm}} model of \code{\link[base]{class}} 'ardl', #' 'uecm' or 'recm' and converts it into an \code{\link[stats]{lm}} model. This #' can help using the model as a regular \code{\link[stats]{lm}} model with #' functions that are not compatible with \code{\link[dynlm]{dynlm}} models such #' as the \code{\link[stats]{predict}} function to forecast. #' #' @param object An object of \code{\link[base]{class}} 'ardl', 'uecm' or 'recm'. #' @param fix_names A logical, indicating whether the variable names should be #' rewritten without special functions and character in the names such as "d()" #' or "L()". When \code{fix_names = TRUE}, the characters "(", and "," are #' replaces with ".", and ")" and spaces are deleted. The name of the dependent #' variable is always transformed, regardless of the value of this parameter. #' Default is FALSE. #' @param data_class If "ts", it converts the data class to #' \code{\link[stats]{ts}} (see examples for its usage). The default is #' \code{\link[base]{NULL}}, which uses the same data provided in the original #' object. #' @param ... Currently unused argument. #' #' @return \code{to_lm} returns an object of \code{\link[base]{class}} #' \code{"lm"}. #' #' @seealso \code{\link{ardl}}, \code{\link{uecm}}, \code{\link{recm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords models ts #' @export #' @examples #' ## Convert ARDL into lm ------------------------------------------------ #' #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' ardl_3132_lm <- to_lm(ardl_3132) #' summary(ardl_3132)$coefficients #' summary(ardl_3132_lm)$coefficients #' #' ## Convert UECM into lm ------------------------------------------------ #' #' uecm_3132 <- uecm(ardl_3132) #' uecm_3132_lm <- to_lm(uecm_3132) #' summary(uecm_3132)$coefficients #' summary(uecm_3132_lm)$coefficients #' #' ## Convert RECM into lm ------------------------------------------------ #' #' recm_3132 <- recm(ardl_3132, case = 2) #' recm_3132_lm <- to_lm(recm_3132) #' summary(recm_3132)$coefficients #' summary(recm_3132_lm)$coefficients #' #' ## Use the lm model to forecast ---------------------------------------- #' #' # Forecast using the in-sample data #' insample_data <- ardl_3132$model #' head(insample_data) #' predicted_values <- predict(ardl_3132_lm, newdata = insample_data) #' #' # The predicted values are expected to be the same as the fitted values #' ardl_3132$fitted.values #' predicted_values #' #' # Convert to ts class for the plot #' predicted_values <- ts(predicted_values, start = c(1974,4), frequency=4) #' plot(denmark$LRM, lwd=4) #The input dependent variable #' lines(ardl_3132$fitted.values, lwd=4, col="blue") #The fitted values #' lines(predicted_values, lty=2, lwd=2, col="red") #The predicted values #' #' ## Convert to lm for post-estimation testing --------------------------- #' #' # Ramsey's RESET test for functional form #' library(lmtest) # for resettest() #' library(strucchange) # for efp(), and sctest() #' #' \dontrun{ #' # This produces an error. #' # resettest() cannot use data of class 'zoo' such as the 'denmark' data #' # used to build the original model #' resettest(uecm_3132, type = c("regressor")) #' } #' #' uecm_3132_lm <- to_lm(uecm_3132, data_class = "ts") #' resettest(uecm_3132_lm, power = 2) #' #' # CUSUM test for structural change detection #' \dontrun{ #' # This produces an error. 
#' # efp() does not understand special functions such as "d()" and "L()" #' efp(uecm_3132$full_formula, data = uecm_3132$model) #' } #' #' uecm_3132_lm_names <- to_lm(uecm_3132, fix_names = TRUE) #' fluctuation <- efp(uecm_3132_lm_names$full_formula, #' data = uecm_3132_lm_names$model) #' sctest(fluctuation) #' plot(fluctuation) #' to_lm <- function(object, fix_names = FALSE, data_class = NULL, ...) { objmodel <- object$model if (!is.null(data_class)) { if (data_class == "ts") { objmodel <- stats::ts(objmodel, start = stats::start(objmodel[,1]), frequency = stats::frequency(objmodel[,1])) } } dep_var <- colnames(objmodel)[1] fix_names_fun <- function(text) { text <- gsub(" ", "", text) %>% gsub("(", ".", ., fixed = TRUE) %>% gsub(")", "", ., fixed = TRUE) %>% gsub(",", ".", ., fixed = TRUE) return(text) } y <- fix_names_fun(dep_var) if (fix_names) { colnames(objmodel) <- sapply(colnames(objmodel), fix_names_fun) if (attr(object$terms,"intercept") == 0) { formula <- formula(paste0(y, " ~ . -1")) } else { formula <- formula(paste0(y, " ~ .")) } full_formula <- as.character(object$full_formula) lm_model <- stats::lm(formula, data = objmodel) lm_model$full_formula <- stats::formula(paste0(y, "~", fix_names_fun(full_formula[3]))) return(lm_model) } else { colnames(objmodel)[1] <- y if (attr(object$terms,"intercept") == 0) { formula <- formula(paste0(y, " ~ . -1")) } else { formula <- formula(paste0(y, " ~ .")) } return(stats::lm(formula, data = objmodel)) } }
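## What `fix_names = TRUE` does to the dynlm variable names (illustration of the
## substitution rules coded in fix_names_fun above): spaces are dropped, "(" and
## "," become "." and ")" is removed, so e.g.
##   "d(L(LRM, 1))"  ->  "d.L.LRM.1"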
/scratch/gouwar.j/cran-all/cranData/ARDL/R/to_lm.R
#' Unrestricted ECM regression #' #' \code{uecm} is a generic function used to construct Unrestricted Error #' Correction Models (UECM). The function invokes two different #' \code{\link[utils]{methods}}. The default method works exactly like #' \code{\link{ardl}}. The other method requires an object of #' \code{\link[base]{class}} 'ardl'. Both methods create the conditional UECM, #' which is the UECM of the underlying ARDL. #' #' @inherit ardl details #' #' @return \code{uecm} returns an object of \code{\link[base]{class}} #' \code{c("dynlm", "lm", "uecm")}. In addition, attributes 'order', 'data', #' 'parsed_formula' and 'full_formula' are provided. #' #' @section Mathematical Formula: The formula of an Unrestricted ECM conditional #' to an \eqn{ARDL(p,q_{1},\dots,q_{k})}{ARDL(p,q1,...,qk)} is: \deqn{\Delta #' y_{t} = c_{0} + c_{1}t + \pi_{y}y_{t-1} + \sum_{j=1}^{k}\pi_{j}x_{j,t-1} + #' \sum_{i=1}^{p-1}\psi_{y,i}\Delta y_{t-i} + #' \sum_{j=1}^{k}\sum_{l=1}^{q_{j}-1} \psi_{j,l}\Delta x_{j,t-l} + #' \sum_{j=1}^{k}\omega_{j}\Delta x_{j,t} + \epsilon_{t}} #' \deqn{\psi_{j,l} = 0 \;\; \forall \;\; q_{j} \leq 1, \;\;\;\;\; \psi_{y,i} #' = 0 \;\; if \;\; p = 1} #' {In addition,} \eqn{x_{j,t-1}} {and} \eqn{\Delta x_{j,t}} {cancel out #' becoming} \eqn{x_{j,t} \;\; \forall \;\; q_{j} = 0} #' #' @seealso \code{\link{ardl}} \code{\link{recm}} #' @author Kleanthis Natsiopoulos, \email{klnatsio@@gmail.com} #' @keywords models ts #' @export #' @examples #' data(denmark) #' #' ## Estimate the UECM, conditional to it's underlying ARDL(3,1,3,2) ----- #' #' # Indirectly #' ardl_3132 <- ardl(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' uecm_3132 <- uecm(ardl_3132) #' #' # Directly #' uecm_3132_ <- uecm(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2)) #' identical(uecm_3132, uecm_3132_) #' summary(uecm_3132) #' #' ## Post-estimation testing --------------------------------------------- #' #' library(lmtest) # for bgtest(), bptest(), and resettest() #' library(tseries) # for jarque.bera.test() #' library(strucchange) # for efp(), and sctest() #' #' # Breusch-Godfrey test for higher-order serial correlation #' bgtest(uecm_3132, order = 4) #' #' # Breusch-Pagan test against heteroskedasticity #' bptest(uecm_3132) #' #' # Ramsey's RESET test for functional form #' \dontrun{ #' # This produces an error. #' # resettest() cannot use data of class 'zoo' such as the 'denmark' data #' # used to build the original model #' resettest(uecm_3132, type = c("regressor")) #' } #' #' uecm_3132_lm <- to_lm(uecm_3132, data_class = "ts") #' resettest(uecm_3132_lm, power = 2) #' #' # Jarque-Bera test for normality #' jarque.bera.test(residuals(uecm_3132)) #' #' # CUSUM test for structural change detection #' \dontrun{ #' # This produces an error. #' # efp() does not understand special functions such as "d()" and "L()" #' efp(uecm_3132$full_formula, data = uecm_3132$model) #' } #' #' uecm_3132_lm_names <- to_lm(uecm_3132, fix_names = TRUE) #' fluctuation <- efp(uecm_3132_lm_names$full_formula, #' data = uecm_3132_lm_names$model) #' sctest(fluctuation) #' plot(fluctuation) #' uecm <- function(...) { UseMethod("uecm") } #' @rdname uecm #' #' @param object An object of \code{\link[base]{class}} 'ardl'. #' #' @export #' uecm.ardl <- function(object, ...) 
{ parsed_formula <- object$parsed_formula order <- object$order data <- object$data start <- start(object) end <- end(object) uecm_formula <- build_uecm_formula(parsed_formula = parsed_formula, order = order) full_formula <- stats::formula(uecm_formula$full) uecm_model <- dynlm::dynlm(full_formula, data = data, start = start, end = end) # for model compatibility in the global env attr(uecm_model$terms, ".Environment") <- .GlobalEnv attr(attr(uecm_model$model, "terms"), ".Environment") <- .GlobalEnv attr(full_formula, ".Environment") <- .GlobalEnv uecm_model$order <- order uecm_model$data <- data uecm_model$parsed_formula <- parsed_formula uecm_model$full_formula <- full_formula attr(uecm_model, "class") <- c(class(uecm_model), "uecm") return(uecm_model) } #' @rdname uecm #' #' @param order A specification of the order of the underlying ARDL model (e.g., #' for the UECM of an ARDL(1,0,2) model it should be \code{order = c(1,0,2)}). #' A numeric vector of the same length as the total number of variables #' (excluding the fixed ones, see 'Details'). It should only contain positive #' integers or 0. An integer could be provided if all variables are of the #' same order. #' @inheritParams ardl #' #' @export #' uecm.default <- function(formula, data, order, start = NULL, end = NULL, ...) { if (!any(c("ts", "zoo", "zooreg") %in% class(data))) { data <- stats::ts(data, start = 1, end = nrow(data), frequency = 1) } parsed_formula <- parse_formula(formula = formula, colnames_data = colnames(data)) order <- parse_order(orders = order, order_name = "order", var_names = parsed_formula$z_part$var, kz = parsed_formula$kz) uecm_formula <- build_uecm_formula(parsed_formula = parsed_formula, order = order) full_formula <- stats::formula(uecm_formula$full) uecm_model <- dynlm::dynlm(full_formula, data = data, start = start, end = end, ...) # for model compatibility in the global env attr(uecm_model$terms, ".Environment") <- .GlobalEnv attr(attr(uecm_model$model, "terms"), ".Environment") <- .GlobalEnv attr(full_formula, ".Environment") <- .GlobalEnv uecm_model$order <- order uecm_model$data <- data uecm_model$parsed_formula <- parsed_formula uecm_model$full_formula <- full_formula attr(uecm_model, "class") <- c(class(uecm_model), "uecm") return(uecm_model) }
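## Both methods build the same underlying dynlm specification; the generated
## formula can be inspected directly (sketch, reusing the example model above):
# uecm_3132 <- uecm(LRM ~ LRY + IBO + IDE, data = denmark, order = c(3,1,3,2))
# uecm_3132$full_formula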
/scratch/gouwar.j/cran-all/cranData/ARDL/R/uecm.R
#' Lasso #' #' This function performs lasso regression using the cv.glmnet function, #' then refits the model using ordinary least squares. #' #' @param data A data frame or matrix containing the predictors and response. #' The response must be in the first column. #' @param index A numeric vector of indices indicating the rows of 'data' to #' use for the lasso regression. #' @param ols_ps A logical scalar. If TRUE (default), the function returns the #' coefficients from the OLS fit. If FALSE, it returns the #' coefficients from the lasso fit. #' #' @return A numeric vector of coefficients. If 'ols_ps' is TRUE, these are the #' coefficients from the OLS fit. If 'ols_ps' is FALSE, these are the #' coefficients from the lasso fit. If an error occurs during the lasso #' or OLS fit, the function returns a vector of NAs. #' #' @export #' @import Matrix #' @import glmnet #' @import tidyverse #' @importFrom stats lm #' @importFrom stats coef #' @importFrom stats as.formula #' @importFrom stats predict #' @importFrom stats BIC #' @importFrom glmnet cv.glmnet lasso <- function(data, index, ols_ps = TRUE) { tryCatch({ x <- as.matrix(data[index,-1]) y <- as.matrix(data[index, 1, drop = FALSE]) lasso_init <- cv.glmnet(x, y, alpha = 1, intercept = TRUE) #alpha=1, lasso lasso_init_lambda_min <- lasso_init$lambda.min lambda_init_grid <- lasso_init$lambda coef <- as.numeric(coef(lasso_init, lasso_init_lambda_min)) if (lasso_init$lambda.min == lasso_init$lambda[length(lasso_init$lambda)]) { lower_bound_grid <- lasso_init$lambda.min / 10 upper_bound_grid <- min(lasso_init$lambda[1], 1.1 * lasso_init$lambda.min) lambda_grid <- seq(upper_bound_grid, lower_bound_grid, length = 100) lasso_new <- cv.glmnet(x, y, alpha = 1, lambda = lambda_grid, intercept = TRUE) lasso_second_grid <- lasso_new$lambda coef <- as.numeric(coef(lasso_new, lasso_new$lambda.min)) } threshold_sequence <- 10 ^ (-8:1) lasso_final_coefficients_list <- lapply(threshold_sequence, function(x) { ifelse(abs(coef) <= x, 0, coef) }) coef_logical_list <- sapply(lasso_final_coefficients_list, function(e) { !all(e == 0) }) lasso_final_coefficients_list <- lasso_final_coefficients_list[which(coef_logical_list)] ols_list <- lapply(lasso_final_coefficients_list, function(e) { coef_nonzero <- e != 0 if (sum(coef_nonzero) > 0) { if (coef_nonzero[1] & any(coef_nonzero[-1])) { selected_x <- x[, coef_nonzero[-1], drop = FALSE] ols <- lm(y ~ as.matrix(selected_x)) } else if (coef_nonzero[1]) { ols <- lm(y ~ 1) } else { selected_x <- x[, coef_nonzero[-1], drop = FALSE] ols <- lm(y ~ 0 + as.matrix(selected_x)) } } }) bic_min_list <- lapply(ols_list, function(e) { BIC(e) }) lasso_ols_coefficients_list <- lapply(seq_along(lasso_final_coefficients_list), function(e) { coef_nonzero <- lasso_final_coefficients_list[[e]] != 0 vect_coef <- rep(0, ncol(data)) vect_coef[coef_nonzero] <- ols_list[[e]]$coefficients return(vect_coef) }) lasso_final_coefficients <- lasso_final_coefficients_list[[which.min(bic_min_list)]] lasso_ols_coefficients <- lasso_ols_coefficients_list[[which.min(bic_min_list)]] if (ols_ps) { coef <- lasso_ols_coefficients } else { coef <- lasso_final_coefficients } return(coef) }, error = function(e) { rep(NA, ncol(data)) }) } #' Adaptive Lasso #' #' This function performs adaptive lasso regression using the cv.glmnet function, #' then refits the model using ordinary least squares. #' #' @param data A data frame or matrix containing the predictors and response. #' The response must be in the first column. 
#' @param index A numeric vector of indices indicating the rows of 'data' to use #' for the adaptive lasso regression. #' @param weights_method A character string specifying the method to calculate #' the weights. Can be either "ols" or "ridge". Default #' is "ols". #' @param ols_ps A logical scalar. If TRUE (default), the function returns the #' coefficients from the OLS fit. If FALSE, it returns the #' coefficients from the lasso fit. #' #' @return A numeric vector of coefficients. If 'ols_ps' is TRUE, these are the #' coefficients from the OLS fit. If 'ols_ps' is FALSE, these are the #' coefficients from the lasso fit. If an error occurs during the lasso #' or OLS fit, the function returns a vector of NAs. #' #' @export #' @import Matrix #' @import glmnet #' @importFrom stats lm #' @importFrom stats lsfit #' @importFrom stats coef #' @importFrom stats as.formula #' @importFrom stats predict #' @importFrom stats BIC #' @importFrom glmnet cv.glmnet alasso <- function(data, index, weights_method = c("ols", "ridge"), ols_ps = TRUE) { tryCatch({ x <- as.matrix(data[index,-1]) y <- as.matrix(data[index, 1, drop = FALSE]) if (weights_method == "ols") { ols <- lsfit( x = x, y = y, intercept = FALSE, tolerance = 1e-20 )[[1]] ols_coef <- ols ols_coef[is.na(ols_coef)] <- 0 weight <- ols_coef } if (weights_method == "ridge") { ridge_init <- cv.glmnet(x, y, alpha = 0, intercept = TRUE) ridge_coef <- as.numeric(coef(ridge_init, s = ridge_init$lambda.min)) if (ridge_init$lambda.min == ridge_init$lambda[length(ridge_init$lambda)]) { lower_bound_grid <- ridge_init$lambda.min / 10 upper_bound_grid <- min(ridge_init$lambda[1], 1.1 * ridge_init$lambda.min) lambda_grid <- seq(upper_bound_grid, lower_bound_grid, length = 100) ridge_new <- cv.glmnet(x, y, alpha = 0, lambda = lambda_grid, intercept = TRUE) ridge_coef <- as.numeric(coef(ridge_new, ridge_new$lambda.min)) } ridge_coef[is.na(ridge_coef)] <- 0 weight <- ridge_coef[-1] } # alpha=1, lasso alasso_init <- cv.glmnet( x, y, alpha = 1, penalty.factor = 1 / abs(weight), intercept = TRUE ) alasso_init_lambda_min <- alasso_init$lambda.min alasso_init_lambda_grid <- alasso_init$lambda coef <- as.numeric(coef(alasso_init, alasso_init$lambda.min)) if (alasso_init$lambda.min == alasso_init$lambda[length(alasso_init$lambda)]) { lower_bound_grid <- alasso_init$lambda.min / 10 upper_bound_grid <- min(alasso_init$lambda[1], 1.1 * alasso_init$lambda.min) lambda_grid <- seq(upper_bound_grid, lower_bound_grid, length = 100) alasso_new <- cv.glmnet( x, y, alpha = 1, penalty.factor = 1 / abs(weight), lambda = lambda_grid, intercept = TRUE ) alasso_second_lambda_grid <- alasso_new$lambda coef <- as.numeric(coef(alasso_new, alasso_new$lambda.min)) } threshold_sequence <- 10 ^ (-8:1) alasso_final_coefficients_list <- lapply(threshold_sequence, function(x) { ifelse(abs(coef) <= x, 0, coef) }) coef_logical_list <- sapply(alasso_final_coefficients_list, function(e) { !all(e == 0) }) alasso_final_coefficients_list <- alasso_final_coefficients_list[which(coef_logical_list)] ols_list <- lapply(alasso_final_coefficients_list, function(e) { coef_nonzero <- e != 0 if (sum(coef_nonzero) > 0) { if (coef_nonzero[1] & any(coef_nonzero[-1])) { selected_x <- x[, coef_nonzero[-1], drop = FALSE] ols <- lm(y ~ as.matrix(selected_x)) } else if (coef_nonzero[1]) { ols <- lm(y ~ 1) } else { selected_x <- x[, coef_nonzero[-1], drop = FALSE] ols <- lm(y ~ 0 + as.matrix(selected_x)) } } }) bic_min_list <- lapply(ols_list, function(e) { BIC(e) }) alasso_ols_coefficients_list <- 
lapply(seq_along(alasso_final_coefficients_list), function(e) { coef_nonzero <- alasso_final_coefficients_list[[e]] != 0 vect_coef <- rep(0, ncol(data)) vect_coef[coef_nonzero] <- ols_list[[e]]$coefficients return(vect_coef) }) alasso_final_coefficients <- alasso_final_coefficients_list[[which.min(bic_min_list)]] alasso_ols_coefficients <- alasso_ols_coefficients_list[[which.min(bic_min_list)]] if (ols_ps) { coef <- alasso_ols_coefficients } else { coef <- alasso_final_coefficients } return(coef) }, error = function(e) { rep(NA, ncol(data)) }) } #' Optimal Savitzky-Golay Filter Parameters Finder #' #' This function finds the optimal parameters for the Savitzky-Golay filter #' by evaluating combinations of polynomial orders and window lengths. #' #' @param x_t A numeric vector or one-column matrix. The data to be smoothed. #' @param dt A numeric scalar. The time-step interval of the data. Default is 1. #' @param polyorder A numeric scalar. The order of the polynomial to be used in #' the Savitzky-Golay filter. If not specified, 4 will be used #' by default. #' #' @return A list with three elements: #' - sg_combinations: a matrix where each row represents a combination of #' polynomial order and window length tried. #' - sg_order_wl: a vector of length 2 with the optimal polynomial order and #' window length. #' - f_dist: a data frame with the mean squared error of the differences #' between the original data and the smoothed data for each #' combination. #' #' @export #' @import signal #' @import tidyverse #' @importFrom tidyr expand_grid sg_optimal_combination <- function(x_t, dt = 1, polyorder) { ### Create Combinations wl_max <- round((nrow(as.matrix(x_t)) * 0.05), 0) wl_max <- ifelse(wl_max %% 2 == 0, wl_max + 1, wl_max) ### Polynomial Order polyorder <- if(missing(polyorder)) 4 else polyorder ### If the Window length calculation is less than 11 ### we will just try the two minimum values. if (wl_max < 13) { ### Combinations sg_combinations <- cbind(4, 13) } else { ### Combinations if (wl_max > 101) { window_length <- seq(5, 101, by = 2) } else { if (wl_max %% 2 == 0) { wl_max <- wl_max + 1 window_length <- seq(5, wl_max, by = 2) } else { window_length <- seq(5, wl_max, by = 2) } } sg_combinations <- expand_grid(polyorder, window_length) %>% subset(window_length > polyorder + 7 - polyorder %% 2) %>% as.matrix() if (nrow(sg_combinations) == 1) { sg_combinations <- cbind(4, 13) } } ### Determine MSE for Combinations mse_xt <- sapply(seq_len(nrow(sg_combinations)), function(i) { x_t_smoothed <- x_t %>% sgolayfilt(p = sg_combinations[i, 1], n = sg_combinations[i, 2], m = 0, ts = dt) Metrics::mse(x_t, x_t_smoothed) }) mse_df <- data.frame(mse_xt = unlist(mse_xt)) sg_best_combination <- which.min(mse_df$mse_xt) sg_order_wl <- cbind(sg_combinations[sg_best_combination, 1], sg_combinations[sg_best_combination, 2]) return( list( sg_combinations = sg_combinations, sg_order_wl = sg_order_wl, mse_df = mse_df ) ) } #' Build Design Matrix #' #' This function first smooths the data and approximates the #' derivative before building the design matrix to include monomial and fourier #' terms. #' #' @param x_t Matrix of observations. #' @param dt Time step (default is 1). #' @param sg_poly_order Polynomial order for Savitzky-Golay Filter. #' @param library_degree Degree of polynomial library (default is 5). #' @param library_type Type of library to use. Can be one of "poly", #' "four", or "poly_four". 
#' #' @return A list with two elements: #' \itemize{ #' \item \code{sorted_theta} - A matrix with sorted polynomial/trigonometric #' terms. #' \item \code{monomial_orders} - A vector indicating the order of each #' polynomial term. #' \item \code{xdot_filtered} - A matrix with derivative terms #' (dependent variable). #' } #' @export #' @examples #' # Build a design matrix using the Duffing Oscillator as the state-space. #' # Output provides matrix, and derivative matrix monomial orders #' # (needed for running `argos`). #' x_t <- duffing_oscillator(n=5000, dt = 0.01, #' init_conditions = c(1, 0), #' gamma_value = 0.1, kappa_value = 1, #' epsilon_value = 5, snr = 49) #' duffing_design_matrix <- #' build_design_matrix(x_t, dt = 0.01, sg_poly_order = 4, #' library_degree = 5, library_type = "poly") #' head(duffing_design_matrix$sorted_theta) #' @importFrom signal sgolayfilt #' @importFrom magrittr %>% #' @importFrom stats polym build_design_matrix <- function(x_t, dt = 1, sg_poly_order = 4, library_degree = 5, library_type = c("poly", "four", "poly_four")) { monomial_degree <- library_degree dt <- dt # Filter x_t num_columns <- ncol(x_t) x_filtered <- list() xdot_filtered <- list() # Filter x_t for (i in 1:num_columns) { sg_combinations <- sg_optimal_combination(x_t[, i], dt, polyorder = sg_poly_order)[[2]] x_filtered[[i]] <- sgolayfilt( x_t[, i], p = sg_combinations[1, 1], n = sg_combinations[1, 2], m = 0, ts = dt ) xdot_filtered[[i]] <- sgolayfilt( x_t[, i], p = sg_combinations[1, 1], n = sg_combinations[1, 2], m = 1, ts = dt ) } # Combine filtered data and derivatives x_t <- do.call(cbind, x_filtered) sg_dx <- do.call(cbind, xdot_filtered) # Get the number of columns in the matrix num_columns_sg_dx <- ncol(sg_dx) # Create column names based on the pattern colnames(sg_dx) <- paste0("xdot", 1:num_columns_sg_dx) ### Sort state variables for expansion ### x_t needs to be in reverse order because of how poly function expands ### We do this here so that we can use it for the for loop to determine ### optimal SG parameters out_sorted <- x_t %>% data.frame() %>% rev() if (library_type == "poly" | library_type == "poly_four") { # Polynomial Expansion expanded_theta <- polym(as.matrix(out_sorted), degree = monomial_degree, raw = TRUE) # Order by degree using as.numeric_version numeric_version allows to # convert names of variables and expand without limit ordered_results <- order(attr(expanded_theta, "degree"), as.numeric_version(colnames(expanded_theta))) # Sort Theta Matrix sorted_theta <- expanded_theta[, ordered_results] sorted_theta <- data.frame(sorted_theta) # Change Variable Names s <- strsplit(substring(colnames(sorted_theta), 2), "\\.") colnames(sorted_theta) <- sapply(s, function(powers) { terms <- mapply(function(power, index) { if (power == "0") { return(NULL) } else if (power == "1") { return(paste0("x", index)) } else { return(paste0("x", index, "^", power)) } }, powers, rev(seq_along(powers)), SIMPLIFY = FALSE) # Filter out any NULL values from the terms list terms <- Filter(Negate(is.null), terms) # Sort terms alphabetically sorted_terms <- sort(unlist(terms)) # Collapse the sorted terms into one string paste(sorted_terms, collapse = "") }) # That lost the attributes, so put them back attr(sorted_theta, "degree") <- attr(expanded_theta, "degree")[ordered_results] monomial_orders <- attr(expanded_theta, 'degree')[ordered_results] } if (library_type == "four" | library_type == "poly_four") { if (ncol(x_t) == 1) { trig_functions <- cbind(sin(x_t[, 1]), cos(x_t[, 1])) 
attr(trig_functions, "degree") <- c(1, 1) } else if (ncol(x_t) == 2) { trig_functions <- cbind(sin(x_t[, 1]), cos(x_t[, 1]), sin(x_t[, 2]), cos(x_t[, 2])) attr(trig_functions, "degree") <- c(1, 1, 1, 1) } else { trig_functions <- cbind(sin(x_t[, 1]), cos(x_t[, 1]), sin(x_t[, 2]), cos(x_t[, 2]), sin(x_t[, 3]), cos(x_t[, 3])) attr(trig_functions, "degree") <- c(1, 1, 1, 1, 1, 1) } num_columns <- ncol(trig_functions) column_names <- character(num_columns) for (i in seq(1, num_columns, by = 2)) { # sin for odd columns column_names[i] <- paste("sin_x", ceiling(i/2), sep = "") # If there's an even column left if (i + 1 <= num_columns) { column_names[i + 1] <- paste("cos_x", ceiling(i/2), sep = "") } } colnames(trig_functions) <- column_names if (library_type == "four") { sorted_theta <- trig_functions attr(sorted_theta, "degree") <- attr(sorted_theta, "degree")[c(attr(trig_functions, "degree"))] # That lost the attributes again, so put them back monomial_orders <- attr(trig_functions, "degree") } else { sorted_theta <- cbind(trig_functions, sorted_theta) attr(sorted_theta, "degree") <- attr(expanded_theta, "degree")[c(attr(trig_functions, "degree"), ordered_results)] # That lost the attributes again, so put them back monomial_orders <- attr(expanded_theta, "degree")[c(attr(trig_functions, "degree"), ordered_results)] } } return(list(sorted_theta = cbind(sorted_theta), monomial_orders = monomial_orders, xdot_filtered = sg_dx)) } #' Automatic Regression for Governing Equations (ARGOS) #' #' This function performs sparse regression on a data set to identify the #' governing equations of the system. It takes a list of data from #' `build_design_matrix` then applies the Lasso or Adaptive Lasso for variable #' selection. #' #' @param design_matrix A list containing data frame, vector of predictor #' variable orders for 'theta', and derivative matrix. #' @param library_type A character vector (default: c("poly", "four", #' "poly_four")) specifying the type of library being used. #' @param state_var_deriv An integer. The index of the state variable for which #' the derivative is calculated. Default is 1. #' @param alpha_level A numeric scalar. The level of significance for #' confidence intervals. Default is 0.05. #' @param num_samples An integer. The number of bootstrap samples. Default is #' 2000. #' @param sr_method A character string. The sparse regression method to be used, #' either "lasso" or "alasso". Default is "lasso". #' @param weights_method A string or NULL. The method for calculating weights in #' the Adaptive Lasso. If NULL, ridge regression pilot #' estimates are used. Default is NULL. #' @param ols_ps A logical. If TRUE, post-selection OLS is performed after the #' Lasso or Adaptive Lasso. Default is TRUE. #' @param parallel A character string. The type of parallel computation to be #' used, either "no", "multicore" or "snow". Default is "no". #' @param ncpus An integer or NULL. The number of cores to be used in parallel #' computation. If NULL, the function will try to detect the #' number of cores. Default is NULL. #' #' @return A list with three elements: #' - point_estimates: a vector of point estimates for the coefficients. #' - ci: a matrix where each column represents the lower and upper bounds of #' the confidence interval for a coefficient. #' - identified_model: a matrix of coefficients of the identified model. #' #' @export #' @examples #' # Identify the x1 equation of the Duffing Oscillator with ARGOS. 
#' # Output provides point estimates, confidence intervals, and identified model. #' x_t <- duffing_oscillator(n=1000, dt = 0.01, #' init_conditions = c(1, 0), #' gamma_value = 0.1, kappa_value = 1, #' epsilon_value = 5, snr = 49) #' duffing_design_matrix <- #' build_design_matrix(x_t, dt = 0.01, sg_poly_order = 4, #' library_degree = 5, library_type = "poly") #' design_matrix <- duffing_design_matrix #' state_var_deriv = 1 # Denotes first equation/derivative to be identified #' alpha_level = 0.05 #' num_samples = 10 #' sr_method = "lasso" #' weights_method = NULL #' ols_ps = TRUE #' parallel = "no" #' ncpus = NULL #' library_type <- "poly" #' perform_argos <- argos(design_matrix = design_matrix, #' library_type = library_type, #' state_var_deriv = state_var_deriv, #' alpha_level = alpha_level, #' num_samples = num_samples, #' sr_method = "lasso", #' weights_method = NULL, #' ols_ps = TRUE, #' parallel = "no", #' ncpus = NULL) #' perform_argos$point_estimates #' perform_argos$ci #' perform_argos$identified_model #' @import boot #' @import tidyverse #' @importFrom stats polym #' @importFrom magrittr `%>%` argos <- function(design_matrix, library_type = c("poly", "four", "poly_four"), state_var_deriv = 1, alpha_level = 0.05, num_samples = 2000, sr_method = c("lasso", "alasso"), weights_method = NULL, ols_ps = TRUE, parallel = c("no", "multicore", "snow"), ncpus = NULL) { # Unpack design matrix sorted_theta <- design_matrix$sorted_theta monomial_orders <- design_matrix$monomial_orders xdot <- design_matrix$xdot parallel <- match.arg(parallel) # add this line sr_method <- match.arg(sr_method) # add this line # Check if parallel processing is requested if (parallel != "no") { # Check if ncpus is NULL if (is.null(ncpus)) { # Detect number of cores and assign it to ncpus ncpus <- parallel::detectCores() } } # Create derivative and combine with theta matrix with SG Golay num_deriv_columns <- ncol(xdot) derivative_data <- list() for (i in 1:num_deriv_columns) { deriv_col <- xdot[, i] dot_df <- data.frame(cbind(deriv_col, sorted_theta)) derivative_data[[i]] <- dot_df } # Access the desired data frame using the derivative variable data <- derivative_data[[state_var_deriv]] # Perform initial sparse regression to determine polynomial order of design matrix sr_method <- if(missing(sr_method)) "lasso" else sr_method weights_method <- if(missing(weights_method)) "ridge" else weights_method if (sr_method == "alasso") { initial_estimate <- alasso(data, weights_method = weights_method, ols_ps = ols_ps) } else { initial_estimate <- lasso(data, ols_ps = ols_ps) } # max nonzero value from sparse regression init_nz_max <- max(which(initial_estimate != 0)) # Determine new theta order based on max nonzero value. # Include all monomials in max value new_theta_order <- sum(monomial_orders <= monomial_orders[init_nz_max]) if (library_type == "four") { post_lasso_matrix <- data } else { # Rerun Bootstrap with Truncated Matrix if (is.na(new_theta_order) | new_theta_order == length(monomial_orders)) { post_lasso_matrix <- data } else { post_lasso_matrix <- data[-1][, 1:(new_theta_order)] post_lasso_matrix <- cbind.data.frame(data[1], post_lasso_matrix) } } # Create list to compile necessary information for bootstrap. 
# Add updated matrix if (sr_method == "alasso") { boot_info <- c( list(data = post_lasso_matrix, R = num_samples), statistic = match.fun("alasso"), weights_method = weights_method, ols_ps = ols_ps, parallel = parallel, ncpus = ncpus ) } else { boot_info <- c( list(data = post_lasso_matrix, R = num_samples), statistic = match.fun("lasso"), ols_ps = ols_ps, parallel = parallel, ncpus = ncpus ) } # boot Function on Original Dataframe boot_s <- do.call(boot, boot_info) # Matrix of coefficients from bootstrap samples boot_t0 <- boot_s$t0 # point estimates boot_t <- boot_s$t # sample estimates ### In case of string/character, change to numeric boot_t <- matrix(as.numeric(boot_t), nrow = num_samples, ncol = ncol(boot_t)) # subset any NAs num_nas_boot <- sum(apply(boot_t, 1, function(x) any(is.na(x)))) boot_t <- subset(boot_t, apply(boot_t, 1, function(x) any(!is.na(x)))) # ordered polynomial degree of variables alpha # typically equal to 0.05, 0.01, or 0.10 b <- nrow(boot_t) q_normal <- alpha_level # Lower bound q1_normal <- (b * q_normal) / 2 # Upper bound q2_normal <- b - q1_normal + 1 if (round(q1_normal) <= 0) { q1_normal <- 1 } if (q2_normal > b) { q2_normal <- b } # Sort and determine value of lower and upper bound ci <- apply(boot_t, 2, function(u) { sort(u)[c(round(q1_normal, 0), round(q2_normal, 0))] }) ci[is.na(ci)] <- 0 count_zero <- apply(boot_t, 2, function(x) { length(which(x == 0)) }) percent_zero <- apply(boot_t, 2, function(x) { length(which(x == 0)) / length(x) }) df_columns <- c("(Intercept)", colnames(post_lasso_matrix)[-1]) df_columns <- gsub("\\.", "^", df_columns) identified_model <- matrix(data = NA, nrow = length(boot_t0)) rownames(identified_model) <- c("(Intercept)", df_columns[-1]) ### Check if confidence intervals contain variable and do not cross zero for (i in seq_along(identified_model)) { if (ci[1, i] <= boot_t0[i] & ci[2, i] >= boot_t0[i] & ((ci[1, i] <= 0 && ci[2, i] >= 0)) == FALSE) { identified_model[i,] <- boot_t0[i] } else { identified_model[i,] <- 0 } } colnames(ci) <- df_columns return( list( point_estimates = boot_t0, ci = ci, identified_model = identified_model ) ) } #' Cubic 2D System #' #' Simulates a two-dimensional damped oscillator with cubic dynamics and optional #' noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param init_conditions Initial conditions as a numeric vector of length 2. #' @param dt Time step between observations. #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a 2D cubic system with 100 time points and no noise #' data <- cubic2d_system(n = 100, init_conditions = c(1, 2), dt = 0.01, snr = Inf) #' #' @details #' This function simulates a two-dimensional damped oscillator with cubic dynamics. #' It uses the specified time step and initial conditions to compute the system's #' state over time. If a non-Infinite SNR is provided, Gaussian noise is added to #' the system. 
#' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export cubic2d_system <- function(n, init_conditions, dt, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 times <- seq(0, ((n) - 1) * dt, by = dt) init_conditions <- init_conditions matrix_a <- matrix(c(-0.1, -2, 2, -0.1), 2, 2) cubic2d <- function(t, init_conditions, parameters) { with(as.list(c(init_conditions, parameters)), { dx <- matrix_a[1, 1] * init_conditions[1] ** 3 + matrix_a[1, 2] * init_conditions[2] ** 3 dy <- matrix_a[2, 1] * init_conditions[1] ** 3 + matrix_a[2, 2] * init_conditions[2] ** 3 list(c(dx, dy)) }) } out <- ode(y = init_conditions, times = times, func = cubic2d, parms = matrix_a, atol = 1.49012e-8, rtol = 1.49012e-8)[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Linear 2D System #' #' Simulates a two-dimensional damped oscillator with linear dynamics and optional #' noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param init_conditions Initial conditions as a numeric vector of length 2. #' @param dt Time step between observations. #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a 2D linear system with 100 time points and no noise #' data <- linear2d_system(n = 100, init_conditions = c(-1, 1), dt = 0.01, snr = Inf) #' #' @details #' This function simulates a two-dimensional damped oscillator with linear dynamics. #' It uses the specified time step and initial conditions to compute the system's #' state over time. If a non-Infinite SNR is provided, Gaussian noise is added to #' the system. #' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export linear2d_system <- function(n, init_conditions, dt, snr = Inf) { n <- round(n, 0) dt <- dt snr <- snr # n = number of time points rounded to nearest integer # times: n - 1 to round off total n given that we start at 0 times <- seq(0, ((n) - 1) * dt, by = dt) init_conditions <- init_conditions matrix_a <- matrix(c(-0.1, -2, 2, -0.1), 2, 2) linear2d <- function(t, init_conditions, parameters) { with(as.list(c(init_conditions, parameters)), { dx <- matrix_a[1, 1] * init_conditions[1] + matrix_a[1, 2] * init_conditions[2] dy <- matrix_a[2, 1] * init_conditions[1] + matrix_a[2, 2] * init_conditions[2] list(c(dx, dy)) }) } out <- ode(y = init_conditions, times = times, func = linear2d, parms = matrix_a, atol = 1.49012e-8, rtol = 1.49012e-8)[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Linear 3D System #' #' Simulates a three-dimensional linear dynamical system with optional noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param init_conditions Initial conditions as a numeric vector of length 3. #' @param dt Time step between observations. 
#' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a 3D linear system with 100 time points and no noise #' data <- linear3d_system(n = 100, init_conditions = c(1, 2, 3), dt = 0.01, snr = Inf) #' #' @details #' This function simulates a three-dimensional linear dynamical system. #' It uses the specified time step and initial conditions to compute the system's #' state over time. If a non-Infinite SNR is provided, Gaussian noise is added to #' the system. #' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export linear3d_system <- function(n, init_conditions, dt, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 times <- seq(0, ((n) - 1) * dt, by = dt) matrix_a <- matrix(c(-0.1, -2, 0, 2, -0.1, 0, 0, 0, -0.3), 3, 3) init_conditions <- init_conditions linear3d <- function(t, init_conditions, parameters) { with(as.list(c(init_conditions, parameters)), { dx <- matrix_a[1, 1] * init_conditions[1] + matrix_a[1, 2] * init_conditions[2] dy <- matrix_a[2, 1] * init_conditions[1] + matrix_a[2, 2] * init_conditions[2] dz <- matrix_a[3, 3] * init_conditions[3] list(c(dx, dy, dz)) }) } out <- ode(y = init_conditions, times = times, func = linear3d, parms = matrix_a, atol = 1.49012e-8, rtol = 1.49012e-8)[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Duffing Oscillator #' #' Simulates the Duffing oscillator with optional noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param dt Time step between observations. #' @param init_conditions Initial conditions as a numeric vector of length 2. #' @param gamma_value Value of gamma parameter. #' @param kappa_value Value of kappa parameter. #' @param epsilon_value Value of epsilon parameter. #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a Duffing oscillator with 100 time points and no noise #' data <- duffing_oscillator( #' n = 100, #' dt = 0.01, #' init_conditions = c(2, 6), #' gamma_value = 0.1, #' kappa_value = 1, #' epsilon_value = 5, #' snr = Inf #' ) #' #' @details #' This function simulates a Duffing oscillator with the specified parameters. #' It uses the specified time step and initial conditions to compute the system's #' state over time. If a non-Infinite SNR is provided, Gaussian noise is added to #' the system. 
#' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export duffing_oscillator <- function(n, dt, init_conditions, gamma_value, kappa_value, epsilon_value, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 init_conditions <- init_conditions times <- seq(0, ((n) - 1) * dt, by = dt) duff_parameters <- c(gamma_value, kappa_value, epsilon_value) duff_osc <- function(t, init_conditions, duff_parameters) { with(as.list(c(init_conditions, duff_parameters)), { dx <- init_conditions[2] dy <- (-duff_parameters[1] * init_conditions[2]) - (duff_parameters[2] * init_conditions[1]) - (duff_parameters[3] * (init_conditions[1] ^ 3)) list(c(dx, dy)) }) } # Oscillator out <- ode( y = init_conditions, func = duff_osc, times = times, parms = duff_parameters, atol = 1.49012e-8, rtol = 1.49012e-8 )[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Van der Pol Oscillator #' #' Simulates the Van der Pol oscillator with optional noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param dt Time step between observations. #' @param init_conditions Initial conditions as a numeric vector of length 2. #' @param mu Parameter controlling the nonlinear damping level of the system. #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a Van der Pol oscillator with 100 time points and no noise #' data <- vdp_oscillator( #' n = 100, #' dt = 0.01, #' init_conditions = c(-1, 1), #' mu = 1.2, #' snr = Inf #' ) #' #' @details #' This function simulates a Van der Pol oscillator with the specified parameters. #' It uses the specified time step and initial conditions to compute the system's #' state over time. If a non-Infinite SNR is provided, Gaussian noise is added to #' the system. #' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export vdp_oscillator <- function(n, dt, init_conditions, mu, snr = Inf) { n <- round(n, 0) dt <- dt mu <- mu # mu = "negative" resistance of triode passing a small current # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 init_conditions <- init_conditions times <- seq(0, ((n) - 1) * dt, by = dt) mu <- mu vdpol <- function(t, init_conditions, mu) { with(as.list(c(init_conditions, mu)), { dx <- init_conditions[2] dy <- mu * (1 - ((init_conditions[1]) ^ 2)) * init_conditions[2] - init_conditions[1] list(c(dx, dy)) }) } out <- ode( y = init_conditions, func = vdpol, times = times, parms = mu, atol = 1.49012e-8, rtol = 1.49012e-8 )[,-1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } return(x_t = out) } #' Lotka-Volterra System #' #' Simulates the Lotka-Volterra predator-prey system with optional noise. 
#' #' @param n Number of time points (rounded to the nearest integer). #' @param dt Time step between observations. #' @param init_conditions Initial conditions as a numeric vector of length 2. #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable. #' #' @examples #' # Simulate a Lotka-Volterra system with 100 time points and no noise #' data <- lotka_volterra( #' n = 100, #' dt = 0.01, #' init_conditions = c(2, 1), #' snr = Inf #' ) #' #' @details #' This function simulates the Lotka-Volterra predator-prey system with the #' specified parameters. It uses the specified time step and initial conditions #' to compute the system's state over time. If a non-Infinite SNR is provided, #' Gaussian noise is added to the system. #' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export lotka_volterra <- function(n, init_conditions, dt, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 parameters <- c(c0 = 1, c1 = -1, c2 = -1, c3 = 1) init_conditions <- init_conditions lv <- function(t, init_conditions, parameters) { with(as.list(c(init_conditions, parameters)), { dx <- (parameters[1] * init_conditions[1]) + (parameters[2] * (init_conditions[1] * init_conditions[2])) dy <- (parameters[3] * init_conditions[2]) + (parameters[4] * (init_conditions[1] * init_conditions[2])) list(c(dx, dy)) }) } times <- seq(0, ((n) - 1) * dt, by = dt) out <- ode(y = init_conditions, times = times, func = lv, parms = parameters, atol = 1.49012e-8, rtol = 1.49012e-8)[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Lorenz Chaotic System #' #' Simulates the Lorenz chaotic system with optional noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param dt Time step between observations. #' @param init_conditions Initial conditions as a numeric vector of length 3 (X, Y, Z). #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable (X, Y, Z). #' #' @examples #' # Simulate the Lorenz system with 1000 time points and no noise #' data <- lorenz_system( #' n = 1000, #' dt = 0.01, #' init_conditions = c(-8, 7, 27), #' snr = Inf #' ) #' #' @details #' This function simulates the Lorenz chaotic system with the specified #' parameters. It uses the specified time step and initial conditions to compute #' the system's state over time. If a non-Infinite SNR is provided, Gaussian noise #' is added to the system. 
#' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export lorenz_system <- function(n, init_conditions, dt, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 # Lorenz Parameters: sigma, rho, beta parameters <- c(s = 10, r = 28, b = 8 / 3) # init_conditions <- c(X = -8, Y = 7, Z = 27) # Original Initial Conditions init_conditions <- init_conditions lorenz <- function(t, init_conditions, parameters) { with(as.list(c(init_conditions, parameters)), { dx <- parameters[1] * (init_conditions[2] - init_conditions[1]) dy <- init_conditions[1] * (parameters[2] - init_conditions[3]) - init_conditions[2] dz <- init_conditions[1] * init_conditions[2] - parameters[3] * init_conditions[3] list(c(dx, dy, dz)) }) } times <- seq(0, ((n) - 1) * dt, by = dt) out <- ode(y = init_conditions, times = times, func = lorenz, parms = parameters, atol = 1.49012e-8, rtol = 1.49012e-8)[, -1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) } #' Rossler Chaotic System #' #' Simulates the Rossler chaotic system with optional noise. #' #' @param n Number of time points (rounded to the nearest integer). #' @param dt Time step between observations. #' @param init_conditions Initial conditions as a numeric vector of length 3 (X, Y, Z). #' @param a Rossler parameter 1 #' @param b Rossler parameter 2 #' @param c Rossler parameter 3 #' @param snr Signal-to-noise ratio (in dB). Use Inf for no noise. #' #' @return A numeric matrix representing the system's state over time. Each row #' corresponds to a time point, and each column represents a variable (X, Y, Z). #' #' @examples #' # Simulate the Rossler system with 1000 time points and no noise #' data <- rossler_system( #' n = 1000, #' dt = 0.01, #' init_conditions = c(0, 2, 0), #' a = 0.2, b = 0.2, c = 5.7, #' snr = Inf #' ) #' #' @details #' This function simulates the Rossler chaotic system with the specified #' parameters. It uses the specified time step and initial conditions to compute #' the system's state over time. If a non-Infinite SNR is provided, Gaussian noise #' is added to the system. 
#' #' @import deSolve #' @importFrom stats sd #' @importFrom stats rnorm #' @export rossler_system <- function(n, dt, init_conditions, a, b, c, snr = Inf) { n <- round(n, 0) dt <- dt # n = number of time points rounded to nearest integer # snr = added noise to system (dB) # times: n - 1 to round off total n given to start at t_init = 0 init_conditions <- init_conditions times <- seq(0, ((n) - 1) * dt, by = dt) rossler_parameters <- c(a, b, c) rossler <- function(t, init_conditions, rossler_parameters) { with(as.list(c(init_conditions, rossler_parameters)), { dx <- -init_conditions[2] - init_conditions[3] dy <- init_conditions[1] + (rossler_parameters[1] * init_conditions[2]) dz <- rossler_parameters[2] + (init_conditions[3] * (init_conditions[1] - rossler_parameters[3])) list(c(dx, dy, dz)) }) } out <- ode( y = init_conditions, func = rossler, times = times, parms = rossler_parameters, atol = 1.49012e-8, rtol = 1.49012e-8 )[,-1] # Add Noise if (!is.infinite(snr)) { length <- nrow(out) * ncol(out) # Convert to snr voltage (dB) snr_volt <- 10 ^ -(snr / 20) noise_matrix <- snr_volt * matrix(rnorm(length, mean = 0, sd = sd(out)), nrow(out)) out <- out + noise_matrix } # Return x_t return(x_t = out) }
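# A minimal usage sketch of the simulators above, assuming they are sourced and
# the 'deSolve' package is installed: generate a noisy Lorenz trajectory and a
# noise-free Van der Pol trajectory, then inspect their dimensions.
library(deSolve)
xt_lorenz <- lorenz_system(n = 500, dt = 0.01, init_conditions = c(-8, 7, 27), snr = 20)
dim(xt_lorenz)   # 500 x 3: columns are X, Y, Z
xt_vdp <- vdp_oscillator(n = 200, dt = 0.01, init_conditions = c(-1, 1), mu = 1.2, snr = Inf)
dim(xt_vdp)      # 200 x 2
# With snr = 20 the added Gaussian noise has standard deviation
# 10^(-20/20) * sd(noise-free trajectory), following the SNR-to-amplitude
# conversion used inside each simulator.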
/scratch/gouwar.j/cran-all/cranData/ARGOS/R/argos_files.R
#' An adaptable generalized Hotelling's \eqn{T^2} test for high dimensional data #' @export #' @import stats #' @description This function performs the adaptable regularized Hotelling's \eqn{T^2} test (ARHT) (Li et al., (2016) <arXiv:1609.08725>) for the one-sample #' and two-sample test problem, where we're interested in detecting the mean vector in the one-sample problem or the difference #' between mean vectors in the two-sample problem in a high dimensional regime. #' #' @details The method incorporates ridge-regularization in the classic Hotelling's \eqn{T^2} test with the regularization parameter #' chosen such that the asymptotic power under a class of probabilistic alternative prior models is maximized. ARHT combines #' different prior models by taking the maximum of statistics under all models. ARHT is distributed as the maximum #' of a correlated multivariate normal random vector. We estimate its covariance matrix and bootstrap its distribution. The #' returned p-value is a Monte Carlo approximation to its true value using the bootstrap sample, therefore not deterministic. #' Various methods are available to calibrate the slightly inflated Type 1 error rate of ARHT, including Cube-root transformation, #' square-root transformation and chi-square approximation. #' @param X the n1-by-p observation matrix with numeric column variables. #' @param Y an optional n2-by-p observation matrix; if \code{NULL}, a one-sample test is conducted on \code{X}; otherwise, a two-sample test #' is conducted on \code{X} and \code{Y}. #' @param mu_0 the null hypothesis vector to be tested; if \code{NULL}, the default value is the 0 vector of length p. #' @param prob_alt_prior a non-empty list; Each field is a numeric vector with sum 1. The default value is the "canonical weights" #' \code{list(c(1,0,0), c(0,1,0), c(0,0,1))}; Each field represents a probabilistic prior model specified by weights of \eqn{I_p}, #' \eqn{\Sigma}, \eqn{\Sigma^2}, etc, where \eqn{\Sigma} is the population covariance matrix of the observations. #' @param Type1error_calib the method to calibrate Type 1 error rate of ARHT. Choose its first element when more than one are specified. #' Four values are allowed: #' \itemize{\item{\code{cube_root}} The default value; cube-root transformation; #' \item{\code{sqrt}} Square-root transformation; #' \item{\code{chi_sq}} Chi-square approximation, not available when more than three models are specified in \code{prob_alt_prior}; #' \item{\code{none}} No calibration. #' } #' @param lambda_range optional user-supplied lambda range; If \code{NULL}, ARHT chooses its own range. #' @param nlambda optional user-supplied number of lambda's in grid search; default to be \code{2000}; the grid is progressively coarser. #' @param bs_size positive numeric with default value \code{1e5}; only effective when more than one prior models are specified in \code{prob_alt_prior}; #' control the size of the bootstrap sample used to approximate the ARHT p-value. #' @references Li, H. Aue, A., Paul, D. Peng, J., & Wang, P. (2016). \emph{An adaptable generalization of Hotelling's \eqn{T^2} test in high dimension.} #' <arXiv:1609:08725>. #' @references Chen, L., Paul, D., Prentice, R., & Wang, P. (2011). \emph{A regularized Hotelling's \eqn{T^2} test for pathway analysis in proteomic studies.} #' Journal of the American Statistical Association, 106(496), 1345-1360. #' @return \itemize{ #' \item{\code{ARHT_pvalue}}: The p-value of ARHT test. 
#' \itemize{ #' \item If \code{length(prob_alt_prior)==1}, it is identical to \code{RHT_pvalue}. #' \item If \code{length(prob_alt_prior)>1}, it is the p-value after combining results from all prior models. The value is #' bootstrapped, therefore not deterministic. #' } #' \item{\code{RHT_opt_lambda}}: The optimal lambda's chosen under each of the prior models in \code{prob_alt_prior}. It has the same length and order as #' \code{prob_alt_prior}. #' \item{\code{RHT_pvalue}}: The p-value of RHT tests with the lambda's in \code{RHT_opt_lambda}. #' \item{\code{RHT_std}}: The standardized RHT statistics with the lambda's in \code{RHT_opt_lambda}. #' Take its maximum to get the statistic of ARHT test. #' \item{\code{Theta1}}: As defined in Li et al. (2016) <arXiv:1609.08725>, the estimated asymptotic means of RHT statistics with the lambda's in \code{RHT_opt_lambda}. #' \item{\code{Theta2}}: As defined in Li et al. (2016) <arXiv:1609.08725>, \code{2*Theta2} are the estimated asymptotic variances of RHT statistics the lambda's in \code{RHT_opt_lambda}. #' \item{\code{Corr_RHT}}: The estimated correlation matrix of the statistics in \code{RHT_std}. #'} #' @examples #' set.seed(10086) #' # One-sample test #' n1 = 300; p =500 #' dataX = matrix(rnorm(n1 * p), nrow = n1, ncol = p) #' res1 = ARHT(dataX) #' #' # Two-sample test #' n2= 400 #' dataY = matrix(rnorm(n2 * p), nrow = n2, ncol = p ) #' res2 = ARHT(dataX, dataY, mu_0 = rep(0.01,p)) #' #' # Specify probabilistic alternative priors model #' res3 = ARHT(dataX, dataY, mu_0 = rep(0.01,p), #' prob_alt_prior = list(c(1/3, 1/3, 1/3), c(0,1,0))) #' #' # Change Type 1 error calibration method #' res4 = ARHT(dataX, dataY, mu_0 = rep(0.01,p), #' Type1error_calib = "sqrt") #' #' RejectOrNot = res4$ARHT_pvalue < 0.05 #' ARHT = function(X, Y = NULL, mu_0 = NULL, prob_alt_prior = list(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1)), Type1error_calib = c("cube_root", "sqrt", "chi_sq", "none"), lambda_range = NULL, nlambda = 2000, bs_size = 1e5){ if(length(dim(X)) > 2L || !(is.numeric(X))) stop("X must be a numeric matrix with column variables") if(!is.matrix(X)) X = as.matrix(X) if(nrow(X) <= 1L){ stop("The number of rows in X must be larger than 1") } if(is.null(Y)){ mode = "one_sample" }else{ if(length(dim(Y)) > 2L || !(is.numeric(Y))) stop("Y must be a numeric matrix with column variables") if(!is.matrix(Y)) Y <- as.matrix(Y) if(nrow(Y) <= 1L){ stop("The number of rows in Y must be larger than 1") } if(ncol(Y) != ncol(X)){ stop("The dimensions of X and Y differ") } mode = "two_sample" } if(!is.null(mu_0)){ if(!is.vector(mu_0, mode = "numeric")) stop("mu_0 must be a numeric vector") if(length(mu_0) != ncol(X)) stop("The dimension of X doesn't match with that of mu_0") }else{ mu_0 = numeric(ncol(X)) } if(!is.list(prob_alt_prior)) stop("prob_alt_prior must be a list of numeric vectors") if(!all(sapply(prob_alt_prior, is.vector, mode = "numeric"))) stop("prob_alt_prior must be a list of numeric vectors") valid_prob_alt_prior = sapply(prob_alt_prior, function(a){ round(sum(a), 5) != 1}) if(any(valid_prob_alt_prior)){ stop(paste("In Model", paste( which(valid_prob_alt_prior), collapse = ", "), "specified in prob_alt_prior, the sum of prior weights is not 1")) } # throw away meaningless 0's and shorten prob_alt_prior max_nonzero_index = max(sapply(prob_alt_prior, function(xxx) max(which(xxx != 0)))) prob_alt_prior = lapply(prob_alt_prior, function(xxx){ xxx[1: min(length(xxx), max_nonzero_index)]}) if(!is.null(lambda_range)){ if(!is.vector(lambda_range, mode = "numeric")) 
stop("lambda_range must be a numeric vector of two elements") if(length(lambda_range)!=2L) stop("The length of lambda_range must be 2.") if(lambda_range[1]<=0) stop("The lower bound of lambda sequence must be positive") if(lambda_range[2]<= lambda_range[1]) stop("The upper bound of lambda sequence must be larger than the lower bound") } if( (!is.numeric(nlambda)) || (length(nlambda)!= 1) ) stop("nlambda must be numeric of length 1") if(nlambda<=0) stop("nlambda must be postive") nlambda = ceiling(nlambda) if(!(Type1error_calib[1] %in% c("cube_root", "sqrt", "chi_sq", "none"))){ Type1error_calib = "cube_root" warning('Unknown value for Type1error_calib; default value "cube_root" is chosen instead') } if( (length(prob_alt_prior) >3L) && (Type1error_calib[1] == "chi_sq")){ stop("Chi-square calibration of Type 1 error is not available when the number of prior models in prob_alt_prior is larger than 3") } if(length(prob_alt_prior)>1L ){ if( (!is.numeric(bs_size)) || (length(bs_size)!= 1) ) stop("bs_size must be numeric of length 1") if(bs_size <=0 ) stop("bs_size must be postive") if(bs_size < 1e3) warning("Bootstrap sample size is too small; ARHT_pvalue is not reliable.") } bs_size = ceiling(bs_size) if((Type1error_calib[1] != "chi_sq") && (length(prob_alt_prior)>1L)){ bootstrap_sample = matrix(rnorm(length(prob_alt_prior)*bs_size), ncol = bs_size) } if(mode == "one_sample"){ eig_proj = eigen_proj_1samp(X, mu_0, lower_lambda = lambda_range[1]) }else{ eig_proj = eigen_proj_2samp(X, Y, mu_0, lower_lambda = lambda_range[1]) } p = ncol(X) n = eig_proj$n gamma = p/n proj_diff = eig_proj$proj_shift ridge = eig_proj$lower_lambda # To speed up the computation of Stieltjes transform, separate positive eigenvalues and negative ones. positive_emp_eig = eig_proj$pos_eig_val num_zero_emp_eig = p - length(positive_emp_eig) emp_eig = c(positive_emp_eig, rep(0, times = num_zero_emp_eig)) ## specify the lambda's net. Use log-scale. Progressively coarser. 
if(is.null(lambda_range)){ lambda = exp(seq(from = log(ridge), to = log(20 * emp_eig[1] + (ridge - mean(emp_eig)/100) * (ridge - mean(emp_eig)/100 >0)), length = nlambda)) }else{ lambda = exp(seq(from = log(lambda_range[1]), to = log(lambda_range[2]), length = nlambda)) } ## Stieltjes transform, its derivative, Theta_1, Theta_2 mF = 1/p * ( rowSums(1/outer(lambda, positive_emp_eig, FUN = "+")) + num_zero_emp_eig/lambda ) mFprime = 1/p * (rowSums(1/(outer(lambda, positive_emp_eig, FUN = "+"))^2) + num_zero_emp_eig/lambda^2) Theta1 = (1 - lambda*mF)/(1 - gamma*(1 - lambda * mF)) Theta2 = (1 + gamma*Theta1)^2 * (Theta1 - lambda *(mF - lambda * mFprime)/(1 - gamma*(1 - lambda * mF))^2) # Calculate the power under each prior model prior_max_order = max(sapply(prob_alt_prior,length)) unified_prob_alt_prior = lapply(prob_alt_prior, function(i) c(i, rep(0, times = max(prior_max_order,2) - length(i)))) matrix_prob_alt_prior = do.call(rbind, unified_prob_alt_prior) if(prior_max_order <= 2L){ rhos = rbind(mF, Theta1) }else{ pop_moments = moments_PSD(emp_eig, n, prior_max_order-2) rhos = matrix(NA, nrow = prior_max_order, ncol = length(mF)) rhos[1, ] = mF rhos[2, ] = Theta1 # recursive formulas; cannot be parallel for(ii in 3:prior_max_order){ rhos[ii,] = (1 + gamma * Theta1) * (pop_moments[ii-2] - lambda * rhos[ii-1, ]) } } powers = t(matrix_prob_alt_prior %*% rhos) / sqrt(2*gamma*Theta2) # Column: prior model; Row: lambda opt_lambda_index = apply(powers, 2, which.max) # optimal lambda index under each prior model ## Estimated covariance matrix of standardized RHT statistics with optimal lambda's G = matrix( apply( expand.grid(opt_lambda_index, opt_lambda_index), 1, function(ddd){ aaa = ddd[1] bbb = ddd[2] if( abs(aaa - bbb) < 1e-8){ return(1) }else{ return( (1 + gamma * Theta1[aaa]) * (1 + gamma * Theta1[bbb]) * ( lambda[aaa] * Theta1[aaa] - lambda[bbb] * Theta1[bbb]) / ( (lambda[aaa] - lambda[bbb]) * sqrt(Theta2[aaa] * Theta2[bbb])) ) } }), nrow = length(opt_lambda_index), ncol = length(opt_lambda_index) ) ## square root of G ## G_eigen = eigen(G,symmetric=T) ### project G to the ''closest'' nonnegative definite matrix G_evec = G_eigen$vectors G_eval = G_eigen$values G_eval_plus = G_eval * (G_eval >= 0) G_sqrt = G_evec %*% diag(sqrt(G_eval_plus)) # standardized statistics RHT = sapply(lambda[opt_lambda_index], function(xx){ (1/p) * sum( proj_diff^2 / (emp_eig + xx))} ) if(Type1error_calib[1] != "chi_sq"){ if(Type1error_calib[1] == "cube_root"){ RHT_std = {sqrt(p) * ( RHT^(1/3) - (Theta1[opt_lambda_index])^(1/3)) / sqrt(2*Theta2[opt_lambda_index]) / (1 / 3 * Theta1[opt_lambda_index]^(-2/3))} } if(Type1error_calib[1] == "sqrt"){ RHT_std = {sqrt(p) * (sqrt(RHT) - sqrt(Theta1[opt_lambda_index]))/ sqrt(Theta2[opt_lambda_index] / 2 / Theta1[opt_lambda_index])} } if(Type1error_calib[1] == "none"){ RHT_std = (RHT - Theta1[opt_lambda_index]) / sqrt(2 * Theta2[opt_lambda_index] / p) } # p-values if(length(prob_alt_prior) == 1){ p_value = 1 - pnorm(RHT_std) composite_p_value = p_value }else{ p_value = 1 - pnorm(RHT_std) Tmax = apply(G_sqrt %*% bootstrap_sample,2,max) composite_p_value = 1 - mean(max(RHT_std)>Tmax) } } if(Type1error_calib[1] == "chi_sq"){ if(length(prob_alt_prior) == 1L){ # when one prior model is specified, no need for bootstrap constant_coef = Theta2[opt_lambda_index] / Theta1[opt_lambda_index] degree_freedom = p * (Theta1[opt_lambda_index])^2 / Theta2[opt_lambda_index] p_value = 1 - pchisq( p * RHT / constant_coef, df = degree_freedom) composite_p_value = p_value }else{ if( 
length(prob_alt_prior) == 2L){ # Trick: add dummy variables to make the length of opt_lambda_index when less than 3 priors are specified # max(RHT(lambda_1), RHT(lambda_2), RHT(lambda_1)) = max(RHT(lambda_1), RHT(lambda_2)) length3_opt_lambda_index = c(opt_lambda_index, opt_lambda_index[1]) # expand G to 3 variables G_tmp = G_sqrt %*% t(G_sqrt) G_expand = rbind( cbind(G_tmp, c(1, G_tmp[1,2])), c(1, G_tmp[1,2], 1)) }else{ # when 3 prior models are specified length3_opt_lambda_index = opt_lambda_index G_expand = G_sqrt %*% t(G_sqrt) } # Call r3chisq to get the generated 3-vairate chi-square bootstrap sample constant_coef = Theta2[length3_opt_lambda_index] / Theta1[length3_opt_lambda_index] degree_freedom = ceiling( p * (Theta1[length3_opt_lambda_index])^2 / Theta2[length3_opt_lambda_index]) chisq = r3chisq(size = bs_size, df = degree_freedom, corr_mat = G_expand)$sample # standardize the bootstrap sample T1sample = (1/p*constant_coef[1] * chisq[,1] - Theta1[opt_lambda_index[1]])/sqrt(2*Theta2[opt_lambda_index[1]]/p) T2sample = (1/p*constant_coef[2] * chisq[,2] - Theta1[opt_lambda_index[2]])/sqrt(2*Theta2[opt_lambda_index[2]]/p) T3sample = (1/p*constant_coef[3] * chisq[,3] - Theta1[opt_lambda_index[3]])/sqrt(2*Theta2[opt_lambda_index[3]]/p) Tmax = pmax(T1sample, T2sample, T3sample) p_value = 1 - pchisq( p * RHT / constant_coef[1:length(RHT)], df = degree_freedom[1:length(RHT)]) RHT_std = (RHT - Theta1[opt_lambda_index])/sqrt(2*Theta2[opt_lambda_index]/p) composite_p_value = 1 - mean(max(RHT_std) > Tmax) } } return(list(ARHT_pvalue = composite_p_value, RHT_pvalue = p_value, RHT_std = RHT_std, RHT_opt_lambda = lambda[opt_lambda_index], Theta1 = Theta1[opt_lambda_index], Theta2 = Theta2[opt_lambda_index], Corr_RHT = G )) }
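# A small follow-up sketch, assuming the ARHT() definition above: when a single
# prior model is supplied there is nothing to combine, so the adaptive p-value
# coincides with the p-value of the single regularized Hotelling test and no
# bootstrap is involved.
set.seed(1)
X <- matrix(rnorm(60 * 100), nrow = 60, ncol = 100)
res <- ARHT(X, prob_alt_prior = list(c(1, 0, 0)))
all.equal(res$ARHT_pvalue, res$RHT_pvalue)   # TRUE
res$RHT_opt_lambda                           # lambda chosen to maximize power under this prior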
/scratch/gouwar.j/cran-all/cranData/ARHT/R/ARHT.R
#' An internal function #' @name eigen_proj_1samp #' @keywords internal eigen_proj_1samp = function(X, mu_0, lower_lambda = NULL){ N = nrow(X) p = ncol(X) nn = N-1L #gamma = p/nn X_bar = colMeans(X) half_S = 1 / sqrt(nn) * t(X) %*% (diag(1,nrow = N, ncol = N) - 1/N * matrix(1,N,N)) # S = half_S %*% t(half_S) svd_half_S = try(svd(half_S, nu = p , nv = 0), silent = TRUE) # Handle the situation where svd fails to converge if(inherits(svd_half_S,"try-error")){ S = half_S %*% t(half_S) # If lower_lambda specified, add the lower bound to S, then svd. # If not specified, generate recommended lower bound. # Initially (mean(diag(S))/100), if not enough, ridge = 1.5 * ridge. if(!is.null(lower_lambda)){ ridge = lower_lambda svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p , nv = 0), silent = TRUE) if(inherits(svdofS_ridge, "try-error")){ stop("The lower bound of lambda sequence is too small.") } }else{ # the generated lower bound of lambda sequence ridge = (mean(diag(S))/100) svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p , nv = 0), silent = TRUE) loop_counter = 0L while(inherits(svdofS_ridge, "try-error") && loop_counter<=20L){ ridge = ridge * 1.5 loop_counter = loop_counter +1L svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p , nv = 0), silent = TRUE) } if(loop_counter > 20L) stop("singular value algorithm in svd() did not converge") } # projection of X_bar - mu_0 to the eigenspace of S. emp_evec = svdofS_ridge$u # To speed up the computation of Stieltjes transform, separate positive eigenvalues and negative ones. emp_eig = (svdofS_ridge$d - ridge) * (svdofS_ridge$d >= ridge) positive_emp_eig = emp_eig[emp_eig > (1e-8) * mean(emp_eig)] #num_zero_emp_eig = p - length(positive_emp_eig) # number of 0 eigenvalues }else{ emp_evec = svd_half_S$u eig_raw = svd_half_S$d^2 positive_emp_eig = eig_raw[eig_raw > (1e-8 ) * mean(eig_raw)] num_zero_emp_eig = p - length(positive_emp_eig) emp_eig = c(positive_emp_eig, rep(0, times = num_zero_emp_eig)) if(!is.null(lower_lambda)){ ridge = lower_lambda }else{ ridge = (mean(emp_eig)/100) } } proj = as.vector(sqrt(N) * t(emp_evec) %*% (X_bar - mu_0)) return(list(n = nn, pos_eig_val = positive_emp_eig, proj_shift = proj, lower_lambda = ridge)) }
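# A quick numerical check of the decomposition returned above, assuming
# eigen_proj_1samp() is sourced: the projected shift and eigenvalues reproduce
# the ridge-regularized quadratic form
#   N * (xbar - mu0)' (S + lambda I)^(-1) (xbar - mu0),
# with S the sample covariance matrix, which is exactly how ARHT() builds its
# RHT statistic (up to the 1/p factor).
set.seed(2)
X <- matrix(rnorm(40 * 6), 40, 6)
mu0 <- rep(0, 6)
ep <- eigen_proj_1samp(X, mu0)
lambda <- 0.3
eig_all <- c(ep$pos_eig_val, rep(0, ncol(X) - length(ep$pos_eig_val)))
lhs <- sum(ep$proj_shift^2 / (eig_all + lambda))
xbar <- colMeans(X)
rhs <- nrow(X) * drop(t(xbar - mu0) %*% solve(cov(X) + lambda * diag(6)) %*% (xbar - mu0))
all.equal(lhs, rhs)   # TRUE up to numerical error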
/scratch/gouwar.j/cran-all/cranData/ARHT/R/eigen_proj_1samp.R
#' An internal function #' @name eigen_proj_2samp #' @keywords internal eigen_proj_2samp = function(X, Y, mu_0, lower_lambda = NULL){ #X = matrix(rnorm(100 * 50),100, 50) #Y = matrix(rnorm(200 * 50),200, 50) N1 = nrow(X) N2 = nrow(Y) p = ncol(X) nn = N1 + N2 - 2 #gamma = p/nn X_bar = colMeans(X) Y_bar = colMeans(Y) Z = cbind(t(X),t(Y)) design_mat = rbind(c(rep(1,times = N1), rep(0,times =N2)), c(rep(0,times = N1),rep(1,times =N2))) half_S = 1/sqrt(nn) * Z %*% (diag(1, nrow = N1+N2 ) - t(design_mat) %*% diag(c(1/N1, 1/N2), nrow =2) %*% design_mat) svd_half_S = try(svd(half_S, nu = p , nv = 0), silent = TRUE) # Handle the situation where svd fails to converge if(inherits(svd_half_S,"try-error")){ S = (cov(X) * (N1-1) + cov(Y) * (N2-1)) / nn # or svd_half_S %*% t(svd_half_S) # If lower_lambda specified, add the lower bound to S, then svd. # If not specified, generate recommended lower bound. # Initially (mean(diag(S))/100), if not enough, ridge = 1.5 * ridge. if(!is.null(lower_lambda)){ ridge = lower_lambda svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p , nv = 0), silent = TRUE) if(inherits(svdofS_ridge, "try-error")){ stop("The lower bound of lambda sequence is too small.") } }else{ # the generated lower bound of lambda sequence ridge = (mean(diag(S))/100) svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p, nv = 0), silent = TRUE) loop_counter = 0 while(inherits(svdofS_ridge, "try-error") & loop_counter<=20){ ridge = ridge * 1.5 loop_counter = loop_counter +1 svdofS_ridge = try(svd(S + diag(ridge, nrow = p), nu = p, nv = 0), silent = TRUE) } if(loop_counter > 20) stop("singular value algorithm in svd() did not converge") } # projection of X_bar - mu_0 to the eigenspace of S. emp_evec = svdofS_ridge$u # To speed up the computation of Stieltjes transform, separate positive eigenvalues and negative ones. emp_eig = (svdofS_ridge$d - ridge) * (svdofS_ridge$d >= ridge) positive_emp_eig = emp_eig[emp_eig > 1e-8 * mean(emp_eig)] #num_zero_emp_eig = p - length(positive_emp_eig) # number of 0 eigenvalues }else{ emp_evec = svd_half_S$u eig_raw = svd_half_S$d^2 positive_emp_eig = eig_raw[eig_raw > (1e-8) * mean(eig_raw)] num_zero_emp_eig = p - length(positive_emp_eig) emp_eig = c(positive_emp_eig, rep(0, times = num_zero_emp_eig) ) if(!is.null(lower_lambda)){ ridge = lower_lambda }else{ ridge = (mean(emp_eig)/100) } } proj = as.vector(sqrt((N1*N2/(N1+N2))) * t(emp_evec) %*% (X_bar - Y_bar - mu_0)) return(list(n = nn, pos_eig_val =positive_emp_eig, proj_shift = proj, lower_lambda = ridge)) }
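# The analogous check for the two-sample projection, assuming eigen_proj_2samp()
# is sourced: here the shift is scaled by sqrt(N1*N2/(N1+N2)) and S is the
# pooled covariance matrix with N1 + N2 - 2 degrees of freedom.
set.seed(3)
X <- matrix(rnorm(30 * 5), 30, 5)
Y <- matrix(rnorm(50 * 5), 50, 5)
ep2 <- eigen_proj_2samp(X, Y, mu_0 = rep(0, 5))
lambda <- 0.5
eig_all <- c(ep2$pos_eig_val, rep(0, ncol(X) - length(ep2$pos_eig_val)))
S_pool <- (cov(X) * (nrow(X) - 1) + cov(Y) * (nrow(Y) - 1)) / ep2$n
d <- colMeans(X) - colMeans(Y)
lhs <- sum(ep2$proj_shift^2 / (eig_all + lambda))
rhs <- (nrow(X) * nrow(Y) / (nrow(X) + nrow(Y))) *
  drop(t(d) %*% solve(S_pool + lambda * diag(5)) %*% d)
all.equal(lhs, rhs)   # TRUE up to numerical error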
/scratch/gouwar.j/cran-all/cranData/ARHT/R/eigen_proj_2samp.R
#' Consistent estimators of high-order moments of the population spectral distribution for high-dimensional data #' @description The function calculates consistent estimators of moments of the spectral distribution #' of the population covariance matrix given the spectral of the sample covariance matrix. #' @keywords population spectral moments estimators #' @name moments_PSD #' @param eigenvalues all eigenvalues of the sample covariance matrix including 0's. #' @param n degree of freedom of the sample covariance matrix. #' @param mom_degree the maximum order of moments. #' @return Estimators of moments from the first to the \code{mom_degree} -th order. #' @references Bai, Z., Chen, J., & Yao, J. (2010). #'\emph{On estimation of the population spectral distribution from a high-dimensional sample covariance matrix.} #' Australian & New Zealand Journal of Statistics, 52(4), 423-437. #' @examples #' set.seed(10086) #' n = 400; p= 500 #' pop_eig = seq(10,1,length = p) #' # Data with covariance matrix diag(pop_eig) #' Z = matrix(rnorm(n*p),n,p) #' X = Z %*% diag(sqrt(pop_eig)) #' raw_eig = svd(cov(X))$d #' emp_eig = raw_eig[raw_eig>=0] #' # Moments of population spectral distribution #' colMeans(outer(pop_eig, 1:4, "^")) #' # Estimators #' moments_PSD(emp_eig, n-1, 4) #' @export moments_PSD = function( eigenvalues, n, mom_degree){ if(!is.vector(eigenvalues, mode = "numeric")){ stop("eigenvalues must be a numeric vector") }else{ if( any(eigenvalues<0)){ stop("eigenvalues must be nonnegative.") }else{ eigenvalues = sort(eigenvalues, decreasing = TRUE) p = length(eigenvalues) } } if(!is.numeric(n)){ stop("n must be a numeric atomic") } if(length(n)!= 1){ stop("length of n should be 1.") } if(n<=0){ stop("n must be positive.") } if(ceiling(n) != n){ stop("n must be integer.") } if(!is.numeric(mom_degree)){ stop("mom_degree must be numeric") } if(length(mom_degree)!= 1){ stop("length of mom_degree should be 1.") } if(mom_degree <1){ stop("mom_degree must be no less than 1.") } if(ceiling(mom_degree) != mom_degree){ stop("mom_degree must be integer.") } gamma = p/n if(p >= n){ emp_dual = eigenvalues[1:n] }else{ emp_dual = c(eigenvalues, rep(0,n-p)) } emp_moments = colMeans(outer( emp_dual, 1:mom_degree, FUN = '^')) pop_moments = numeric(mom_degree) pop_moments[1] = mean(eigenvalues) if(mom_degree > 1){ # recursive formulas; cannot be parallel for( kk in 2:mom_degree){ max_values = kk %/% (1:(kk-1)) # The maximum possible value of i1,..., i_(kk-1) because j * i_{j}<= kk possible_values = mapply(seq, 0, max_values, SIMPLIFY = FALSE) # Possible values of i partitions = t(expand.grid(possible_values)) # all possible partitions valid_partitions = partitions[, colSums(partitions * (1:(kk-1))) == kk, drop = FALSE] # valid partitions # Coefficients fac1 = (gamma^(colSums(valid_partitions))) # gamma^(i1+i2+i3+...+ij) fac2 = factorial(kk) / apply(factorial(valid_partitions), 2, prod) # fac/fac3 is the phi in equation (8) of the reference fac3 = factorial(kk+1 - colSums(valid_partitions)) offset = sum( fac1 * fac2 /fac3 * apply( (pop_moments[1:(kk-1)])^valid_partitions, 2, prod )) pop_moments[kk]= (emp_moments[kk] - offset) / gamma } } return(pop_moments) }
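# A second quick check, assuming moments_PSD() above: under an identity
# population covariance all population spectral moments equal 1, so the
# corrected estimators should be close to 1 even when p/n is not small, while
# the raw moments of the sample eigenvalues are inflated for orders >= 2.
set.seed(4)
n <- 300; p <- 200
Z <- matrix(rnorm(n * p), n, p)
emp_eig <- eigen(cov(Z), symmetric = TRUE, only.values = TRUE)$values
emp_eig <- emp_eig[emp_eig >= 0]
colMeans(outer(emp_eig, 1:3, "^"))   # raw moments of the sample spectrum
moments_PSD(emp_eig, n - 1, 3)       # corrected estimates, all close to 1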
/scratch/gouwar.j/cran-all/cranData/ARHT/R/moments_PSD.R
#' 3-variate positively correlated chi-squared sample generation when degrees of freedom are large #'@description Generate samples approximately from three positively correlated chi-squared random variables #' \eqn{(\chi^2(d_1), \chi^2(d_2), \chi^2(d_3))} #' when the degrees of freedom \eqn{(d_1, d_2, d_3)} are large. #' @name r3chisq #' @details It is generally hard to sample from \eqn{(\chi^2(d_1), \chi^2(d_2), \chi^2(d_3))} with a designed correlation matrix. #' In the algorithm, we approximate the random vector by \eqn{(z^T Q_1 z, z^T Q_2 z, z^T Q_3 z)} #' where \eqn{z} is a standard norm random vector and \eqn{Q_1,Q_2,Q_3} are diagonal matrices #' with diagonal elements 1's and 0's. The designed positive correlations is approximated by carefully #' selecting common locations of 1's on the diagonals. The generated sample may have slightly larger marginal degrees #' of freedom than the inputted \code{df}, also slightly different covariances. #' @param size sample size. #' @param df the degree of freedoms of the marginal distributions. Must be non-negative, but can be non-integer. #' The function uses \code{ceiling(df)} if non-integer. #' @param corr_mat the target correlation matrix; negative elements will be set to 0. #' @return #' \itemize{ #' \item{\code{sample}}: a \code{size}-by-3 matrix contains the generated sample. #' \item{\code{approx_cov}}: the true covariance matrix of \code{sample}.} #' @references Li, H., Aue, A., Paul, D., Peng, J., & Wang, P. (2016). \emph{ #' An adaptable generalization of Hotelling's \eqn{T^2} #' test in high dimension.} arXiv preprint <arXiv:1609.08725>. #' @examples #' set.seed(10086) #' cor_examp = matrix(c(1,1/6,2/3,1/6,1,2/3,2/3,2/3,1),3,3) #' a_sam = r3chisq(size = 10000, #' df = c(80,90,100), #' corr_mat = cor_examp) #' cov(a_sam$sample) - a_sam$approx_cov #' cov2cor(a_sam$approx_cov) - cor_examp #' @export r3chisq = function(size, df, corr_mat){ if(!is.numeric(size)){ stop("size must be numeric") } if(length(size) != 1L){ stop("length(size) must be 1") } if(!is.vector(df, mode = "numeric")){ stop("df must be a numeric vector") } if(length(df) != 3L){ stop("length(df) must be 3") } if(any(df < 0)){ stop("df must be positive") } df = ceiling(df) if( (length(dim(corr_mat)) > 2L) || !(is.numeric(corr_mat))){ stop("corr_mat must be a numeric matrix") } if(!is.matrix(corr_mat)){ corr_mat = as.matrix(corr_mat) } if(!isSymmetric(corr_mat)){ stop("corr_mat must be symmetric") } corr_mat = corr_mat * (corr_mat >= 0) half_covariances = floor(c((df[1])^(1/2) * (df[2])^(1/2) * corr_mat[1,2], (df[1])^(1/2) * (df[3])^(1/2) * corr_mat[1,3], (df[2])^(1/2) * (df[3])^(1/2) * corr_mat[2,3])) # Build three diag matrices Q1, Q2, Q3. 
z^TQ_1z, z^TQ_2z, z^TQ_3z would be the chi-square vector # See supplementary material for algorithm explanation mincov = min(half_covariances) part1 = c(rep(1, mincov), rep(1, half_covariances[1]-mincov), rep(1, half_covariances[2]-mincov), rep(0, half_covariances[3]-mincov)) part2 = c(rep(1, mincov), rep(1, half_covariances[1]-mincov), rep(0, half_covariances[2]-mincov), rep(1, half_covariances[3]-mincov)) part3 = c(rep(1, mincov), rep(0, half_covariances[1]-mincov), rep(1, half_covariances[2]-mincov), rep(1, half_covariances[3]-mincov)) ramainder1 = max(df[1] - sum(part1), 0) ramainder2 = max(df[2] - sum(part2), 0) ramainder3 = max(df[3] - sum(part3), 0) Q1 = c(part1, rep(1, ramainder1), rep(0, ramainder2), rep(0, ramainder3)) Q2 = c(part2, rep(0, ramainder1), rep(1, ramainder2), rep(0, ramainder3)) Q3 = c(part3, rep(0, ramainder1), rep(0, ramainder2), rep(1, ramainder3)) Qs = cbind(Q1, Q2, Q3) #chi-squared(1) sample bootstrap_sample = matrix((rnorm(length(Q1) * size))^2, nrow = size, ncol = length(Q1)) chisq = bootstrap_sample %*% Qs approx_cov = 2* matrix(c(sum(Q1), half_covariances[1], half_covariances[2], half_covariances[1], sum(Q2), half_covariances[3], half_covariances[2], half_covariances[3], sum(Q3)), nrow = 3, ncol = 3) return(list(sample = chisq, approx_cov = approx_cov)) }
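# A follow-up check, assuming r3chisq() above: with a diagonal target
# correlation no chi-squared(1) components are shared between the three
# quadratic forms, so the marginal means of the generated sample match the
# requested degrees of freedom (diag(approx_cov)/2); with positive correlations
# the marginal degrees of freedom can be slightly larger, as noted in @details.
set.seed(9)
out <- r3chisq(size = 2e4, df = c(40, 50, 60), corr_mat = diag(3))
round(colMeans(out$sample), 1)   # approximately 40, 50, 60
diag(out$approx_cov) / 2         # exactly 40, 50, 60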
/scratch/gouwar.j/cran-all/cranData/ARHT/R/r3chisq.R
ARIMAANN<-function(data,h){ pp=auto.arima(data)# ARIMA model fitting summary(pp) kk=pp$residuals # Residuals obtained from the fitted ARIMA model kk1=pp$fitted # Fitted values of ARIMA model x <- as.ts(kk) w=terasvirta.test(x)# Checking the suitability of data for hybrid modelling p1=w$p.value if (p1>0.05){ Test_Result<- (" Data is not suitable for hybrid modelling") } else { Test_Result<- ("Data is suitable for hybrid modelling") } p2=pp$coef #coefficients of the ARIMA fitted model pvalues<-(1-pnorm(abs(pp$coef)/sqrt(diag(pp$var.coef))))*2 # p values of the coefficients ff<-forecast(pp, h) ff1=ff$mean zz<-nnetar(kk) zz1<-zz$model # ANN model summary zz2<-zz$p fitv=fitted(zz) ff2<-forecast(zz, h) ff3=ff2$mean pp1=fitv[(zz2+1):length(fitv)] pp2=kk1[(zz2+1):length(kk1)] pp3=abs(pp1+pp2) pp3 pp4=data[(zz2+1):length(data)] pp5=fitv[1:zz2] Mape=mean(abs((pp4-pp3)/pp4))*100 # MAPE of the hybrid model Mse=mean((pp4-pp3)^2)#MSE of the hybrid model. pp6=c(pp5, pp3)# final fitted values from hybrid model ff4=ff1+ff3#final forecasted values employing hybrid model return(list(Test_Result, "ARIMA coefficients"=p2, pvalues=pvalues,"ANN Summary"=zz1,"MAPE"=Mape,"MSE"=Mse, "fitted"=pp6, "forecasted.values"=ff4))}
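# A minimal usage sketch, assuming the 'forecast' and 'tseries' packages are
# installed (ARIMAANN() calls auto.arima(), nnetar(), forecast() and
# terasvirta.test() from them):
library(forecast)
library(tseries)
set.seed(8)
y <- ts(100 + 0.3 * (1:120) + arima.sim(list(ar = 0.7), n = 120), frequency = 12)
fit <- ARIMAANN(y, h = 6)
fit$MAPE                  # in-sample MAPE of the hybrid ARIMA-ANN fit
fit$forecasted.values     # 6-step-ahead forecasts from the hybrid model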
/scratch/gouwar.j/cran-all/cranData/ARIMAANN/R/ARIMAANN.R
#' @title Valid Circular Inference (ARI) for Brain Imaging #' @description Valid Circular Inference (ARI) for Brain Imaging #' @param Pmap 3D array of p-values or a (character) nifti file name. #' @param clusters 3D array of cluster ids (0 when voxel does not belong to any cluster) #' or a (character) nifti file name. #' @param mask 3D array of locicals (i.e. \code{TRUE}/\code{FALSE} in/out of the brain). Alternatively #' it may be a (character) nifti file name. If \code{mask=NULL}, it is assumed that non of the voxels have to be excluded. #' @param alpha Significance level. \code{alpha=.05} by default. #' @param Statmap Statistics (usually t-values) on which the summaries are based. Can be either #' a 3D array, a (character) nifti file name or a function with argument \code{ix} used in the function to select the voxels belonging to a given cluster. #' By default \code{Statmap = function(ix) -qnorm(Pmap[ix])} which convert the p-values in one-sided z-score. #' @param summary_stat Choose among \code{=c("max", "center-of-mass")}. #' @param silent \code{FALSE} by default. #' @examples #' pvalue_name <- system.file("extdata", "pvalue.nii.gz", package="ARIbrain") #' cluster_name <- system.file("extdata", "cluster_th_3.2.nii.gz", package="ARIbrain") #' zstat_name <- system.file("extdata", "zstat.nii.gz", package="ARIbrain") #' mask_name <- system.file("extdata", "mask.nii.gz", package="ARIbrain") #' #' print(mask_name) #' print(pvalue_name) #' print(cluster_name) #' print(zstat_name) #' #' ARI(Pmap = pvalue_name, clusters= cluster_name, #' mask=mask_name, Statmap = zstat_name) #' #' @return A \code{matrix} reporting Size, FalseNull, TrueNull, ActiveProp and other statistics for each cluster. #' @export #' @import hommel ARI <- function(Pmap, clusters, mask=NULL, alpha=.05,Statmap=function(ix) -qnorm(Pmap[ix]), summary_stat=c("max", "center-of-mass"),silent=FALSE){ # get, fix, check parameters inconsistencies Pmap = get_array(Pmap) clusters = get_array(clusters,map_dims=dim(Pmap)) mask = get_array(mask,map_dims=dim(Pmap)) if(is.function(Statmap)) { StatFun=Statmap } else { Statmap= get_array(Statmap,map_dims=dim(Pmap)) StatFun <- function(ix) Statmap[ix] } # called=match.call() summary_stat=match.arg(summary_stat,c("max", "center-of-mass")) # get the indices of the mask mask=which(mask!=0) #perform hommel hom <-hommel(Pmap[mask]) if(!silent) {temp=(summary(hom)) cat("\n")} # define number of clusters clstr_id=sort(unique(as.vector(clusters[mask])),decreasing = TRUE) #apply summaries to each cluster (and all the rest in an extra cluster) out=plyr::laply(clstr_id,function(i){ ix=clusters==i ix[-mask]=FALSE cluster_ids=which(ix,arr.ind = TRUE) cluster_ids=cbind(cluster_ids,Stat=StatFun(ix)) unlist(c(summary_hommel_roi(hommel = hom,ix=ix[mask]), summary_cluster(cluster_ids)[-1]) ) }) rownames(out)=paste("cl",sep="",clstr_id) # attr(out,"call")=called if(!silent) print(out) out }
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/ARI.R
#' @description It performs All-Resolutions Inference on fMRI data. As a main feature, #' it estimates lower bounds for the proportion of active voxels in a set of clusters as, for example, given by a cluster-wise analysis. #' @author all of us #' @docType package #' @name ARIbrain-package #' @title All-Resolutions Inference #' @import hommel #' @importFrom stats cutree dist hclust qnorm #' @examples #' pvalue_name <- system.file("extdata", "pvalue.nii.gz", package="ARIbrain") #' cluster_name <- system.file("extdata", "cluster_th_3.2.nii.gz", package="ARIbrain") #' zstat_name <- system.file("extdata", "zstat.nii.gz", package="ARIbrain") #' mask_name <- system.file("extdata", "mask.nii.gz", package="ARIbrain") #' #' ARI(Pmap = pvalue_name, clusters= cluster_name, #' mask=mask_name, Statmap = zstat_name) #' NULL
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/ARIbrain-package.R
#' @title cluster_threshold
#' @description Get spatially-connected clusters starting from a 3D map of logical values
#' @param map 3D map of logical values. \code{TRUE} if the voxel is to be clustered (e.g. it is supra-threshold).
#' @param max_dist maximum distance allowed between voxels in the same cluster. By default:
#' \code{max_dist=sqrt(3)} i.e. comprises all the voxels up to the corners surrounding the target voxel. A value such as
#' \code{max_dist=sqrt(2)} excludes the corners.
#' @return a 3D map (same size as \code{map}) with integer values identifying the cluster and 0 elsewhere.
#' @examples
#' \dontrun{
#' Tmap = RNifti::readNifti(system.file("extdata", "zstat.nii.gz", package="ARIbrain"))
#' clstr=cluster_threshold(Tmap>3.2)
#' table(clstr)
#' }
#' @export
#'
cluster_threshold <- function(map, max_dist=sqrt(3)){
  ### slower alternative (kept for reference):
  # map=spmT
  # threshold=3.2
  # nmat <- expand.grid(-1:1, -1:1, -1:1)
  # nmat <- nmat[-c(1,3,7,9,14,19,21,25,27), ]
  # system.time(
  # {Suprathreshold_TF = cluster.threshold(spmT>=3.2, nmat=nmat,size.thr = .5)})
  # table(Suprathreshold_TF)
  # an alternative and faster way: single-linkage clustering of the supra-threshold voxel coordinates
  Suprathreshold_TF=which(map,arr.ind = TRUE)
  #########
  dd = dist(Suprathreshold_TF)
  hc = hclust(dd, "single")
  # plot(hc)
  # ct = cutree(hc,k=5)
  # pander(table(ct))
  #
  # cutting the tree at height max_dist keeps in the same cluster only voxels connected by steps of at most max_dist
  ct = cutree(hc,h=max_dist)
  ## sort the cluster names on the basis of their size
  new_cluster_names=rank(table(ct),ties.method = "random")
  ct_new=rep(NA,length(ct))
  for(i in 1:length(new_cluster_names)){
    ct_new[ct==as.numeric(names(new_cluster_names)[i])]=new_cluster_names[i]
  }
  # table(ct,ct_new)
  # table(ct_new)
  ct=ct_new
  rm(ct_new)
  #########
  cluster_map=array(0,dim(map))
  cluster_map[map] =ct
  # pander::pander(table(cluster_map))
  # print(table(cluster_map))
  cluster_map
}
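# A self-contained sketch on synthetic data, assuming the ARI() and
# cluster_threshold() definitions above (ARI() also needs the 'hommel' and
# 'plyr' packages): simulate a small 3D volume with one truly active block,
# form supra-threshold clusters, and read off the lower confidence bound on the
# proportion of active voxels (ActiveProp) within each cluster.
library(hommel)
set.seed(5)
dims <- c(12, 12, 12)
Tmap <- array(rnorm(prod(dims)), dims)
Tmap[4:6, 4:6, 4:6] <- Tmap[4:6, 4:6, 4:6] + 4    # truly active block
Pmap <- pnorm(-Tmap)                              # one-sided p-values
mask <- array(TRUE, dims)
clstr <- cluster_threshold(Tmap > 2.5)
res <- ARI(Pmap, clusters = clstr, mask = mask, Statmap = Tmap)
res[, "ActiveProp"]   # simultaneously valid lower bounds, one per cluster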
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/cluster_threshold.R
# @param coord_and_values \code{array} or \code{data.frame} whose first columns contain the voxel coordinates
# (as produced by \code{which(..., arr.ind = TRUE)} in \code{ARI}) and whose last column contains the values
# (usually t-values) to be summarized.
# @param summary_stat \code{=c("max", "center-of-mass")}
# @return a \code{list} with the size of the cluster, the coordinates of the max or of the center-of-mass and its value
# note: the coordinates are voxel indices, not real-space (mm) coordinates; the voxel size is stored in the
# pixdim attribute of the nifti header, e.g.
# spmP = readNifti(paste(sep="","./",data_folder,"/pvalue_stat1.nii.gz"))
# str(spmP)
# niftiImage [1:91, 1:109, 1:91] 0.5 0.5 0.5 ...
# - attr(*, "pixdim")= num [1:3] 2 2 2
# - attr(*, "pixunits")= chr [1:2] "mm" "s"
summary_cluster <- function(coord_and_values, summary_stat=c("max", "center-of-mass")){
  # compute the peak voxel and/or the centre of gravity of the cluster
  name_stat = colnames(coord_and_values)[ncol(coord_and_values)]
  summary_stat = match.arg(summary_stat, c("max", "center-of-mass"))
  out = list(Size=nrow(coord_and_values))
  if(summary_stat=="max"){
    # row with the largest statistic (last column)
    id_max = which.max(coord_and_values[, ncol(coord_and_values)])
    out = c(out, coord_and_values[id_max, ])
  } else if(summary_stat=="center-of-mass"){
    # voxel closest to the baricenter of the cluster coordinates (all columns but the last)
    id_mean = colMeans(coord_and_values[, -ncol(coord_and_values), drop=FALSE])
    id_closest_to_baricenter = which.min(rowSums(t(t(coord_and_values[, -ncol(coord_and_values), drop=FALSE]) - id_mean)^2))
    out = c(out, coord_and_values[id_closest_to_baricenter, ])
    names(out)[names(out)==name_stat] = summary_stat
  }
  out
}
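# A tiny illustration, assuming summary_cluster() above: ARI() passes a matrix
# whose first three columns are voxel coordinates (from which(..., arr.ind = TRUE))
# and whose last column is the statistic, so "max" reports the peak voxel of the
# cluster together with its statistic value.
coords <- cbind(dim1 = c(1, 2, 3), dim2 = c(5, 5, 6), dim3 = c(2, 2, 2),
                Stat = c(1.5, 4.2, 3.1))
summary_cluster(coords, summary_stat = "max")
# returns Size = 3 plus the coordinates and Stat of the peak row (row 2)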
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/get_summary_cluster.R
# @description computes the alpha-level lower confidence bound for the number and proportion of active (false null) voxels in a selection.
#
summary_hommel_roi <- function(hommel, ix, alpha = 0.05){
  Total = length(hommel@p[ix])
  False_Null = hommel::discoveries(hommel, alpha = alpha, ix = ix)
  True_Null = Total - False_Null
  # true discovery proportion at the same alpha level as the discovery count
  Active_Proportion = hommel::tdp(hommel, ix = ix, alpha = alpha)
  list(Size = Total, FalseNull = False_Null, TrueNull = True_Null, ActiveProp = Active_Proportion)
}
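# A minimal check, assuming summary_hommel_roi() above: for any selection of
# hypotheses, FalseNull is the alpha-level lower confidence bound on the number
# of false nulls in the selection and ActiveProp is the corresponding proportion
# of the selection.
set.seed(6)
p <- c(runif(50, 0, 1e-5), runif(450))   # 50 clearly active hypotheses
hom <- hommel::hommel(p)
summary_hommel_roi(hom, ix = 1:100, alpha = 0.05)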
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/summary_hommel_roi.R
# @title get_array
# @description get the array from a map parameter and make compatibility checks
# @param map array, or character giving the nii file (address and) name.
# @param map_dims vector with the 3 dimensions the map must agree with. If \code{NULL} (default) no checks are made.
get_array <- function(map, map_dims = NULL){
  if(is.null(map)){
    if(is.null(map_dims)) {
      stop("The dims of map are not defined")
    } else {
      map = array(TRUE, map_dims)
      return(map)
    }
  }
  if(is.character(map)) map = RNifti::readNifti(map)
  if(!is.null(map_dims))
    if(any(dim(map) != map_dims))
      # collapse the dimensions so the error message prints them as e.g. "91x109x91"
      stop("The dims of map: ", paste(dim(map), collapse = "x"),
           " don't fit the dims of map_dims: ", paste(map_dims, collapse = "x"))
  map
}
/scratch/gouwar.j/cran-all/cranData/ARIbrain/R/utils.R
## ------------------------------------------------------------------------ library(ARIbrain) pvalue_name <- system.file("extdata", "pvalue.nii.gz", package="ARIbrain") cluster_name <- system.file("extdata", "cluster_th_3.2.nii.gz", package="ARIbrain") zstat_name <- system.file("extdata", "zstat.nii.gz", package="ARIbrain") mask_name <- system.file("extdata", "mask.nii.gz", package="ARIbrain") res_ARI=ARI(Pmap = pvalue_name, clusters= cluster_name, mask=mask_name, Statmap = zstat_name) str(res_ARI) ## ------------------------------------------------------------------------ library(RNifti) Tmap = readNifti(system.file("extdata", "zstat.nii.gz", package="ARIbrain")) # compute p-values from Test statistic (refering to Normal distribution, right-side alternative) Pmap=pnorm(-Tmap) #Read the mask file. mask = RNifti::readNifti(system.file("extdata", "mask.nii.gz", package="ARIbrain")) # Make sure that it is a logical map by: ()!=0 mask=mask!=0 #Create Clusters using a threshold equal to 3.2 Tmap[!mask]=0 clstr=cluster_threshold(Tmap>3.2) table(clstr) res_ARI=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap) ## ------------------------------------------------------------------------ hom=hommel::hommel(Pmap[mask]) (thr_p=hommel::concentration(hom)) (thr_z=-qnorm(thr_p)) Tmap[!mask]=0 clstr=cluster_threshold(Tmap>thr_z) table(clstr) res_ARI_conc=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap)
/scratch/gouwar.j/cran-all/cranData/ARIbrain/inst/doc/ARIbrain_example.R
--- title: "Vignette for ARIbrain" author: "Livio Finos, Jelle Goeman, Wouter Weeda, Jonathan Rosenblatt, Aldo Solari" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Tutorial for ARIbrain package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- \VignetteEngine{knitr::rmarkdown} --> # Introduction `ARIbrain` is the package for All-Resolution Inference in neuroscience. Here we show how to compute lower bound for proportion of active voxels (or any other spatially located units) within given clusters. The clusters can be defined *a priori*, on the basis of previous knowledges or on the basis of anatomical regions. Clusters of such a kind are usually called ROIs. There are no limitations to the number of ROIs that can be evaluated in the same analysis; the lower bounds for each ROI is valid simultaneously for all estimates (i.e. corrected for multiplicity). Even more interestigly, the clusters can be defined on the basis of the same data. This is true, since the `ARI` allows for circular analysis, still controlling for multiplicity of inferences. In the following we show an analysis where clusters are defined by a supra-threshold-statistic rule. This is the typical case of cluster-wise analysis followed by a multiplicity correction based on Random Field Theory. Here we follow an alternative way: we provide lower bound for proportion for the estimate of active voxels. ## Sintax and parameters The sintax of the function is (type `?ARIbrain::ARI` for more details) `ARI(Pmap, clusters, mask = NULL, alpha = 0.05, Statmap = function(ix) -qnorm(Pmap[ix]), summary_stat = c("max", "center-of-mass"), silent = FALSE)` The main input parameters of `ARI()` are: - `Pmap`: the map of p-values and - `clusters`: the map of cluster index. The function accepts both character file names and 3D arrays. Therefore the minimal sintax is `ARI(Pmap, clusters)` Others maps (parameters) are: - `mask` which is a 3D array of logicals (i.e.`TRUE`/`FALSE` means in/out of the brain). Alternatively, it may be a (character) nifti file name. If omitted, all voxels are considered. - `Statmap` which is a 3D array of statistics (usually t-values) on which the summaries are based. File name is also accepted. # <a name="nii"> Performing the analysis from nifti (nii) files </a> In order to perfom the analysis you need: - a `zstat.nii.gz` containing the test statistic used in the analysis - a `mask.nii.gz` (not mandatory, but usefull) - a `cluster.nii.gz` image of cluster index. ## Making the map cluster.nii.gz with FSL You simply need to run on the shell: `cluster -z zstat1.nii.gz -t 3.2 -o cluster.nii.gz` This will create the `cluster.nii.gz` that you need. *hint*: In case it retun an error message like `cluster: error while loading shared libraries: libutils.so: cannot open shared object file: No such file or directory`, type into the shell (replacing the path with your own path of the file fsl.sh): `source /etc/fsl/5.0/fsl.sh` and try again. 
Get a complete help for FSL at <https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster> # ARI analysis ```{r} library(ARIbrain) pvalue_name <- system.file("extdata", "pvalue.nii.gz", package="ARIbrain") cluster_name <- system.file("extdata", "cluster_th_3.2.nii.gz", package="ARIbrain") zstat_name <- system.file("extdata", "zstat.nii.gz", package="ARIbrain") mask_name <- system.file("extdata", "mask.nii.gz", package="ARIbrain") res_ARI=ARI(Pmap = pvalue_name, clusters= cluster_name, mask=mask_name, Statmap = zstat_name) str(res_ARI) ``` # other ARI examples ## using arrays ```{r} library(RNifti) Tmap = readNifti(system.file("extdata", "zstat.nii.gz", package="ARIbrain")) # compute p-values from Test statistic (refering to Normal distribution, right-side alternative) Pmap=pnorm(-Tmap) #Read the mask file. mask = RNifti::readNifti(system.file("extdata", "mask.nii.gz", package="ARIbrain")) # Make sure that it is a logical map by: ()!=0 mask=mask!=0 #Create Clusters using a threshold equal to 3.2 Tmap[!mask]=0 clstr=cluster_threshold(Tmap>3.2) table(clstr) res_ARI=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap) ``` ## Define threshold and clusters on the basis of concentration set (optimal threshold) ```{r} hom=hommel::hommel(Pmap[mask]) (thr_p=hommel::concentration(hom)) (thr_z=-qnorm(thr_p)) Tmap[!mask]=0 clstr=cluster_threshold(Tmap>thr_z) table(clstr) res_ARI_conc=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap) ```
/scratch/gouwar.j/cran-all/cranData/ARIbrain/inst/doc/ARIbrain_example.Rmd
--- title: "Vignette for ARIbrain" author: "Livio Finos, Jelle Goeman, Wouter Weeda, Jonathan Rosenblatt, Aldo Solari" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Tutorial for ARIbrain package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- <!-- \VignetteEngine{knitr::rmarkdown} --> # Introduction `ARIbrain` is the package for All-Resolution Inference in neuroscience. Here we show how to compute lower bound for proportion of active voxels (or any other spatially located units) within given clusters. The clusters can be defined *a priori*, on the basis of previous knowledges or on the basis of anatomical regions. Clusters of such a kind are usually called ROIs. There are no limitations to the number of ROIs that can be evaluated in the same analysis; the lower bounds for each ROI is valid simultaneously for all estimates (i.e. corrected for multiplicity). Even more interestigly, the clusters can be defined on the basis of the same data. This is true, since the `ARI` allows for circular analysis, still controlling for multiplicity of inferences. In the following we show an analysis where clusters are defined by a supra-threshold-statistic rule. This is the typical case of cluster-wise analysis followed by a multiplicity correction based on Random Field Theory. Here we follow an alternative way: we provide lower bound for proportion for the estimate of active voxels. ## Sintax and parameters The sintax of the function is (type `?ARIbrain::ARI` for more details) `ARI(Pmap, clusters, mask = NULL, alpha = 0.05, Statmap = function(ix) -qnorm(Pmap[ix]), summary_stat = c("max", "center-of-mass"), silent = FALSE)` The main input parameters of `ARI()` are: - `Pmap`: the map of p-values and - `clusters`: the map of cluster index. The function accepts both character file names and 3D arrays. Therefore the minimal sintax is `ARI(Pmap, clusters)` Others maps (parameters) are: - `mask` which is a 3D array of logicals (i.e.`TRUE`/`FALSE` means in/out of the brain). Alternatively, it may be a (character) nifti file name. If omitted, all voxels are considered. - `Statmap` which is a 3D array of statistics (usually t-values) on which the summaries are based. File name is also accepted. # <a name="nii"> Performing the analysis from nifti (nii) files </a> In order to perfom the analysis you need: - a `zstat.nii.gz` containing the test statistic used in the analysis - a `mask.nii.gz` (not mandatory, but usefull) - a `cluster.nii.gz` image of cluster index. ## Making the map cluster.nii.gz with FSL You simply need to run on the shell: `cluster -z zstat1.nii.gz -t 3.2 -o cluster.nii.gz` This will create the `cluster.nii.gz` that you need. *hint*: In case it retun an error message like `cluster: error while loading shared libraries: libutils.so: cannot open shared object file: No such file or directory`, type into the shell (replacing the path with your own path of the file fsl.sh): `source /etc/fsl/5.0/fsl.sh` and try again. 
Get a complete help for FSL at <https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster> # ARI analysis ```{r} library(ARIbrain) pvalue_name <- system.file("extdata", "pvalue.nii.gz", package="ARIbrain") cluster_name <- system.file("extdata", "cluster_th_3.2.nii.gz", package="ARIbrain") zstat_name <- system.file("extdata", "zstat.nii.gz", package="ARIbrain") mask_name <- system.file("extdata", "mask.nii.gz", package="ARIbrain") res_ARI=ARI(Pmap = pvalue_name, clusters= cluster_name, mask=mask_name, Statmap = zstat_name) str(res_ARI) ``` # other ARI examples ## using arrays ```{r} library(RNifti) Tmap = readNifti(system.file("extdata", "zstat.nii.gz", package="ARIbrain")) # compute p-values from Test statistic (refering to Normal distribution, right-side alternative) Pmap=pnorm(-Tmap) #Read the mask file. mask = RNifti::readNifti(system.file("extdata", "mask.nii.gz", package="ARIbrain")) # Make sure that it is a logical map by: ()!=0 mask=mask!=0 #Create Clusters using a threshold equal to 3.2 Tmap[!mask]=0 clstr=cluster_threshold(Tmap>3.2) table(clstr) res_ARI=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap) ``` ## Define threshold and clusters on the basis of concentration set (optimal threshold) ```{r} hom=hommel::hommel(Pmap[mask]) (thr_p=hommel::concentration(hom)) (thr_z=-qnorm(thr_p)) Tmap[!mask]=0 clstr=cluster_threshold(Tmap>thr_z) table(clstr) res_ARI_conc=ARI(Pmap,clusters = clstr,mask = mask,Statmap = Tmap) ```
/scratch/gouwar.j/cran-all/cranData/ARIbrain/vignettes/ARIbrain_example.Rmd
#' @title Hybrid ARMA-LSTM Model for Time Series Forecasting
#' @description The linear ARMA model is fitted to the time series. The lag for the LSTM step is chosen from the significant PACF values of the ARMA residuals, and the LSTM model is then fitted to the ARMA residuals with this lag as the time step. Users need to install the keras, tensorflow and reticulate packages as prerequisites for using this package.
#' @param X A univariate time series
#' @param p Order of AR
#' @param q Order of MA
#' @param arfima Whether to include ARFIMA (0 < d < 0.5)
#' @param dist.model The distribution density to use for the innovation. The default distribution for the mean model is "ged". Other choices can be obtained from the rugarch package.
#' @param out.sample A positive integer indicating the number of periods before the last to keep for out-of-sample forecasting; these are treated as test data.
#' @param LSTM.units Number of units in the LSTM layer
#' @param ACTIVATION.function Activation function
#' @param DROPOUT Dropout rate
#' @param Optimizer Optimizer used for optimization of the LSTM model
#' @param Epochs Number of epochs of the LSTM model
#' @param LSTM.loss Loss function
#' @param LSTM.metrics Metrics
#' @import rugarch tseries keras tensorflow reticulate
#' @return
#' \itemize{
#' \item ARMA.fit: Parameters of the fitted ARMA model
#' \item ARMA.fitted: Fitted values of the ARMA model
#' \item ARMA.residual: Residual values of the ARMA model
#' \item ARMA.forecast: Forecast values obtained from the ARMA model for the test data
#' \item ARMA.residual.nonlinearity.test: BDS test results for the ARMA residuals
#' \item LSTM.lag: Lag used for the LSTM model
#' \item FINAL.fitted: Fitted values of the hybrid ARMA-LSTM model
#' \item FINAL.residual: Residual values of the hybrid ARMA-LSTM model
#' \item FINAL.forecast: Forecast values obtained from the hybrid ARMA-LSTM model for the test data
#' \item ACCURACY.MATRIX: RMSE, MAE and MAPE of the train and test data
#' }
#' @export
#'
#' @usage ARMA.LSTM(X, p, q, arfima = FALSE, dist.model= "ged", out.sample, LSTM.units,
#' ACTIVATION.function = "tanh", DROPOUT = 0.2, Optimizer ="adam", Epochs = 100,
#' LSTM.loss = "mse", LSTM.metrics = "mae")
#' @examples
#' \donttest{
#'y<-c(5,9,1,6,4,9,7,3,5,6,1,8,6,7,3,8,6,4,7,5)
#'my.hybrid<-ARMA.LSTM(y, p=1, q=0, arfima=FALSE, dist.model = "ged",
#'out.sample=10, LSTM.units=50, ACTIVATION.function = "tanh",
#'DROPOUT = 0.2, Optimizer ="adam", Epochs = 10, LSTM.loss = "mse", LSTM.metrics = "mae")
#'}
#' @references
#' \itemize{
#' \item Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015). Time series analysis: forecasting and control. John Wiley & Sons.
#' \item Granger, C. W., & Joyeux, R. (1980). An introduction to long-memory time series models and fractional differencing. Journal of Time Series Analysis, 1(1), 15-29.
#' \item Hochreiter, S., & Schmidhuber, J. (1997). Long short-term memory. Neural Computation, 9(8), 1735-1780.
#' \item Rakshit, D., Paul, R. K., & Panwar, S. (2021). Asymmetric price volatility of onion in India. Indian Journal of Agricultural Economics, 76(2), 245-260.
#' \item Rakshit, D., Paul, R. K., Yeasin, M., Emam, W., Tashkandy, Y., & Chesneau, C. (2023). Modeling Asymmetric Volatility: A News Impact Curve Approach. Mathematics, 11(13), 2793.
#' \item Rakshit, D., Roy, A., Atta, K., Adhikary, S., & Vishwanath. (2022). Modeling Temporal Variation of Particulate Matter Concentration at Three Different Locations of Delhi. International Journal of Environment and Climate Change, 12(11), 1831-1839.
#' }
ARMA.LSTM <- function (X, p, q, arfima = FALSE, dist.model = "ged", out.sample, LSTM.units,
                       ACTIVATION.function = "tanh", DROPOUT = 0.2, Optimizer = "adam", Epochs = 100,
                       LSTM.loss = "mse", LSTM.metrics = "mae")
{
  # ARMA (or ARFIMA) mean model fitted with rugarch, keeping 'out.sample' periods for testing
  arma.spec <- rugarch::arfimaspec(mean.model = list(armaOrder = c(p, q), include.mean = TRUE,
                                                     arfima = arfima, external.regressors = NULL),
                                   distribution.model = dist.model)
  arma.fit <- rugarch::arfimafit(spec = arma.spec, data = X, out.sample = out.sample)
  arma.fitted <- arma.fit@fit$fitted.values  # fitted values
  arma.residual <- arma.fit@fit$residuals    # residual values
  arma.forecast.temp <- rugarch::arfimaforecast(arma.fit, n.ahead = out.sample)  # forecast
  arma.forecast <- rugarch::fitted(arma.forecast.temp)
  arma.forecast <- as.data.frame(arma.forecast)

  # test for nonlinearity of ARMA residual
  nonlinearity.test <- tseries::bds.test(arma.residual)

  # PACF of ARMA residual: the last lag in a run of consecutive significant lags is used as the LSTM time step
  pacf.res <- stats::pacf(arma.residual)
  significant_pacf <- which(abs(pacf.res$acf) > (stats::qnorm(0.975)) / sqrt(length(X)))
  lag.lstm <- NULL
  for (i in 1:(length(significant_pacf) - 1)) {
    if (significant_pacf[i] == significant_pacf[i + 1] - 1) {
      lag.lstm <- significant_pacf[i + 1]
    }
  }

  ####### LSTM starts here #################
  data_LSTM <- arma.residual
  # Function to create lagged data matrix (first column = target, remaining columns = lags)
  create_lagged_matrix <- function(data, lag) {
    n <- length(data)
    matrix_data <- matrix(NA, nrow = n - lag, ncol = lag + 1)
    for (i in 1:(n - lag)) {
      matrix_data[i, ] <- data[(i + lag):i]
    }
    return(matrix_data)
  }
  # Create lagged matrix
  lagged_matrix <- create_lagged_matrix(data_LSTM, lag.lstm)
  LSTM.feature <- lagged_matrix[, -1]
  LSTM.target <- lagged_matrix[, 1]
  LSTM.feature <- array(LSTM.feature, dim = c(nrow(LSTM.feature), lag.lstm, 1))

  # LSTM model
  lstm_model <- keras::keras_model_sequential() %>%
    layer_lstm(units = LSTM.units, input_shape = c(lag.lstm, 1),
               activation = ACTIVATION.function, dropout = DROPOUT) %>%
    layer_dense(units = 1)
  lstm_model %>% compile(optimizer = Optimizer, loss = LSTM.loss, metrics = LSTM.metrics)
  summary(lstm_model)
  history <- lstm_model %>% fit(LSTM.feature, LSTM.target, batch_size = 1, epochs = Epochs)
  lstm.fitted <- lstm_model %>% stats::predict(LSTM.feature)

  #### LSTM forecast ###########################################
  whole_feature <- lagged_matrix[, -1]
  forecast.inputdata <- whole_feature[nrow(whole_feature), ]
  interim.forecast <- NULL
  n.test <- out.sample
  for (i in 1:(n.test + 1)) {
    pred <- lstm_model %>% stats::predict(array(forecast.inputdata, dim = c(1, lag.lstm, 1)))
    interim.forecast <- c(interim.forecast, pred)
    forecast.inputdata <- c(forecast.inputdata, pred)
    forecast.inputdata <- forecast.inputdata[-1]
  }
  lstm.forecast <- interim.forecast[-1]

  ######################### hybrid model output ########################
  final.fitted <- arma.fitted[(lag.lstm + 1):length(arma.fitted)] + lstm.fitted
  final.forecast <- arma.forecast[, 1] + lstm.forecast
  final.residual <- X[(lag.lstm + 1):length(arma.fitted)] - final.fitted

  ############# accuracy measures ######################
  Accuracy.matrix <- matrix(nrow = 2, ncol = 3)
  row.names(Accuracy.matrix) <- c("Train", "Test")
  colnames(Accuracy.matrix) <- c("RMSE", "MAE", "MAPE")
  train.original <- X[(lag.lstm + 1):length(arma.fitted)]
  Accuracy.matrix[1, 1] <- round(sqrt(mean((train.original - final.fitted)^2)), digits = 4)
  Accuracy.matrix[1, 2] <- round(mean(abs(train.original - final.fitted)), digits = 4)
  Accuracy.matrix[1, 3] <- round(mean(abs((train.original - final.fitted) / train.original)) * 100, digits = 4)
  test.original <- X[(length(X) - n.test + 1):length(X)]
  Accuracy.matrix[2, 1] <- round(sqrt(mean((test.original - final.forecast)^2)), digits = 4)
  Accuracy.matrix[2, 2] <- round(mean(abs(test.original - final.forecast)), digits = 4)
  Accuracy.matrix[2, 3] <- round(mean(abs((test.original - final.forecast) / test.original)) * 100, digits = 4)

  output <- list(ARMA.fit = arma.fit, ARMA.fitted = arma.fitted, ARMA.residual = arma.residual,
                 ARMA.forecast = arma.forecast[, 1], ARMA.residual.nonlinearity.test = nonlinearity.test,
                 LSTM.lag = lag.lstm, FINAL.fitted = final.fitted, FINAL.residual = final.residual,
                 FINAL.forecast = final.forecast, ACCURACY.MATRIX = Accuracy.matrix)
  return(output)
}
/scratch/gouwar.j/cran-all/cranData/ARMALSTM/R/ARMALSTM.R
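## Illustrative sketch (not part of the ARMALSTM sources above): how the LSTM lag
## can be read off the PACF of the ARMA residuals, mirroring the selection rule
## inside ARMA.LSTM(). The simulated residual series and the 95% significance band
## 1.96/sqrt(n) are assumptions made only for this example.
set.seed(1)
res <- arima.sim(model = list(ar = 0.6), n = 200)   # stand-in for ARMA residuals
pacf.res <- stats::pacf(res, plot = FALSE)
signif.lags <- which(abs(pacf.res$acf) > stats::qnorm(0.975) / sqrt(length(res)))
signif.lags   # candidate lags; ARMA.LSTM() keeps the last lag of a consecutive run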
AROC.bnp <- function(formula.healthy, group, tag.healthy, data, scale = TRUE, p = seq(0,1,l = 101), paauc = paauccontrol(), compute.lpml = FALSE, compute.WAIC = FALSE, m0, S0, nu, Psi, alpha = 1, a = 2, b = 0.5, L = 10, nsim = 10000, nburn = 2000) { if(inherits(formula.healthy, "character")) { formula.healthy <- as.formula(formula.healthy) } paauc <- do.call("paauccontrol", paauc) data.h <- data[data[,group] == tag.healthy,] data.d <- data[data[,group] != tag.healthy,] n0 <- nrow(data.h) n1 <- nrow(data.d) np <- length(p) # Construct design matrix MM0 <- design.matrix.bnp(formula.healthy, data.h) X0 <- MM0$X # Construct design matrix in diseased population (based on healthy) X1 <- predict(MM0, data.d)$X k <- ncol(X0) if(missing(m0)) { m0 <- rep(0,k) } else { if(length(m0) != k) { stop(paste0("Argument 'm0' must be a vector of length ", k)) } } if(missing(S0)) { S0 <- 100*diag(k) } else { if(!is.matrix(S0) | !all(dim(S0) == c(k,k))) { stop(paste0("Argument 'S0' must be a matrix of dimension ", k, "x", k)) } } if(missing(nu)) { nu <- k + 2 } else { if(nu < k + 2) { stop(paste0("Argument 'nu' must be larger than ", k + 2)) } } if(missing(Psi)) { Psi <- diag(k) } else { if(!is.matrix(Psi) | !all(dim(Psi) == c(k,k))) { stop(paste0("Argument 'Psi' must be a matrix of dimension ", k, "x", k)) } } # Fit the model in the healthy population res0 <- bddp(y = data.h[,MM0$iformula$marker], X = X0, alpha = alpha, m = m0, S = S0, nu = nu, psi = Psi, a = a, b = b, nsim = nsim, L = L, scale = TRUE) udddp <- matrix(0, nrow = n1, ncol = (nsim-nburn)) y1 <- data.d[,MM0$iformula$marker] prob <- res0[[1]] beta <- res0[[2]] sd <- sqrt(res0[[3]]) for(l in (nburn+1):nsim) { udddp[,l-nburn] <- 1 - apply(t(prob[l,]*t(pnorm(y1, mean = X1%*%t(beta[l,,]), sd = rep(sd[l,], each = length(y1))))),1, sum) } weights <- matrix(0, nrow = n1, ncol=(nsim-nburn)) for(l in 1:(nsim-nburn)) { aux1 <- rexp(n1,1) weights[,l] <- aux1/sum(aux1) } arocbbddp <- matrix(0, nrow = np, ncol = (nsim-nburn)) aucddp <- numeric(nsim-nburn) if(paauc$compute) { paucddp <- numeric(nsim-nburn) # Truncated pv tudddp <- matrix(pmin(paauc$value, udddp), nrow = n1) } for(j in 1:np) { arocbbddp[j,] <- colSums(weights*(udddp<=p[j])) } aucddp <- 1 - colSums(weights*udddp) if(paauc$compute) { paucddp <- paauc$value - colSums(weights*tudddp) } AROC <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh"))) AROC[,1] <- apply(arocbbddp,1,mean) AROC[,2] <- apply(arocbbddp,1,ql) AROC[,3] <- apply(arocbbddp,1,qh) AUC <- c(mean(aucddp), quantile(aucddp,c(0.025,0.975))) names(AUC) <- c("est","ql", "qh") res <- list() res$call <- match.call() res$p <- p res$ROC <- AROC res$AUC <- AUC if(paauc$compute) { res$pAUC <- c(mean(paucddp), quantile(paucddp,c(0.025,0.975))) names(res$pAUC) <- c("est","ql", "qh") attr(res$pAUC, "value") <- paauc$value } if(compute.lpml) { res$lpml <- lpml(y = data.h[,MM0$iformula$marker], X = X0, res = res0, L = L, nsim = nsim, nburn = nburn) } if(compute.WAIC) { res$WAIC<- waicnp(y = data.h[,MM0$iformula$marker], X = X0, res = res0, L = L, nsim = nsim, nburn = nburn) } # Results of the fit in the healthy population (neeeded to calculate predictive checks or other statistics) res$fit <- list(mm = MM0, beta = res0[[2]][(nburn+1):nsim,,], sd = sqrt(res0[[3]][(nburn+1):nsim,]), probs = res0[[1]][(nburn+1):nsim,]) res$data_model <- list(y = list(h = data.h[,MM0$iformula$marker], d = data.d[,MM0$iformula$marker]), X = list(h = X0, d = X1)) class(res) <- c("AROC","AROC.bnp") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/AROC.bnp.R
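## Illustrative usage sketch for AROC.bnp() defined above; it is not taken from the
## package documentation. The simulated data, the covariate name 'age' and the short
## MCMC run (nsim/nburn) are assumptions; argument names follow the signature above.
set.seed(123)
n <- 200
df <- data.frame(age = runif(n, 20, 80), status = rbinom(n, 1, 0.5))
df$marker <- 1.5 * df$status + 0.02 * df$age + rnorm(n)
fit_bnp <- AROC.bnp(formula.healthy = marker ~ f(age, K = 3),
                    group = "status", tag.healthy = 0, data = df,
                    compute.lpml = TRUE, nsim = 2000, nburn = 500)
fit_bnp$AUC   # posterior mean and 95% credible interval for the covariate-adjusted AUC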
AROC.bsp <- function(formula.healthy, group, tag.healthy, data, scale = TRUE, p = seq(0,1,l = 101), paauc = paauccontrol(), compute.lpml = FALSE, compute.WAIC = FALSE, m0, S0, nu, Psi, a = 2, b = 0.5, nsim = 5000, nburn = 1500) { if(inherits(formula.healthy, "character")) { formula.healthy <- as.formula(formula.healthy) } paauc <- do.call("paauccontrol", paauc) tf <- terms.formula(formula.healthy, specials = c("f")) if (attr(tf, "response") > 0) { marker <- as.character(attr(tf, "variables")[2]) } else { stop("The formula should include the response variable (left hand side)") } data.h <- data[data[,group] == tag.healthy,] data.d <- data[data[,group] != tag.healthy,] marker <- all.vars(formula.healthy)[1] MM0 <- design.matrix.bsp(update(formula.healthy, NULL ~ .), data.h) X0 <- MM0$X X1 <- predict(MM0, data.d)$X n0 <- nrow(data.h) n1 <- nrow(data.d) np <- length(p) k <- ncol(X0) if(missing(m0)) { m0 <- rep(0,k) } else { if(length(m0) != k) { stop(paste0("Argument 'm0' must be a vector of length ", k)) } } if(missing(S0)) { S0 <- 100*diag(k) } else { if(!is.matrix(S0) | !all(dim(S0) == c(k,k))) { stop(paste0("Argument 'S0' must be a matrix of dimension ", k, "x", k)) } } if(missing(nu)) { nu <- k + 2 } else { if(nu < k + 2) { stop(paste0("Argument 'nu' must be larger than ", k + 2)) } } if(missing(Psi)) { Psi <- diag(k) } else { if(!is.matrix(Psi) | !all(dim(Psi) == c(k,k))) { stop(paste0("Argument 'Psi' must be a matrix of dimension ", k, "x", k)) } } res0 <- regnth(y = data.h[,marker], X = X0, m0 = m0, S0 = S0, nu0 = nu, psi0 = Psi, a = a, b = b, nsim = nsim, scale = TRUE) up <- matrix(0, nrow = n1, ncol = nsim-nburn) #for(k in (nburn+1):nsim) { # for(i in 1:n1){ # up[i,k-nburn] = 1 - pnorm(data.d[i,marker], mean = X1[i,]%*%res0[[1]][k,], sd = sqrt(res0[[2]][k])) # } #} for(k in (nburn+1):nsim) { up[,k-nburn] = 1 - pnorm(data.d[,marker], mean = X1%*%res0[[1]][k,], sd = sqrt(res0[[2]][k])) } weights <- matrix(0, nrow = n1, ncol=(nsim-nburn)) for(l in 1:(nsim-nburn)) { aux1 <- rexp(n1,1) weights[,l] <- aux1/sum(aux1) } arocp <- matrix(0, nrow = np, ncol = (nsim-nburn)) aarocp <- numeric(nsim-nburn) for(j in 1:np) { arocp[j,] <- colSums(weights*(up<=p[j])) } if(paauc$compute) { paucp <- numeric(nsim-nburn) # Truncated pv tup <- matrix(pmin(paauc$value, up), nrow = n1) } aarocp <- 1 - colSums(weights*up) if(paauc$compute) { paucp <- paauc$value - colSums(weights*tup) } AROC <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh"))) AROC[,1] <- apply(arocp,1,mean) AROC[,2] <- apply(arocp,1,ql) AROC[,3] <- apply(arocp,1,qh) AUC <- c(mean(aarocp), quantile(aarocp,c(0.025,0.975))) names(AUC) <- c("est","ql", "qh") res <- list() res$call <- match.call() res$p <- p res$ROC <- AROC res$AUC <- AUC if(paauc$compute) { res$pAUC <- c(mean(paucp), quantile(paucp,c(0.025,0.975))) names(res$pAUC) <- c("est","ql", "qh") attr(res$pAUC, "value") <- paauc$value } if(compute.lpml) { res$lpml <- lpmlp(y = data.h[,marker], X = X0, res = res0, nsim = nsim, nburn = nburn) } if(compute.WAIC) { res$WAIC <- waicp(y = data.h[,marker], X = X0, res = res0, nsim = nsim, nburn = nburn) } # Results of the fit in the healthy population (neeeded to calculate predictive checks or other statistics) res$fit <- list(mm = MM0, beta = res0[[1]][(nburn+1):nsim,], sd = sqrt(res0[[2]][(nburn+1):nsim])) res$data_model <- list(y = list(h = data.h[,marker], d = data.d[,marker]), X = list(h = X0, d = X1)) class(res) <- c("AROC","AROC.bsp") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/AROC.bsp.R
AROC.f <- function(x1 = NULL, by = NULL, K = 0) {
	args <- match.call()
	# 'x2' is not a formal argument of this helper, so the check reduces to x1 only
	if(is.null(args$x1)) stop("x1 must be indicated")
	if(!is.null(args$x1) & is.null(args$by)) { # Smooth effect
		cov <- c("-1", deparse(args$x1, backtick = TRUE, width.cutoff = 500))
	} else if (!is.null(args$x1) & !is.null(args$by)) { # Factor-by-curve interaction
		cov <- c(deparse(args$by, backtick = TRUE, width.cutoff = 500),
		         deparse(args$x1, backtick = TRUE, width.cutoff = 500))
	} else {
		stop("Invalid expression")
	}
	res <- list(cov = cov, K = K)
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/AROC.f.R
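## Small sketch (not from the package docs): AROC.f() above is the helper behind the
## f() terms used in AROC.bnp()/AROC.bsp() formulas. It only records the covariate
## name and the number of inner knots K; the covariate is never evaluated, so 'age'
## here is just an illustrative symbol.
AROC.f(age, K = 5)
## $cov
## [1] "-1"  "age"
## $K
## [1] 5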
AROC.kernel <- function(marker, covariate, group, tag.healthy, data, p = seq(0,1,l = 101), B = 1000) { np <- length(p) compute.ROC <- function(marker, covariate, group, tag.healthy, data, p = seq(0,1,l = 101)) { data.h <- data[data[,group] == tag.healthy,] data.d <- data[data[,group] != tag.healthy,] n0 <- nrow(data.h) n1 <- nrow(data.d) np <- length(p) x0 <- data.h[,covariate] y0 <- data.h[,marker] x1 <- data.d[,covariate] y1 <- data.d[,marker] # Fit the location-scale model in the healthy population bw.mean.h.p <- npregbw(ydat = y0, xdat = x0, regtype = "lc", bwmethod = "cv.ls") fit.mean.h.p <- npreg(bw.mean.h.p, exdat = x0,residuals = TRUE) bw.var.h.p <- npregbw(ydat = (fit.mean.h.p$resid^2), xdat = x0, regtype = "lc", bwmethod = "cv.ls") fit.var.h.p <- npreg(bw.var.h.p, exdat = x0, residuals = TRUE) res0p <- fit.mean.h.p$resid/sqrt(fit.var.h.p$mean) F0res <- ecdf(res0p) # Evaluate the model in the diseased population, and compute the AROC fit.mean.d.p <- npreg(bw.mean.h.p, exdat = x1,residuals = TRUE) fit.var.d.p <- npreg(bw.var.h.p, exdat = x1, residuals = TRUE) u1 <- 1 - F0res((y1-fit.mean.d.p$mean)/sqrt(fit.var.d.p$mean)) arocp <- numeric(np) for(i in 1:np){ arocp[i] <- sum(u1<=p[i])/n1 } aarocp <- simpson(arocp, p) res <- list() res$p <- p res$ROC <- arocp res$AUC <- aarocp res$data.h <- data.h res$data.d <- data.d res$bw.mean <- bw.mean.h.p res$bw.var <- bw.var.h.p res$fit.mean <- fit.mean.h.p res$fit.var <- fit.var.h.p res } croc <- compute.ROC(marker = marker, covariate = covariate, group = group, tag.healthy = tag.healthy, data = data, p = p) arocp <- croc$ROC aarocp <- croc$AUC if(B > 0) { arocpb <- matrix(0, nrow = np, ncol = B) aarocpb <- numeric(B) for(l in 1:B) { # Another option: healthy (residuals) - diseased (original sample) data.boot.d <- croc$data.d[sample(nrow(croc$data.d), replace=TRUE),] data.boot.h <- croc$data.h res.h.b <- sample(croc$fit.mean$resid/sqrt(croc$fit.var$mean), replace = TRUE) data.boot.h[,marker] <-croc$fit.mean$mean + sqrt(croc$fit.var$mean)*res.h.b data.boot <- rbind(data.boot.d, data.boot.h) res.boot <- compute.ROC(marker = marker, covariate = covariate, group = group, tag.healthy = tag.healthy, data = data.boot, p = p) arocpb[,l] <- res.boot$ROC aarocpb[l] <- res.boot$AUC } } columns <-switch(as.character(B>0),"TRUE" = 1:3,"FALSE"=1) col.names <-c("est","ql", "qh")[columns] poolROC <- matrix(0, ncol = length(columns), nrow = np, dimnames = list(1:np, col.names)) poolROC[,1] <- arocp AUC <- aarocp if(B > 0) { poolROC[,2] <- apply(arocpb,1,ql) poolROC[,3] <- apply(arocpb,1,qh) AUC <- c(AUC,quantile(aarocpb,c(0.025,0.975))) } names(AUC) <- col.names res <- list() res$call <- match.call() res$p <- p res$ROC <- poolROC res$AUC <- AUC res$bw.mean <- croc$bw.mean res$bw.var <- croc$bw.var res$fit.mean <- croc$fit.mean res$fit.var <- croc$fit.var class(res) <- c("AROC","AROC.kernel") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/AROC.kernel.R
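## Illustrative usage sketch for AROC.kernel() above (simulated data). The kernel
## smoothers come from the np package, so the example can be slow; B is kept small
## here only to limit the bootstrap cost (an assumption, not a recommended value).
set.seed(1)
n <- 150
df <- data.frame(age = runif(n, 20, 80), status = rbinom(n, 1, 0.5))
df$marker <- df$status + 0.02 * df$age + rnorm(n)
fit_kernel <- AROC.kernel(marker = "marker", covariate = "age",
                          group = "status", tag.healthy = 0,
                          data = df, B = 50)
fit_kernel$AUC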
AROC.sp <- function(formula.healthy, group, tag.healthy, data, est.surv.h = c("normal", "empirical"), p = seq(0,1,l = 101), B = 1000) { est.surv.h <- match.arg(est.surv.h) np <- length(p) if(inherits(formula.healthy, "character")) { formula.healthy <- as.formula(formula.healthy) } marker <- all.vars(formula.healthy)[1] compute.ROC <- function(formula.healthy, group, tag.healthy, data, est.surv.h, p = seq(0,1,l = 101)) { data.h <- data[data[,group] == tag.healthy,] data.d <- data[data[,group] != tag.healthy,] n0 <- nrow(data.h) n1 <- nrow(data.d) np <- length(p) marker <- all.vars(formula.healthy)[1] # Fit the model in the healthy population fit0p <- lm(formula = formula.healthy, data = data.h) sigma0p <- summary(fit0p)$sigma pre.placement.values <- (data.d[,marker]-predict(fit0p, newdata = data.d))/sigma0p # Evaluate the model in the diseased population if(est.surv.h == "normal") { u1 <- 1-pnorm(pre.placement.values) } else { res0p <- fit0p$residuals/sigma0p F0res <- ecdf(res0p) u1 <- 1 - F0res(pre.placement.values) } # Compute the AROC arocp <- numeric(np) for(i in 1:np){ arocp[i] <- sum(u1<=p[i])/n1 } aarocp <- simpson(arocp, p) res <- list() res$p <- p res$ROC <- arocp res$AUC <- aarocp res$fit <- fit0p res$data.h <- data.h res$data.d <- data.d res } res.fit <- compute.ROC(formula.healthy = formula.healthy, group = group, tag.healthy = tag.healthy, data = data, est.surv.h = est.surv.h, p = p) arocp <- res.fit$ROC aarocp <- res.fit$AUC if(B > 0) { # Confidence intervals arocpb <- matrix(0, nrow = np, ncol = B) aarocpb <- numeric(B) for(l in 1:B) { # Another option: healthy (residuals) - diseased (original sample) data.boot.d <- res.fit$data.d[sample(nrow(res.fit$data.d), replace=TRUE),] data.boot.h <- res.fit$data.h res.h.b <- sample(res.fit$fit$residuals, replace = TRUE) data.boot.h[,marker] <- res.fit$fit$fitted + res.h.b data.boot <- rbind(data.boot.d, data.boot.h) res.boot <- compute.ROC(formula.healthy = formula.healthy, group = group, tag.healthy = tag.healthy, data = data.boot, est.surv.h = est.surv.h, p = p) arocpb[,l] <- res.boot$ROC aarocpb[l] <- res.boot$AUC } } columns <-switch(as.character(B>0),"TRUE" = 1:3,"FALSE"=1) col.names <-c("est","ql", "qh")[columns] poolROC <- matrix(0, ncol = length(columns), nrow = np, dimnames = list(1:np, col.names)) poolROC[,1] <- arocp AUC <- aarocp if(B > 0) { poolROC[,2] <- apply(arocpb,1,ql) poolROC[,3] <- apply(arocpb,1,qh) AUC <- c(AUC,quantile(aarocpb,c(0.025,0.975))) } names(AUC) <- col.names res <- list() res$call <- match.call() res$p <- p res$ROC <- poolROC res$AUC <- AUC res$fit.h <- res.fit$fit res$est.surv.h <- est.surv.h class(res) <- c("AROC","AROC.sp") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/AROC.sp.R
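## Illustrative usage sketch for AROC.sp() above (simulated data; B kept small on
## purpose). With est.surv.h = "empirical" the survival function of the standardized
## healthy-group residuals is estimated by its empirical counterpart.
set.seed(1)
n <- 150
df <- data.frame(age = runif(n, 20, 80), status = rbinom(n, 1, 0.5))
df$marker <- df$status + 0.02 * df$age + rnorm(n)
fit_sp <- AROC.sp(formula.healthy = marker ~ age,
                  group = "status", tag.healthy = 0, data = df,
                  est.surv.h = "empirical", B = 100)
fit_sp$AUC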
bbase.interaction.factor.by.curve.os <- function(x, factor, K, bdeg = 3, eps = 1e-5) { factor <- droplevels(factor) factor.levels <- levels(factor) # Ordering in the data set ord <- NULL for(i in 1:length(factor.levels)) { ord <- c(ord, which(factor == factor.levels[i])) } # Parametric part #mf <- model.frame("~ factor", data = data.frame(factor = factor), drop.unused.levels = TRUE) #mt <- terms(mf) #param.part <- model.matrix(mt, mf)[,-1, drop = FALSE] # Here we delete the intercept # Smooth part if(length(K) == 1) { K <- rep(K, length(factor.levels)) } else if (length(K) != length(factor.levels)) { stop("Error with the number of inner knots for the interaction") } temp <- interaction.smooth.part <- list() for(i in 1:length(factor.levels)) { Baux <- bbase.os(x = x[factor == factor.levels[i]], K = K[i], bdeg = bdeg, intercept = FALSE) interaction.smooth.part[[i]] <- Baux attributes(Baux) <- attributes(Baux)["dim"] temp[[i]] <- Baux } # Join parametric and smooth aux <- as.matrix(bdiag(temp)) #B <- cbind(param.part, aux[order(ord),]) B <- aux[order(ord),] names(interaction.smooth.part) <- factor.levels attr(B,"interaction.smooth.part") <- interaction.smooth.part #class(B) <- c("bbase.interaction.factor.by.curve.os", "matrix") B }
/scratch/gouwar.j/cran-all/cranData/AROC/R/bbase.interaction.factor.by.curve.os.R
bbase.os <- function(x, K, bdeg = 3, eps = 1e-5, intercept = TRUE) {
	# Using the function bs
	B <- bs(x, degree = bdeg, df = K + bdeg, intercept = intercept)
	#class(B) <- c("bbase.os", "matrix")
	B
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/bbase.os.R
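## Minimal sketch of what bbase.os() above produces: a B-spline basis built with
## bs(), having K + bdeg columns when intercept = FALSE. The explicit library(splines)
## call is an assumption needed only when the function is sourced outside the package.
library(splines)
x <- seq(0, 1, length.out = 50)
B <- bbase.os(x, K = 5, bdeg = 3, intercept = FALSE)
dim(B)   # 50 evaluation points x 8 basis functions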
bddp <- function(y, X, alpha = 1, m, S, nu, psi, a, b, nsim, L, scale = TRUE) { yt <- y if (scale==TRUE) {yt <- y/sd(y)} n <- length(y) k <- ncol(X) p <- ns <- rep(0,L) v <- rep(1/L,L) v[L] <- 1 beta <- matrix(0, nrow = L, ncol = k) aux <- try(solve(t(X)%*%X)%*%t(X)%*%yt, silent = TRUE) if(!inherits(aux, "try-error")) { for(l in 1:L) { beta[l,] <- aux } } tau <- rep(1/var(yt),L) prop <- prob <- matrix(0, nrow = n, ncol = L) P <- Tau <- Sigma2 <- matrix(0, nrow = nsim, ncol = L) Beta <- Beta1 <- array(0,c(nsim,L,k)) Beta[1,,] <- beta Tau[1,] <- tau mu <- matrix(0, nrow = nsim, ncol = k) Sigmainv <- array(0, c(nsim,k,k)) mu[1,] <- mvrnorm(1, mu = m, Sigma = S) Sigmainv[1,,] <- rWishart(1, df = nu, solve(nu*psi)) for(i in 2:nsim) { cumv <- cumprod(1-v) p[1] <- v[1] for(l in 2:L) { p[l] <- v[l]*cumv[l-1] } for(l in 1:L) { prop[,l] <- p[l]*dnorm(yt,mean=X%*%beta[l,],sd = sqrt(1/tau[l])) } prob <- prop/apply(prop,1,sum) z <- rMultinom(prob,1) P[i,] <- p for(l in 1:L) { ns[l] <- length(which(z == l)) } for(l in 1:(L-1)) { v[l] <- rbeta(1, 1 + ns[l],alpha+sum(ns[(l+1):L])) } for(l in 1:L) { tX <- matrix(t(X[z == l, ]),nrow = k, ncol = ns[l]) V <- solve(Sigmainv[i-1,,]+tau[l]*tX%*%X[z == l,]) mu1 <- V%*%(Sigmainv[i-1,,]%*%mu[i-1,]+tau[l]*tX%*%yt[z == l]) Beta1[i,l,] <- Beta[i,l,] <- beta[l,] <- mvrnorm(1, mu = mu1, Sigma = V) if (scale == TRUE) { Beta1[i,l,] <- sd(y)*Beta[i,l,] } Tau[i,l] <- tau[l] <- rgamma(1, shape = a + (ns[l]/2), rate = b + 0.5*(t(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE]))%*%(yt[z==l]-X[z==l,]%*%t(beta[l,,drop=FALSE])))) Sigma2[i,l] <- 1/Tau[i,l] if (scale == TRUE){ Sigma2[i,l] <- var(y)*(1/Tau[i,l]) } } Vaux <- solve(solve(S)+L*Sigmainv[i-1,,]) if(k == 1) { meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%sum(Beta[i,,])) } else { meanmu <- Vaux%*%(solve(S)%*%m+Sigmainv[i-1,,]%*%t(t(apply(Beta[i,,],2,sum)))) } mu[i,] <- mvrnorm(1, mu = meanmu, Sigma = Vaux) Vaux1 <- 0 for(l in 1:L) { Vaux1 <- Vaux1+(Beta[i,l,]-mu[i,])%*%t((Beta[i,l,]-mu[i,])) } Sigmainv[i,,] <- rWishart(1,nu+L,solve(nu*psi+Vaux1)) } return(list(P,Beta1,Sigma2)) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/bddp.R
bootstrap.sample <- function(data, group, method = c("ncoutcome","coutcome")) {
	method <- match.arg(method)
	if(method == "coutcome") { # Resample within each outcome group (group sizes preserved)
		res <- do.call("rbind", lapply(split(data, data[,group]),
			function(x) x[sample(nrow(x), replace = TRUE),]))
	} else { # Resample rows freely
		res <- data[sample(nrow(data), replace = TRUE),]
	}
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/bootstrap.sample.R
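## Quick sketch of bootstrap.sample() above: "coutcome" resamples within each outcome
## group (group sizes are preserved), "ncoutcome" resamples rows freely. The toy data
## frame is an assumption.
set.seed(1)
toy <- data.frame(y = rnorm(10), group = rep(0:1, each = 5))
table(bootstrap.sample(toy, "group", method = "coutcome")$group)    # always 5 and 5
table(bootstrap.sample(toy, "group", method = "ncoutcome")$group)   # group sizes vary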
compute.threshold.AROC.bnp <- function(object, newdata, FPF = 0.5) { #qF0 <- function(q, iter, Xpred, icov, object){ # toInvert <- function(x, q, iter, Xpred, icov, object){ # return(sum(object$fit$probs[iter,]*pnorm(x, mean = Xpred[icov,]%*%t(object$fit$beta[iter,,]), sd = object$fit$sd[iter,])) - q) # } # res <- uniroot(toInvert, interval = c(-10^15,10^15), q, iter, Xpred, icov, object)$root # return(res) #} if(class(object)[2] != "AROC.bnp") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } # Create the data frame Xp <- predict.design.matrix.bnp(object$fit$mm, newdata)$X ncov <- nrow(Xp) nrep <- nrow(object$fit$probs) np <- length(FPF) #thresholds <- array(0,c(np,ncov,nrep)) #for(inp in 1:np) { # for(inrep in 1:nrep) { # for(incov in 1:ncov) { # thresholds[inp,incov,inrep] <- qF0(q = 1-FPF[inp], iter = inrep, Xpred = Xp, icov = incov, object = object) # } # } #} thresholds <- array(0,c(np,ncov,nrep)) for(inrep in 1:nrep) { mu.h <- Xp%*%t(object$fit$beta[inrep,,]) for(incov in 1:ncov) { aux <- norMix(mu = c(mu.h[incov,]), sigma = object$fit$sd[inrep,], w = object$fit$probs[inrep,]) thresholds[,incov,inrep] <- qnorMix(1-FPF, aux) } } thresholdsm <- thresholdsl <- thresholdsh <- matrix(0, nrow = np, ncol = ncov) rownames(thresholdsm) <- rownames(thresholdsl) <- rownames(thresholdsh) <- FPF for(incov in 1:ncov){ for(inp in 1:np){ thresholdsm[inp,incov] <- mean(thresholds[inp,incov,]) thresholdsl[inp,incov] <- quantile(thresholds[inp,incov,],0.025) thresholdsh[inp,incov] <- quantile(thresholds[inp,incov,],0.975) } } res <- list() res$thresholds.est <- thresholdsm res$thresholds.ql <- thresholdsl res$thresholds.qh <- thresholdsh res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.AROC.bnp.R
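## Sketch for compute.threshold.AROC.bnp() above. It assumes 'fit_bnp' is the object
## returned by AROC.bnp() in the earlier sketch, and asks for covariate-specific
## thresholds at a 10% false positive fraction; the ages in 'newdata' are arbitrary.
thr <- compute.threshold.AROC.bnp(fit_bnp,
                                  newdata = data.frame(age = c(30, 50, 70)),
                                  FPF = 0.1)
thr$thresholds.est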
compute.threshold.AROC.bsp <- function(object, newdata, FPF = 0.5) { if(class(object)[2] != "AROC.bsp") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } Xp <- predict.design.matrix.bsp(object$fit$mm, newdata)$X ncov <- nrow(Xp) nrep <- length(object$fit$sd) np <- length(FPF) thresholds <- array(0,c(np,ncov,nrep)) for(inrep in 1:nrep) { for(incov in 1:ncov) { thresholds[,incov,inrep] <- qnorm(1-FPF, mean = Xp[incov,]%*%object$fit$beta[inrep,], sd = object$fit$sd[inrep]) } } thresholdsm <- thresholdsl <- thresholdsh <- matrix(0, nrow = np, ncol = ncov) rownames(thresholdsm) <- rownames(thresholdsl) <- rownames(thresholdsh) <- FPF thresholdsm <- apply(thresholds, c(1,2), mean) thresholdsl <- apply(thresholds, c(1,2), quantile, 0.025) thresholdsh <- apply(thresholds, c(1,2), quantile, 0.975) res <- list() res$thresholds.est <- thresholdsm res$thresholds.ql <- thresholdsl res$thresholds.qh <- thresholdsh res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.AROC.bsp.R
compute.threshold.AROC.kernel <- function(object, newcovariate, FPF = 0.5) { if(class(object)[2] != "AROC.kernel") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } ncov <- length(newcovariate) np <- length(FPF) thresholds <- matrix(0, nrow = np, ncol = ncov) rownames(thresholds) <- FPF colnames(thresholds) <- newcovariate fit.mean.new <- npreg(object$bw.mean, exdat = newcovariate, residuals = TRUE) fit.var.new <- npreg(object$bw.var, exdat = newcovariate, residuals = TRUE) h.residuals <- object$fit.mean$resid/sqrt(object$fit.var$mean) csf0 <- apply(outer(h.residuals, h.residuals, ">="), 2, mean) csf0_inv <- apply(outer(csf0, FPF, "<="), 2, function(x, z) { res <- min(c(z[x], max(z))) res }, z = h.residuals) csf0_inv <- replace(csf0_inv, is.infinite(csf0_inv), max(h.residuals)) for(i in 1:ncov) { thresholds[,i] <- fit.mean.new$mean[i] + sqrt(fit.var.new$mean[i])*csf0_inv } res <- list() res$thresholds <- thresholds res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.AROC.kernel.R
compute.threshold.AROC.sp <- function(object, newdata, FPF = 0.5) { if(class(object)[2] != "AROC.sp") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } ncov <- nrow(newdata) np <- length(FPF) thresholds <- matrix(0, nrow = np, ncol = ncov) rownames(thresholds) <- FPF #colnames(thresholds) <- newcovariate fit.new <- predict(object$fit.h, newdata = newdata) if(object$est.surv.h == "normal") { csf0_inv <- qnorm(1-FPF) } else { h.residuals <- object$fit.h$residuals/summary(object$fit.h)$sigma csf0 <- apply(outer(h.residuals, h.residuals, ">="), 2, mean) csf0_inv <- apply(outer(csf0, FPF, "<="), 2, function(x, z) { res <- min(c(z[x], max(z))) res }, z = h.residuals) csf0_inv <- replace(csf0_inv, is.infinite(csf0_inv), max(h.residuals)) } for(i in 1:ncov) { thresholds[,i] <- fit.new[i] + summary(object$fit.h)$sigma*csf0_inv } res <- list() res$thresholds <- thresholds res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.AROC.sp.R
compute.threshold.pooledROC.BB <- function(object, FPF = 0.5) { if(class(object)[2] != "pooledROC.BB") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } B <- ncol(object$weights$h) weights.h <- object$weights$h weights.d <- object$weights$d np <- length(FPF) thresholds.s <- TPF.s <- matrix(0, nrow = np, ncol = B) for(l in 1:B) { thresholds.s[,l] <- quantile(ewcdf(object$marker$h, weights.h[,l]), 1- FPF, type = 1) TPF.s[,l] <- 1 - ewcdf(object$marker$d, weights.d[,l])(thresholds.s[,l]) } thresholds <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh"))) rownames(thresholds) <- FPF thresholds[,1] <- apply(thresholds.s, 1, mean) thresholds[,2] <- apply(thresholds.s, 1, quantile, prob = 0.025) thresholds[,3] <- apply(thresholds.s, 1, quantile, prob = 0.975) TPF <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh"))) rownames(TPF) <- FPF TPF[,1] <- apply(TPF.s, 1, mean) TPF[,2] <- apply(TPF.s, 1, quantile, prob = 0.025) TPF[,3] <- apply(TPF.s, 1, quantile, prob = 0.975) res <- list() res$thresholds <- thresholds res$FPF <- FPF res$TPF <- TPF res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.pooledROC.BB.R
compute.threshold.pooledROC.emp <- function(object, FPF = 0.5) {
	if(class(object)[2] != "pooledROC.emp") {
		stop(paste0("This function can not be used for this object class: ", class(object)[2]))
	}
	F1emp <- ecdf(object$marker$d)
	thresholds <- quantile(object$marker$h, 1 - FPF, type = 1)
	TPF <- 1 - F1emp(thresholds)
	res <- list()
	res$thresholds <- thresholds
	res$FPF <- FPF
	res$TPF <- TPF
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/compute.threshold.pooledROC.emp.R
design.matrix.bnp <- function(formula, data) { iform <- interpret.AROCformula(formula, data) data.cov <- iform$data.cov X <- NULL Xterms <- list() paracoeff <- TRUE # First "coefficients is parametric" if(iform$npartial == 0) { # Only the intercept X <- matrix(1, ncol = 1, nrow = nrow(data)) colnames(X) <- "(Intercept)" res <- list() res$X <- X res$terms <- NULL res$iformula <- iform } else { for(i in 1:iform$npartial) { if(any(iform$II[,i] == -1)) { if(iform$h[i] == 0) { # Linear and factor mf <- model.frame(paste0("~", iform$II[2,i]), data.cov, drop.unused.levels = TRUE) mt <- terms(mf) MM <- model.matrix(mt, mf)[,-1, drop = FALSE] # Here we delete the intercept paracoeff <- c(paracoeff, rep(TRUE, ncol(MM))) X <- cbind(X, MM) attr(mt, "contrast") <- attr(MM,"contrast") attr(mt, "xlev") <- .getXlevels(mt, mf) Xterms[[i]] <- mt } else { Bs <- bbase.os(data.cov[,iform$II[2,i]], K = iform$K[[i]], intercept = FALSE) colnames(Bs) <- paste0(iform$partial[i],".", 1:ncol(Bs)) paracoeff <- c(paracoeff, rep(FALSE, ncol(Bs))) Xterms[[i]] <- Bs X <- cbind(X, Bs) } } else { # Factor by curve Bs <- bbase.interaction.factor.by.curve.os(data.cov[,iform$II[2,i]], data.cov[,iform$II[1,i]], K = iform$K[[i]]) colnames(Bs) <- paste0(iform$partial[i],".", 1:ncol(Bs)) paracoeff <- c(paracoeff, rep(FALSE, ncol(Bs))) Xterms[[i]] <- Bs X <- cbind(X, Bs) } } # Add the intercept names.X <- colnames(X) X <- cbind(1, X) colnames(X) <- c("(Intercept)", names.X) res <- list() res$X <- X res$terms <- Xterms res$iformula <- iform } res$paracoeff <- paracoeff class(res) <- "design.matrix.bnp" res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/design.matrix.bnp.R
design.matrix.bsp <- function(formula, data) {
	mf <- model.frame(formula, data, drop.unused.levels = TRUE)
	mt <- terms(mf)
	X <- model.matrix(mt, mf) # Includes the intercept
	res <- list(X = X, mf = mf, mt = mt)
	class(res) <- "design.matrix.bsp"
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/design.matrix.bsp.R
interpret.AROCformula <- function(formula, data) { env <- environment(formula) if(inherits(formula, "character")) formula <- as.formula(formula) tf <- terms.formula(formula, specials = c("f")) if (attr(tf, "response") > 0) { marker <- as.character(attr(tf, "variables")[2]) } else { stop("The formula should include the response variable (left hand side)") } terms <- attr(tf, "term.labels") #if(length(grep(":",terms)) != 0) stop("Symbol '*' is not allowed") nt <- length(terms) ns <- attr(tf,"specials")$f - 1 # Marker is in the formula II <- list() h <- list() K <- list() partial <- vector() k <- 0 data.cov <- data[,names(data) %in% all.vars(formula)[-1], drop = FALSE] if(nt) { for (i in 1:nt) { if (i %in% ns) { k <- k+1 st <- eval(parse(text = paste("AROC.",terms[i],sep=""))) II[[k]] <- st$cov h[[k]] <- -1 K[[k]] <- st$K partial[k] <- terms[i] } else { k <- k + 1 II[[k]]<- c(-1, terms[i]) h[[k]] <- 0 # parametric K[[k]] <- 0 partial[k] <- terms[i] } } } else { # Only the intercept data.cov <- NULL } II <- if(length(II)) { matrix(unlist(II), nrow = 2) } else { matrix(0, nrow = 2) } res <- list(marker = marker, II = II, h = unlist(h), K = K, npartial = k, partial = partial, data.cov = data.cov) res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/interpret.AROCformula.R
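## Sketch (toy data assumed): interpret.AROCformula() above splits an AROC formula
## into the marker name, linear terms and smooth f() terms before the design
## matrices are built.
toy <- data.frame(marker = rnorm(10), age = runif(10), gender = factor(rep(0:1, 5)))
ii <- interpret.AROCformula(marker ~ gender + f(age, K = 3), data = toy)
ii$marker     # "marker"
ii$npartial   # 2: one parametric term (gender) and one smooth term (age)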
lpml <- function(y, X, res, L, nsim, nburn) { n <- length(y) term <- array(0,c(nsim-nburn,L,n)) for(k in 1:(nsim-nburn)) { for(l in 1:L) { term[k,l,] <- res[[1]][k+nburn,l]*dnorm(y, mean = X%*%res[[2]][k+nburn,l,], sd = sqrt(res[[3]][k+nburn,l])) } } termsum <- matrix(0, nrow = nsim-nburn, ncol = n) # Very time consuming: see why # termsum <- apply(term, c(1,3), sum) # Need this for(i in 1:n) { for(k in 1:(nsim-nburn)) { termsum[k,i] <- sum(term[k,,i]) } } cpoinv <- apply(termsum, 2, function(x) mean(1/x)) cpo <- 1/cpoinv lpml <- sum(log(cpo)) res <- list() res$cpo <- cpo res$lpml <- lpml res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/lpml.R
lpmlp <- function(y, X, res, nsim, nburn) {
	n <- length(y)
	aux <- matrix(0, nrow = n, ncol = nsim-nburn)
	cpoinv <- numeric(n)
	for(k in 1:(nsim-nburn)) {
		aux[,k] <- dnorm(y, mean = X%*%res[[1]][k+nburn,], sd = sqrt(res[[2]][k+nburn]))
	}
	cpoinv <- apply(aux, 1, function(x) mean(1/x))
	cpo <- 1/cpoinv
	lpml <- sum(log(cpo))
	res <- list()
	res$cpo <- cpo
	res$lpml <- lpml
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/lpmlp.R
paauccontrol <- function(compute = FALSE, value = 1) list(compute = compute, value = value)
/scratch/gouwar.j/cran-all/cranData/AROC/R/paauccontrol.R
plot.AROC <- function(x, ...) { main.roc <- switch(class(x)[2], "pooledROC.BB" = "Pooled ROC curve - Bayesian bootstrap", "pooledROC.emp" = "Pooled ROC curve - Empirical", "AROC.kernel" = "AROC Kernel-based", "AROC.bnp" = "AROC Bayesian nonparametric", "AROC.bsp" = "AROC Bayesian semiparametric", "AROC.sp" = "AROC semiparametric") main.auc <- ifelse(any(class(x) %in% c("pooledROC.BB", "pooledROC.emp")), "AUC", "AAUC") plot(x$p, x$ROC[,1], xlab = "FPF", ylab = "TPF", xlim = c(0,1), ylim = c(0,1), main = main.roc, type = "l", cex.lab = 1.3, cex.axis = 1.3,...) if(ncol(x$ROC) == 3) { lines(x$p, x$ROC[,2], lty = 2) lines(x$p, x$ROC[,3], lty = 2) } abline(0,1, col = "grey") if(length(x$AUC) == 3) { legend.text <- paste0(main.auc, ": ", paste(round(x$AUC[1], 3), " (", round(x$AUC[2], 3),"",", ", round(x$AUC[3], 3),")", sep = "")) } else { legend.text <- paste0(main.auc, ": ", round(x$AUC[1], 3)) } legend(0.4, 0.2, legend.text, bty = "n", cex = 1.3) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/plot.AROC.R
pooledROC.BB <- function(y0, y1, p = seq(0,1,l=101), B = 5000) { n1 <- length(y1) n0 <- length(y0) weights.h <- matrix(0, nrow = n0, ncol = B) weights.d <- matrix(0, nrow = n1, ncol = B) np <- length(p) u <- matrix(0,nrow = n1, ncol = B) for(l in 1:B){ q <- rexp(n0,1) weights.h[,l] <- q/sum(q) for(j in 1:n1){ u[j,l]<- sum(weights.h[,l]*(y0>y1[j])) } } rocbbpool <- matrix(0, nrow = np, ncol = B) aucbbpool <- numeric(B) for(l in 1:B) { q1 <- rexp(n1,1) weights.d[,l] <- q1/sum(q1) for(j in 1:np){ rocbbpool[j,l] <- sum(weights.d[,l]*(u[,l]<=p[j])) } aucbbpool[l] <- simpson(rocbbpool[,l], p) } poolROC <- matrix(0, ncol = 3, nrow = np, dimnames = list(1:np, c("est","ql", "qh"))) poolROC[,1] <- apply(rocbbpool,1,mean) poolROC[,2] <- apply(rocbbpool,1,ql) poolROC[,3] <- apply(rocbbpool,1,qh) res <- list() res$call <- match.call() res$marker <- list(h = y0, d = y1) res$p <- p res$ROC <- poolROC AUC <- c(mean(aucbbpool), quantile(aucbbpool,c(0.025,0.975))) names(AUC) <- c("est","ql", "qh") res$AUC <- AUC res$weights <- list(h = weights.h, d = weights.d) class(res) <- c("AROC","pooledROC.BB") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/pooledROC.BB.R
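## Illustrative usage sketch for pooledROC.BB() above: Bayesian bootstrap pooled ROC
## from two simulated marker samples, followed by a threshold at FPF = 0.1 through
## compute.threshold.pooledROC.BB() (defined earlier). Sample sizes and B are assumptions.
set.seed(1)
y0 <- rnorm(100, 0, 1)     # nondiseased
y1 <- rnorm(100, 1, 1.2)   # diseased
roc_bb <- pooledROC.BB(y0, y1, B = 500)
roc_bb$AUC
compute.threshold.pooledROC.BB(roc_bb, FPF = 0.1)$TPF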
pooledROC.emp <- function(y0, y1, p = seq(0,1,l=101), B = 500, method = c("ncoutcome","coutcome")) { method <- match.arg(method) n1 <- length(y1) n0 <- length(y0) np <- length(p) compute.ROC <- function(y0, y1, p = seq(0,1,l=101)) { F1emp <- ecdf(y1) rocemp <- 1 - F1emp(quantile(y0,1-p)) aucemp <- simpson(rocemp, p) res <- list() res$p <- p res$ROC <- rocemp res$AUC <- aucemp res } res <- compute.ROC(y0, y1, p = p) rocemp <- res$ROC aucemp <- res$AUC if(B > 0) { rocempb <- matrix(0,nrow = np, ncol = B) aucempb <- numeric(B) data.original <- data.frame(y = c(y0, y1), group = c(rep(0,n0), rep(1,n1))) for(l in 1:B){ data.boot <- bootstrap.sample(data.original, "group", method = method) y0b <- data.boot$y[data.boot$group == 0] y1b <- data.boot$y[data.boot$group == 1] res.boot <- compute.ROC(y0b, y1b, p = p) rocempb[,l] <- res.boot$ROC aucempb[l] <- res.boot$AUC } } columns <-switch(as.character(B>0),"TRUE" = 1:3,"FALSE"=1) col.names <-c("est","ql", "qh")[columns] poolROC <- matrix(0, ncol = length(columns), nrow = np, dimnames = list(1:np, col.names)) poolROC[,1] <- rocemp AUC <- aucemp if(B > 0) { poolROC[,2] <- apply(rocempb,1,ql) poolROC[,3] <- apply(rocempb,1,qh) AUC <- c(AUC, quantile(aucempb,c(0.025,0.975))) } names(AUC) <- col.names res <- list() res$call <- match.call() res$marker <- list(h = y0, d = y1) res$p <- p res$ROC <- poolROC res$AUC <- AUC class(res) <- c("AROC","pooledROC.emp") res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/pooledROC.emp.R
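## Same toy setting for the empirical pooled ROC estimator above; confidence bands
## come from a stratified ("coutcome") bootstrap, and the empirical threshold at
## FPF = 0.1 is obtained with compute.threshold.pooledROC.emp() (defined earlier).
set.seed(1)
y0 <- rnorm(100, 0, 1)
y1 <- rnorm(100, 1, 1.2)
roc_emp <- pooledROC.emp(y0, y1, B = 200, method = "coutcome")
roc_emp$AUC
compute.threshold.pooledROC.emp(roc_emp, FPF = 0.1)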
predict.bbase.interaction.factor.by.curve.os <- function(object, newx, newfactor) { newfactor <- droplevels(newfactor) factor.levels <- levels(newfactor) # Parametric part if(length(factor.levels) > 1) { #mf <- model.frame("~ factor", data = data.frame(factor = newfactor), drop.unused.levels = TRUE) #mt <- terms(mf) #param.part <- model.matrix(mt, mf)[,-1, drop = FALSE] ord <- NULL for(i in 1:length(factor.levels)) { ord <- c(ord, which(newfactor == factor.levels[i])) } } else { #param.part <- NULL ord <- 1:length(newx) } interaction.smooth.part.pred <- list() interaction.smooth.part <- attr(object,"interaction.smooth.part") for(i in 1:length(factor.levels)) { Baux <- suppressWarnings(predict.bbase.os(interaction.smooth.part[[factor.levels[i]]], newx[newfactor == factor.levels[i]])) attributes(Baux) <- attributes(Baux)["dim"] interaction.smooth.part.pred[[i]] <- Baux } # Join parametric and smooth aux <- as.matrix(bdiag(interaction.smooth.part.pred)) #B <- cbind(param.part, aux[order(ord),]) B <- aux[order(ord),] B }
/scratch/gouwar.j/cran-all/cranData/AROC/R/predict.bbase.interaction.factor.by.curve.os.R
predict.bbase.os <- function(object, newx) {
	B <- predict(object, newx = newx)
	B
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/predict.bbase.os.R
predict.design.matrix.bnp <- function(object, newdata, ...) { if(object$iformula$npartial == 0) { # Only the intercept Xp <- matrix(1, ncol = 1, nrow = nrow(newdata)) } else { Xp <- NULL # Organize the newdataframe as it was in the original data newdata <- newdata[,names(object$iformula$data.cov), drop = FALSE] for(i in 1:object$iformula$npartial) { if(any(object$iformula$II[,i] == -1)) { if(object$iformula$h[i] == 0 | object$iformula$h[i] == 1) { # Linear and factor mfp <- model.frame(object$terms[[i]], newdata, xlev = attr(object$terms[[i]], "xlev")) Xp <- cbind(Xp, model.matrix(object$terms[[i]], data = mfp, contrasts.arg = attr(object$terms[[i]], "contrast"))[,-1,drop = FALSE]) } else { Bs <- suppressWarnings(predict.bbase.os(object$terms[[i]], newdata[,object$iformula$II[2,i]])) Xp <- cbind(Xp, Bs) } } else { # Factor by curve Bs <- predict.bbase.interaction.factor.by.curve.os(object$terms[[i]], newdata[,object$iformula$II[2,i]], newdata[,object$iformula$II[1,i]]) Xp <- cbind(Xp, Bs) } } # Add the intercept Xp <- cbind(1, Xp) } res <- list() res$X <- Xp res }
/scratch/gouwar.j/cran-all/cranData/AROC/R/predict.design.matrix.bnp.R
predict.design.matrix.bsp <- function(object, newdata, ...) {
	mfp <- model.frame(object$mt, newdata, xlev = attr(object$mt, "xlev"))
	Xp <- model.matrix(object$mt, data = mfp, contrasts.arg = attr(object$mt, "contrast")) # Includes the intercept
	res <- list()
	res$X <- Xp
	res
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/predict.design.matrix.bsp.R
predictive.checks.AROC.bnp <- function(object, statistics = c("min","max","kurtosis","skewness"), devnew = TRUE) { if(class(object)[2] != "AROC.bnp") { stop(paste0("This function can not be used for this object class: ", class(object)[object])) } y0 <- object$data_model$y$h n0 <- length(y0) nrep <- nrow(object$fit$probs) yrep <- matrix(0, nrow = n0, ncol = nrep) aux <- t(apply(object$fit$probs[1:nrep,], 1, function(x, n) sample(1:length(x), n, replace = TRUE, prob = x), n = n0)) for(l in 1:nrep) { yrep[,l] <- rnorm(n = n0, mean = colSums(t(object$data_model$X$h)*t(object$fit$beta[l,aux[l,],])), sd = object$fit$sd[l,aux[l,]]) } i = 1 for(stat in statistics) { if(i != 1 & devnew) dev.new() yrepstat <- apply(yrep, 2, function(y, stat) {do.call(stat, list(y))}, stat = stat) ystat <- do.call(stat, list(y0)) xlim <- range(c(yrepstat,ystat)) hist(yrepstat, col = "gray60", main = stat, xlim = xlim, xlab = "Statistic") abline(v = ystat,col="red",lwd=3) i = i + 1 } # Density if(devnew) dev.new() ylim <- c(0, max(density(y0)$y) + 0.2) xlim <- c(min(density(y0)$x) - 0.2, max(density(y0)$x) - 0.2) plot(density(yrep[,1]),col = "lightskyblue1", ylim = ylim, xlim = xlim, main = "Density", xlab = "Diagnostic test outcome (nondiseased group)") s <- sample(1:nrep, ifelse(nrep < 500, nrep, 500)) for(i in s){ lines(density(yrep[,i]), col="lightskyblue1") } lines(density(y0), col = "black", lwd = 4) res <- list() res$yrep <- yrep res$y0 <- y0 invisible(res) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/predictive.checks.AROC.bnp.R
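## Sketch for predictive.checks.AROC.bnp() above, assuming 'fit_bnp' is the AROC.bnp()
## fit from the earlier sketch. Only base-R statistics are requested here so that no
## additional packages are needed; devnew = FALSE keeps all plots on one device.
pc <- predictive.checks.AROC.bnp(fit_bnp, statistics = c("min", "max", "median"),
                                 devnew = FALSE)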
predictive.checks.AROC.bsp <- function(object, statistics = c("min","max","median","skewness"), devnew = TRUE) { if(class(object)[2] != "AROC.bsp") { stop(paste0("This function can not be used for this object class: ", class(object)[2])) } y0 <- object$data_model$y$h n0 <- length(y0) nrep <- length(object$fit$sd) yrep <- matrix(0, nrow = n0, ncol = nrep) for(l in 1:nrep) { yrep[,l] <- rnorm(n = n0, mean = as.numeric(object$data_model$X$h%*%object$fit$beta[l,]), sd = object$fit$sd[l]) } i = 1 for(stat in statistics) { if(i != 1 & devnew) dev.new() yrepstat <- apply(yrep, 2, function(y, stat) {do.call(stat, list(y))}, stat = stat) ystat <- do.call(stat, list(y0)) xlim <- range(c(yrepstat,ystat)) hist(yrepstat, col = "gray60", main = stat, xlim = xlim, xlab = "Statistic") abline(v = ystat,col="red",lwd=3) i = i + 1 } if(devnew) dev.new() ylim <- c(0, max(density(y0)$y) + 0.2) xlim <- c(min(density(y0)$x) - 0.2, max(density(y0)$x) - 0.2) plot(density(yrep[,1]), col = "lightskyblue1", ylim = ylim, xlim = xlim, main = "Density", xlab = "Diagnostic test outcome (nondiseased group)") # Only a sample s <- sample(1:nrep, ifelse(nrep < 500, nrep, 500)) for(i in s){ lines(density(yrep[,i]), col = "lightskyblue1") } lines(density(y0),col = "black",lwd = 4) res <- list() res$yrep <- yrep res$y0 <- y0 invisible(res) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/predictive.checks.AROC.bsp.R
print.AROC <- function(x, ...) { method <- switch(class(x)[2], "pooledROC.BB" = "Pooled ROC curve - Bayesian bootstrap", "pooledROC.emp" = "Pooled ROC curve - Empirical", "AROC.kernel" = "AROC Kernel-based", "AROC.bnp" = "AROC Bayesian nonparametric", "AROC.bsp" = "AROC Bayesian semiparametric", "AROC.sp" = "AROC semiparametric") cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "") cat(paste0("\nApproach: ", method)) cat("\n----------------------------------------------\n") auc_aauc <- ifelse(any(class(x) %in% c("pooledROC.BB", "pooledROC.emp")), "Area under the pooled ROC curve", "Area under the covariate-adjusted ROC curve") if(length(x$AUC) == 3) { legend.text <- paste0(auc_aauc, ": ", paste(round(x$AUC[1], 3), " (", round(x$AUC[2], 3),"",", ", round(x$AUC[3], 3),")", sep = "")) } else { legend.text <- paste0(auc_aauc, ": ", round(x$AUC[1], 3)) } cat(paste0(legend.text,"\n")) if(!is.null(x$pAUC)) { p_auc_aauc <- ifelse(any(class(x) %in% c("pooledROC.BB", "pooledROC.emp")), "Partial area under the pooled ROC curve", "Partial area under the covariate-adjusted ROC curve") p_auc_aauc <- paste0(p_auc_aauc, " (FPF = ", attr(x$pAUC, "value"), ")") if(length(x$AUC) == 3) { legend.text <- paste0(p_auc_aauc, ": ", paste(round(x$pAUC[1], 3), " (", round(x$pAUC[2], 3),"",", ", round(x$pAUC[3], 3),")", sep = "")) } else { legend.text <- paste0(p_auc_aauc, ": ", round(x$pAUC[1], 3)) } cat(paste0(legend.text,"\n")) } waic <- any(class(x) %in% c("AROC.bnp", "AROC.bsp")) & !is.null(x$WAIC) lpml <- any(class(x) %in% c("AROC.bnp", "AROC.bsp")) & !is.null(x$lpml) if(waic | lpml) { cat("\n\nModel selection criteria - Healthy population") cat("\n----------------------------------------------\n") if(waic) { cat(paste("Widely applicable information criterion (WAIC): ", round(x$WAIC, 3),"\n")) } if(lpml) { cat(paste("Pseudo marginal likelihood (LPML): ", round(x$lpml$lpml),"\n")) } } invisible(x) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/print.AROC.R
print.summary.AROC <- function(x, ...) {
	print.AROC(x)
	invisible(x)
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/print.summary.AROC.R
qh <- function(x) { quantile(x,0.975) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/qh.R
ql <- function(x) { quantile(x,0.025) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/ql.R
regnth <- function(y,X,m0,S0,nu0,psi0,a,b,nsim,scale=TRUE) { yt=y if(scale==TRUE){ yt <- y/sd(y) } n <- length(y) k <- ncol(X) beta <- beta1 <- matrix(0, nrow = nsim, ncol = k) sigma2 <- sigma21 <- numeric(nsim) mu0 <- matrix(0, nrow = nsim, ncol = k) V0inv <- array(0, c(nsim,k,k)) bols <- solve(t(X)%*%X)%*%(t(X)%*%yt) e <- yt-X%*%bols s2 <- (t(e)%*%e)/(n-k) beta[1,] <-bols sigma2[1] <- s2 if(scale == TRUE){ beta1[1,] <- sd(y)*beta[1,] sigma21 <- var(y)*sigma2[1] } mu0[1,] <- rep(0,k) V0inv[1,,] <- solve(100*diag(k)) for(i in 2:nsim) { V1 <- solve(V0inv[i-1,,]+(1/sigma2[i-1])*t(X)%*%X) mu1 <- V1%*%(V0inv[i-1,,]%*%mu0[i-1,]+(1/sigma2[i-1])*t(X)%*%yt) beta1[i,] = beta[i,] <- mvrnorm(1, mu = mu1, Sigma = V1) if(scale==TRUE){ beta1[i,] <- sd(y)*beta[i,] } a1 <- a+(n/2) b1 <- b+0.5*(t(yt-X%*%beta[i,])%*%(yt-X%*%beta[i,])) sigma21[i] = sigma2[i] <- 1/rgamma(1, shape = a1, rate = b1) if(scale==TRUE){ sigma21[i] <- var(y)*sigma2[i] } Vaux <- solve(solve(S0) + V0inv[i-1,,]) mu0[i,] <- mvrnorm(1, mu = Vaux%*%(V0inv[i-1,,]%*%t(t(beta[i,]))+solve(S0)%*%m0), Sigma = Vaux) V0inv[i,,] <- rWishart(1, df = nu0+1, solve(nu0*psi0 + (beta[i,]-mu0[i,])%*%(t(beta[i,]-mu0[i,])))) } return(list(beta1,sigma21)) }
/scratch/gouwar.j/cran-all/cranData/AROC/R/regnth.R
simpson <- function(ROC, set.p) {
	# Composite Simpson's rule on an evenly spaced grid (length of set.p must be odd)
	l.set.p <- length(set.p)
	integral <- (set.p[l.set.p] - set.p[1])/(l.set.p - 1)/3*(ROC[1] + ROC[l.set.p] +
		4*sum(ROC[seq(2, l.set.p - 1, by = 2)]) + 2*sum(ROC[seq(3, l.set.p - 2, by = 2)]))
	integral
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/simpson.R
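## Quick check of simpson() above on a curve with known area: the ROC of an
## uninformative test is the diagonal, whose area under the curve is 0.5. The grid
## has an odd number of points, as required by the composite Simpson's rule.
p <- seq(0, 1, length.out = 101)
simpson(ROC = p, set.p = p)   # 0.5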
summary.AROC <- function(object, ...) {
	class(object) <- c(class(object), "summary.AROC")
	object
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/summary.AROC.R
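## Sketch of the S3 methods defined above for class "AROC": print(), summary() and
## plot() dispatch to print.AROC(), summary.AROC() and plot.AROC() for any fitted
## object of the package ('fit_sp' is assumed to come from the AROC.sp() sketch).
print(fit_sp)
summary(fit_sp)
plot(fit_sp)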
waicnp <- function(y, X, res, L, nsim, nburn) {
	n <- length(y)
	term <- array(0, c(nsim-nburn, L, n))
	logtermsum <- matrix(0, nrow = nsim-nburn, ncol = n)
	for(k in 1:(nsim-nburn)) {
		for(l in 1:L) {
			term[k,l,] <- res[[1]][k+nburn,l]*dnorm(y, mean = X%*%res[[2]][k+nburn,l,], sd = sqrt(res[[3]][k+nburn,l]))
		}
		# Need this :(
		logtermsum[k,] <- apply(term[k,,], 2, function(x) log(sum(x)))
	}
	# Very time consuming: see why
	#logtermsum <- apply(term, c(1,3), function(x) log(sum(x)))
	lpd <- sum(log(apply(exp(logtermsum), 2, mean)))
	p2 <- sum(apply(logtermsum, 2, var))
	waic <- -2*(lpd-p2)
	return(waic)
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/waicnp.R
waicp <- function(y, X, res, nsim, nburn) {
	n <- length(y)
	logterm <- matrix(0, nrow = nsim-nburn, ncol = n)
	for(k in 1:(nsim-nburn)){
		logterm[k,] <- dnorm(y, mean = X%*%res[[1]][k+nburn,], sd = sqrt(res[[2]][k+nburn]), log = TRUE)
	}
	lpd <- sum(log(apply(exp(logterm), 2, mean)))
	p2 <- sum(apply(logterm, 2, var))
	waic <- -2*(lpd-p2)
	return(waic)
}
/scratch/gouwar.j/cran-all/cranData/AROC/R/waicp.R
#' @keywords internal #' @noRd AQ_metadata_reshape <- function() { '%notin%' <- Negate('%in%') ##### Check online availability for AQ metadata temp <- tempfile() res <- suppressWarnings(try(curl::curl_fetch_disk("https://www.dati.lombardia.it/resource/ib47-atvt.csv", temp), silent = TRUE)) if(res$status_code != 200) { message(paste0("The internet resource for air quality stations metadata is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. If the problem persists, please contact the package maintainer.")) return(invisible(NULL)) } else { Metadata <- RSocrata::read.socrata("https://www.dati.lombardia.it/resource/ib47-atvt.csv") } Metadata <- Metadata %>% dplyr::rename(IDSensor = .data$idsensore, IDStation = .data$idstazione, Pollutant = .data$nometiposensore, NameStation = .data$nomestazione, Altitude = .data$quota, Province = .data$provincia, City = .data$comune, DateStart = .data$datastart, DateStop = .data$datastop, Latitude = .data$lat, Longitude = .data$lng) %>% dplyr::mutate(DateStart = lubridate::make_date(year = lubridate::year(.data$DateStart), month = lubridate::month(.data$DateStart), day = lubridate::day(.data$DateStart)), DateStop = lubridate::make_date(year = lubridate::year(.data$DateStop), month = lubridate::month(.data$DateStop), day = lubridate::day(.data$DateStop))) %>% dplyr::select(.data$IDSensor, .data$IDStation, .data$Pollutant, .data$NameStation, .data$Altitude, .data$Province, .data$City, .data$DateStart, .data$DateStop, .data$Latitude, .data$Longitude) %>% dplyr::mutate(Pollutant = dplyr::recode(.data$Pollutant, "Ammoniaca" = "Ammonia", "Arsenico" = "Arsenic", "Benzene" = "Benzene", "Benzo(a)pirene" = "Benzo_a_pyrene", "Biossido di Azoto" = "NO2", "Monossido di Azoto" = "NO", "Ossidi di Azoto" = "NOx", "Biossido di Zolfo" = "Sulfur_dioxide", "BlackCarbon" = "BlackCarbon", "Monossido di Carbonio" = "CO", "Nikel" = "Nikel", "Ozono" = "Ozone", "Cadmio" = "Cadmium", "PM10 (SM2005)" = "PM10", "PM10" = "PM10", "Particelle sospese PM2.5" = "PM2.5", "Particolato Totale Sospeso" = "PM_tot", "Piombo" = "Lead")) ### Name stations # dplyr::across(c(.data$NameStation,.data$City), ~ stringi::stri_trans_general(str = .x, id="Latin-ASCII")) Metadata <- Metadata %>% dplyr::mutate(dplyr::across(c(.data$NameStation,.data$City), toupper), dplyr::across(c(.data$NameStation,.data$City), ~ gsub("\\-", " ", .x)), dplyr::across(c(.data$NameStation,.data$City), ~ stringr::str_replace_all(.x, c("S\\."="San ", "V\\."="Via ", "V\\.LE" = "Viale", " D\\`" = " D\\' ", " D\\` " = " D\\'", "D\\`" = " D\\'", "D\\'" = " D\\' ", "P\\.ZZA" = "Piazza", "C\\.SO" = "Corso", "LOC\\." 
= "Localita"))), dplyr::across(c(.data$NameStation,.data$City), tm::removePunctuation), dplyr::across(c(.data$NameStation,.data$City), tm::removeNumbers), dplyr::across(c(.data$NameStation,.data$City), tm::stripWhitespace), dplyr::across(c(.data$NameStation,.data$City), stringr::str_to_title), dplyr::across(c(.data$NameStation,.data$City), ~ stringr::str_replace_all(.x, c(" D " = " D\\'", " Xi " = " XI ", " Xxv " = " XXV ", " Via " = " - Via ", " Corso " = " - Corso ", " Localita " = rlang::as_utf8_character(" - Localit\u00e0 "), " Piazza " = " - Piazza ", " Via Le " = " Viale ", "F Turati" = "Turati", "Via Serafino Delluomo" = "Via Serafino Dell'Uomo", "Via Dellartigianato" = "Via Dell'Artigianato", "Montanaso Lombardo Sp" = "Montanaso Lombardo SP202", "Sp Casa Dellalpino" = " - SP27 Casa Dell'Alpino", "Spino D'Adda Sp" = "Spino D'Adda SP1", "Villasanta - Via A Volta" = "Villasanta - Via Volta", "Vimercate - Via Dellospedale" = "Vimercate - Via Dell'Ospedale", "Ss Sempione" = "SS Sempione")))) ### Add extra information from ARPA offices (uploaded on Paolo Maranzano's GitHub page) # ARPA_zone = ARPA Lombardia zoning of the region: https://www.arpalombardia.it/Pages/Aria/Rete-di-rilevamento/Zonizzazione.aspx # ARPA_stat_type = stations type: https://www.arpalombardia.it/Pages/Aria/Rete-di-rilevamento/Criteri-di-rilevamento/Tipologia-delle-stazioni.aspx?firstlevel=Ieri ##### Check online availability for further AQ metadata from GitHub temp <- tempfile() res <- curl::curl_fetch_disk("https://raw.githubusercontent.com/PaoloMaranzano/ARPALData/main/AQ_stations_ARPA_Lombardia.csv", temp) if(res$status_code != 200) { stop(paste0("The internet resource for further air quality stations metadata (from GitHub) is not available at the moment, try later. If the problem persists, please contact the package maintainer.")) } else { Metadata_ARPA_url <- "https://raw.githubusercontent.com/PaoloMaranzano/ARPALData/main/AQ_stations_ARPA_Lombardia.csv" } Metadata_ARPA <- readr::read_csv(Metadata_ARPA_url) Metadata_ARPA <- Metadata_ARPA %>% dplyr::select(.data$IDStation,.data$ARPA_zone,.data$ARPA_stat_type) %>% dplyr::distinct() Metadata <- dplyr::left_join(Metadata,Metadata_ARPA,by = c("IDStation")) Metadata <- Metadata %>% filter(.data$IDStation %notin% c(518,602,603,612,694,698,700)) # Galliate 518 (NO) --> Fuori regione, chiusa nel 2017 # Melegnano 602 --> Chiusa dal 2017 --> Chiusa dal 2017 # Filago via Fermi Marne (612) --> Chiusa dal 2017 # Castiraga 603 --> Chiusa dal 2017 # Salionze 694 (VR) --> Fuori regione # Ceneselli 698 (RO) --> Fuori regione # Melara 700 (RO) --> Fuori regione, chiusa dal 2018 structure(list(Metadata = Metadata)) attr(Metadata, "class") <- c("ARPALdf","tbl_df","tbl","data.frame") return(Metadata) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/AQ_metadata_reshape.R
AQ_municipal_metadata_reshape <- function() { ##### Check online availability for AQ municipal metadata temp <- tempfile() res <- suppressWarnings(try(curl::curl_fetch_disk("https://www.dati.lombardia.it/resource/5rep-i3mj.csv", temp), silent = TRUE)) if(res$status_code != 200) { message(paste0("The internet resource for municipal air quality stations metadata is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. If the problem persists, please contact the package maintainer.")) return(invisible(NULL)) } else { Metadata <- RSocrata::read.socrata("https://www.dati.lombardia.it/resource/5rep-i3mj.csv") } # if(res$status_code != 200) { # message(paste0("The internet resource for air quality of municipalities metadata is not available at the moment, try later.\nIf the problem persists, please contact the package maintainer.")) # return(invisible(NULL)) # } else { # Metadata <- RSocrata::read.socrata("https://www.dati.lombardia.it/resource/5rep-i3mj.csv") # } Metadata <- Metadata %>% dplyr::rename(IDSensor = .data$idsensore, IDStation = .data$idstazione, Pollutant = .data$nometiposensore, Province = .data$provincia, NameStation = .data$comune, DateStart = .data$datastart, DateStop = .data$datastop) %>% dplyr::mutate(DateStart = lubridate::ymd(.data$DateStart), DateStop = lubridate::ymd(.data$DateStop)) %>% dplyr::select(.data$IDSensor, .data$IDStation, .data$Pollutant, .data$Province, .data$NameStation, .data$DateStart, .data$DateStop) %>% dplyr::mutate(Pollutant = dplyr::recode(.data$Pollutant, "Ammoniaca" = "Ammonia", "Arsenico" = "Arsenic", "Benzene" = "Benzene", "Benzo(a)pirene" = "Benzo_a_pyrene", "Biossido di Azoto" = "NO2", "Monossido di Azoto" = "NO", "Ossidi di Azoto" = "NOx", "Biossido di Zolfo" = "Sulfur_dioxide", "BlackCarbon" = "BlackCarbon", "Monossido di Carbonio" = "CO", "Nikel" = "Nikel", "Ozono" = "Ozone", "Cadmio" = "Cadmium", "PM10 (SM2005)" = "PM10", "PM10" = "PM10", "Particelle sospese PM2.5" = "PM2.5", "Particolato Totale Sospeso" = "PM_tot", "Piombo" = "Lead")) ### Name stations (municipalities) Metadata <- Metadata %>% dplyr::mutate(dplyr::across(c(.data$NameStation), toupper), dplyr::across(c(.data$NameStation), ~ gsub("\\-", " ", .x)), dplyr::across(c(.data$NameStation), ~ stringr::str_replace_all(.x, c("S\\."="San ","s\\."="San ", "V\\."="Via ","v\\."="Via ", " D\\`" = " D\\' ", " D\\` " = " D\\'", "D\\`" = " D\\'", "D\\'" = " D\\' "))), dplyr::across(c(.data$NameStation), tm::removePunctuation), dplyr::across(c(.data$NameStation), tm::removeNumbers), dplyr::across(c(.data$NameStation), tm::stripWhitespace)) %>% dplyr::mutate(NameStation = dplyr::recode(.data$NameStation, "CASASCO DINTELVI" = "CASASCO INTELVI", "CERANO DINTELVI" = "CERANO INTELVI", "SAN GIORGIO BIGARELLO" = "BIGARELLO", "PUEGNAGO DEL GARDA" = "PUEGNAGO SUL GARDA", "FELONICA" = "SERMIDE E FELONICA", "GERRE DE CAPRIOLI" = "GERRE DECAPRIOLI")) %>% dplyr::mutate(dplyr::across(c(.data$NameStation), stringr::str_to_title), dplyr::across(c(.data$NameStation), ~ stringr::str_replace_all(.x, c(" D " = " D\\'", "Sermide E Felonica" = "Sermide e Felonica")))) structure(list(Metadata = Metadata)) attr(Metadata, "class") <- c("ARPALdf","tbl_df","tbl","data.frame") return(Metadata) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/AQ_municipal_metadata_reshape.R
#' ARPALData Package
#'
#' Contains functions for downloading and managing air quality and weather data from the Regione Lombardia open database.
#' Data are collected by ARPA Lombardia (Lombardia Environmental Protection Agency), Italy.
#'
#' @docType package
#'
#' @author Paolo Maranzano \email{[email protected]}
#'
#' @name ARPALData
NULL
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/ARPALData.R
#' Summary statistics for a data frame of class 'ARPALdf' #' #' @description 'ARPALdf_Summary' returns many descriptive statistics summaring the data contained in a data frame #' of class ARPALdf. Statistics are calculated at overall level (full sample), by station ID and by year. #' For each variable are reported the basic positioning indices (min, max, mean, median, quantile) and #' variability indices (range, standard deviation). Other reported statistics are the Pearson's linear correlation #' by station and some graphical representation of the distribution (kernel density plot, histogram, boxplot). #' In addition, the function returns useful data-quality information, such as gap length statistics (i.e. number of #' missing observations for each variable by station and by year) and outlier detection tools #' (e.g., Hampel filter and boxplot rule) #' #' @param Data Dataset of class 'ARPALdf' containing the data to be summarised. #' @param by_IDStat Logic value (TRUE or FALSE). Use TRUE (default) to compute summary statistics by Station ID. #' @param by_Year Logic value (TRUE or FALSE). Use TRUE (default) to compute summary statistics by year. #' @param gap_length Logic value (TRUE or FALSE). Use TRUE (default) to compute summary statistics for the gap length of each variable. #' @param correlation Logic value (TRUE or FALSE). Use TRUE (default) to compute linear correlation of available variables. #' @param histogram Logic value (TRUE or FALSE). Use TRUE to plot the histogram of each variable. Default is FALSE. #' @param density Logic value (TRUE or FALSE). Use TRUE to plot the kernel density plot of each variable. Default is FALSE. #' @param outlier Logic value (TRUE or FALSE). Use TRUE to analyse extreme values of each variable #' (boxplot and Hampel filter). Default is FALSE. #' @param verbose Logic value (TRUE or FALSE). Toggle warnings and messages. If 'verbose = TRUE' (default) the function #' prints on the screen some messages describing the progress of the tasks. If 'verbose = FALSE' any message about #' the progression is suppressed. #' #' @return A list of data.frames containing summary descriptive statistics for a data frame of class 'ARPALdf'. #' Summary statistics are computed for the overall sample (Descr), by Station ID (Descr_by_IDStat) and by #' year (Descr_by_Year). Available statistics are: number of NAs, % of NAs over the total sample, number of null values, #' number of negative values, minimum, mean, maximum and standard deviation. 
#' #' @examples #' \donttest{ #' ## Download daily air quality data from all the stations for year 2020 #' if (require("RSocrata")) { #' d <- get_ARPA_Lombardia_AQ_data(ID_station = NULL, Date_begin = "2020-01-01", #' Date_end = "2020-12-31", Frequency = "daily") #' } #' ## Summarising observed data #' sum_stats <- ARPALdf_Summary(Data = d) #' } #' #' @export ARPALdf_Summary <- function(Data, by_IDStat = TRUE, by_Year = TRUE, gap_length = TRUE, correlation = TRUE, histogram = FALSE, density = FALSE, outlier = FALSE, verbose=TRUE) { ### Checks stopifnot("histogram must be TRUE or FALSE" = histogram == FALSE | histogram == TRUE) stopifnot("density must be TRUE or FALSE" = density == FALSE | density == TRUE) stopifnot("by_IDStat must be TRUE or FALSE" = by_IDStat == FALSE | by_IDStat == TRUE) stopifnot("by_Year must be TRUE or FALSE" = by_Year == FALSE | by_Year == TRUE) stopifnot("gap_length must be TRUE or FALSE" = gap_length == FALSE | gap_length == TRUE) stopifnot("Data is not of class 'ARPALdf'" = is_ARPALdf(Data = Data) == TRUE) ### Print message about dimensions n <- dim(Data)[1] m <- length(unique(Data$IDStation)) t <- length(unique(Data$Date)) t1 <- min(Data$Date) t2 <- max(Data$Date) if (verbose==TRUE) { cat("The dataset contains: \n") cat(paste0(" ** ",n," total observations \n")) cat(paste0(" ** ",m," stations/ground sites \n")) cat(paste0(" ** ",t," time stamps from ",t1," to ",t2,"\n")) cat("Inspect this object to obtain summary statistics: \n") cat(" ** on the whole sample (Descr) \n") if (by_IDStat == TRUE) { cat(" ** by Station ID (Descr_by_IDStat) \n") cat("Attention! NA values of the statistics indicate that data don't exist for a specific Station \n")} if (by_Year == TRUE) { cat(" ** by Year (Descr_by_year) \n") cat("Attention! NA values of the statistics indicate that data don't exist for a specific Year \n")} } ### Fix IDStation to integer Data <- Data %>% dplyr::mutate(IDStation = as.integer(.data$IDStation)) ### Overall statistics NA_count <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(is.na(.x)))) NA_count_perc <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sum(is.na(.x))/dim(Data)[1]*100,2))) mean_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(mean(.x,na.rm=T),2))) sd_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sd(.x,na.rm=T),2))) min_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(min(.x,na.rm=T),2))) max_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(max(.x,na.rm=T),2))) null_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x == 0, na.rm=T))) neg_vals <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x < 0, na.rm=T))) descriptives <- 
dplyr::bind_rows(NA_count,NA_count_perc,null_vals,neg_vals,min_vals,mean_vals,max_vals,sd_vals) descriptives <- t(descriptives) descriptives <- data.frame(descriptives) %>% tibble::rownames_to_column() colnames(descriptives) <- c("Var","NA_count","NA_perc","Null_count","Negative_count", "Min","Mean","Max","Std.Dev.") if (is_ARPALdf_AQ(Data = Data) == T) { attr(descriptives, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == T) { attr(descriptives, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } ### Statistics by Station ID if (by_IDStat == TRUE) { if (verbose==TRUE) { cat("Computing summary statistics by Station ID \n") } NA_count_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(is.na(.x)))) NA_count_perc_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sum(is.na(.x))/dplyr::n()*100,2))) mean_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(mean(.x, na.rm=T),2))) sd_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sd(.x, na.rm=T),2))) min_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(min(.x,na.rm=T),2))) max_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(max(.x,na.rm=T),2))) null_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x == 0, na.rm=T))) neg_vals_stat <- Data %>% dplyr::group_by(.data$IDStation) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x < 0, na.rm=T))) Descr_by_IDStat <- list(NA_count_stat,NA_count_perc_stat,null_vals_stat,neg_vals_stat, min_vals_stat,mean_vals_stat,max_vals_stat,sd_vals_stat) names(Descr_by_IDStat) <- c("NA_count_by_stat","NA_perc_by_stat","Null_count_by_stat","Negative_count_by_stat", "Min_by_stat","Mean_by_stat","Max_by_stat","StdDev_by_stat") for (i in 1:length(Descr_by_IDStat)) { Descr_by_IDStat[[i]][is.nan_df(Descr_by_IDStat[[i]])] <- NA Descr_by_IDStat[[i]][is.infinite_df(Descr_by_IDStat[[i]])] <- NA if (is_ARPALdf_AQ(Data = Data) == TRUE) { attr(Descr_by_IDStat[[i]], "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == TRUE) { attr(Descr_by_IDStat[[i]], "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } } } ### Statistics by Year if (by_Year == TRUE) { if (verbose == TRUE) { cat("Computing summary statistics by Year \n") } NA_count_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% 
dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(is.na(.x)))) NA_count_perc_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sum(is.na(.x))/dplyr::n()*100,2))) mean_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(mean(.x, na.rm=T),2))) sd_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(sd(.x, na.rm=T),2))) min_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(min(.x,na.rm=T),2))) max_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ round(max(.x,na.rm=T),2))) null_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x == 0, na.rm=T))) neg_vals_year <- Data %>% dplyr::group_by(lubridate::year(.data$Date)) %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ sum(.x < 0, na.rm=T))) Descr_by_year <- list(NA_count_year,NA_count_perc_year,null_vals_year,neg_vals_year, min_vals_year, mean_vals_year,max_vals_year,sd_vals_year) names(Descr_by_year) <- c("NA_count_by_year","NA_perc_by_year","Null_count_by_year","Negative_count_by_year", "Min_by_year","Mean_by_year","Max_by_year","StdDev_by_year") for (i in 1:length(Descr_by_year)) { Descr_by_year[[i]][is.nan_df(Descr_by_year[[i]])] <- NA Descr_by_year[[i]][is.infinite_df(Descr_by_year[[i]])] <- NA if (is_ARPALdf_AQ(Data = Data) == TRUE) { attr(Descr_by_year[[i]], "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == TRUE) { attr(Descr_by_year[[i]], "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } } } ### Gap length by variable if (gap_length == TRUE) { if (verbose == TRUE) { cat("Computing gap lengths statistics by variable \n") } vars <- Data %>% dplyr::select(tidyselect::vars_select_helpers$where(is.double) & tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer)) %>% colnames() gap_length <- vector("list", length = length(vars)) for (v in 1:length(vars)) { var <- vars[v] gl <- Data %>% dplyr::select(.data$Date,.data$IDStation,.data$NameStation,var = var) %>% dplyr::filter(!is.na(.data$var)) %>% dplyr::group_by(.data$IDStation,.data$NameStation) %>% dplyr::summarise(.groups = "keep", gap = lubridate::interval(.data$Date,.data$Date[-1])) %>% dplyr::mutate(gap = lubridate::time_length(.data$gap,unit = attributes(Data)$units)) %>% dplyr::filter(.data$gap > 0) %>% dplyr::summarise(.groups = "keep", min_gap = min(.data$gap), q25_gap = quantile(.data$gap,probs = 0.25), mean_gap = round(mean(.data$gap),3), 
median_gap = quantile(.data$gap,probs = 0.50), q75_gap = quantile(.data$gap,probs = 0.75), max_gap = max(.data$gap), sd_gap_length = round(sd(.data$gap),3), # Va sistemato per versione CRAN Length1 = sum(.data$gap == 1), Length2 = sum(.data$gap == 2), Length24 = sum(.data$gap == 24)) %>% as.data.frame() %>% dplyr::mutate(dplyr::across(tidyselect::contains("gap"), ~ as_difftime(.x,units = attributes(Data)$units))) colnames(gl) <- c("IDStation","NameStation", paste0(var,"_min_gap"),paste0(var,"_q25_gap"), paste0(var,"_mean_gap"),paste0(var,"_median_gap"), paste0(var,"_q75_gap"),paste0(var,"_max_gap"),paste0(var,"_sd_gap"), paste0(var,"_freq_gap1"),paste0(var,"_freq_gap2"),paste0(var,"_freq_gap24")) if (is_ARPALdf_AQ(Data = Data) == TRUE) { attr(gl, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == TRUE) { attr(gl, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } gap_length[[v]] <- gl } names(gap_length) <- vars } ### Histogram if (histogram == TRUE) { if (verbose == TRUE) { cat("Graphics: plotting histogram of each variable \n") } hist_plot <- Data %>% dplyr::select(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer)) %>% tidyr::pivot_longer(cols = dplyr::everything()) %>% ggplot2::ggplot(aes(.data$value)) + ggplot2::geom_histogram(bins = 30, fill="blue") + ggplot2::facet_wrap(~ .data$name, scales = "free") print(hist_plot) } ### Kernel density plot if (density == TRUE) { if (verbose == TRUE) { cat("Graphics: plotting density of each variable \n") } dens_plot <- Data %>% dplyr::select(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer)) %>% tidyr::pivot_longer(cols = dplyr::everything()) %>% ggplot2::ggplot(aes(.data$value)) + ggplot2::geom_density(fill="blue",alpha = 0.5) + ggplot2::facet_wrap(~ .data$name, scales = "free") print(dens_plot) } ### Outlier analysis if (outlier == TRUE) { if (verbose == TRUE) { cat("Computing outlier statisics for each variable \n") } Data_long <- Data %>% tidyr::pivot_longer(cols = tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), names_to = "Vars", values_to = "Value") boxp <- ggplot(Data_long, aes(y = .data$Value)) + geom_boxplot(aes(fill = .data$Vars)) + coord_flip() + facet_wrap(~ .data$Vars, scales = "free") + labs(title = "Boxplot on the whole sample by variable") + theme(legend.position = "") print(boxp) if (verbose == TRUE) { cat("Computing Hampel filter for each variable \n") cat("Reports the % of observations above +3*MAD and below -3*MAD \n") } out <- Data %>% dplyr::summarise(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric) & !tidyselect::vars_select_helpers$where(is.integer), ~ Hampel_flt(.x))) %>% list() hampel <- matrix(NA,nrow = dim(out[[1]])[2], ncol = 7) for (i in 1:dim(out[[1]])[2]) { hampel[i,2] <- round(as.numeric(out[[1]][[i]]$I_low),2) hampel[i,3] <- round(length(out[[1]][[i]]$outlier_ind_low),2) hampel[i,4] <- round(length(out[[1]][[i]]$outlier_ind_low)/dim(Data)[1]*100,2) hampel[i,5] <- round(as.numeric(out[[1]][[i]]$I_upp),2) hampel[i,6] <- round(length(out[[1]][[i]]$outlier_ind_upp),2) hampel[i,7] <- round(length(out[[1]][[i]]$outlier_ind_upp)/dim(Data)[1]*100,2) } hampel <- data.frame(hampel) hampel[,1] <- names(out[[1]]) colnames(hampel) <- c("Variable","Lower_bound","Obs_below_low_count","Obs_below_low_perc","Upper_bound","Obs_above_upp_count","Obs_above_upp_perc") if (is_ARPALdf_AQ(Data = 
Data) == T) { attr(hampel, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == T) { attr(hampel, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } } ### Correlation analysis if (correlation == TRUE) { if (verbose == TRUE) { cat("Computing linear correlation analysis for available variable \n") } stz <- unique(Data$IDStation) name_stz <- unique(Data$NameStation) cor_matrix <- vector("list", length = length(stz)) for (s in 1:length(stz)) { cor_matrix[[s]] <- Data %>% dplyr::filter(.data$IDStation == stz[s]) %>% dplyr::select(tidyselect::vars_select_helpers$where(is.double) & tidyselect::vars_select_helpers$where(is.numeric)) %>% stats::cor(use = "pairwise.complete.obs") %>% as.data.frame() %>% tibble::rownames_to_column(var = "Var1") %>% tidyr::pivot_longer(cols = -.data$Var1, names_to = "Var2", values_to = "corr") %>% dplyr::mutate(IDStation = stz[s], NameStation = name_stz[s]) %>% dplyr::filter(.data$Var1 != .data$Var2) %>% tidyr::pivot_wider(names_from = c("Var1","Var2"),values_from = "corr") } cor_matrix <- bind_rows(cor_matrix) if (is_ARPALdf_AQ(Data = Data) == TRUE) { attr(cor_matrix, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } else if (is_ARPALdf_W(Data = Data) == TRUE) { attr(cor_matrix, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } } ### Output list ret_list <- list(Descr = descriptives) if (exists("Descr_by_IDStat", inherits = FALSE)) { ret_list <- c(ret_list, Descr_by_IDStat = list(Descr_by_IDStat)) } if (exists("Descr_by_year", inherits = FALSE)) { ret_list <- c(ret_list, Descr_by_year = list(Descr_by_year)) } if (exists("hampel", inherits = FALSE)) { ret_list <- c(ret_list, Hampel = list(hampel)) } if (exists("gap_length", inherits = FALSE)) { ret_list <- c(ret_list, Gap_length = list(gap_length)) } if (exists("cor_matrix", inherits = FALSE)) { ret_list <- c(ret_list, Cor_matrix = list(cor_matrix)) } ### Output return(ret_list) }
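
# Illustrative sketch (not part of the package): a minimal, offline call of
# ARPALdf_Summary() on a hand-made object carrying the 'ARPALdf' class (such objects
# are normally produced by the get_ARPA_Lombardia_* download functions, as in the
# roxygen example above). The helper below, its toy data and the manual class
# assignment are hypothetical and only added for illustration.
demo_ARPALdf_Summary <- function() {
  toy <- tibble::tibble(
    Date        = as.Date("2020-01-01") + 0:5,
    IDStation   = rep(c(501L, 502L), each = 3),
    NameStation = rep(c("Milano - Via Marche", "Bergamo"), each = 3),
    NO2         = c(30, NA, 41, 25, 27, 80)
  )
  class(toy) <- c("ARPALdf", class(toy))
  out <- ARPALdf_Summary(Data = toy, by_Year = FALSE, gap_length = FALSE,
                         correlation = FALSE, verbose = FALSE)
  out$Descr                          # overall table: NA counts, min/mean/max, std. dev.
  out$Descr_by_IDStat$Mean_by_stat   # mean NO2 by station ID
}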
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/ARPALdf_Summary.R
#' Generate a map of summary statistics for a given ARPALdf data.frame
#'
#' @description 'ARPALdf_Summary_map' represents on a map (polygon of Lombardy) the data contained in a data frame
#' of class 'ARPALdf' containing the values or the descriptive statistics by station. Data can be either
#' an ARPALdf of observed data (from 'get_ARPA_Lombardia_xxx' commands) or an ARPALdf obtained as a summary descriptive
#' statistic (from the 'ARPALdf_Summary' command).
#'
#' @param Data Dataset of class 'ARPALdf' containing the values or the descriptive statistics to plot on the map.
#' Data can be either an ARPALdf of observed data (from 'get_ARPA_Lombardia_xxx' commands) or an ARPALdf obtained
#' as a summary descriptive statistic (from the 'ARPALdf_Summary' command).
#' @param Title_main Title of the plot.
#' @param Title_legend Title of the legend.
#' @param Variable Summary variable to represent.
#' @param prov_line_type Linetype for Lombardy provinces. Default is 1.
#' @param prov_line_size Size of the line for Lombardy provinces. Default is 1.
#' @param col_scale Vector indicating the minimum, the middle and the maximum point colors.
#' Default is c("green","yellow","red").
#' @param val_midpoint Numeric. Value associated with the middle point of the color scale.
#' Default is NULL (midpoint is set equal to the average of the variable to represent).
#' @param xlab x-axis label. Default is 'Longitude'.
#' @param ylab y-axis label. Default is 'Latitude'.
#'
#' @return A map of selected stations across the Lombardy region.
#'
#' @examples
#' \donttest{
#' ## Download daily air quality data from all the stations for year 2020
#' if (require("RSocrata")) {
#' d <- get_ARPA_Lombardia_AQ_data(ID_station = NULL, Date_begin = "2020-01-01",
#' Date_end = "2020-12-31", Frequency = "daily")
#' }
#' ## Summarising observed data
#' s <- ARPALdf_Summary(Data = d)
#' ## Mapping of the average NO2 in 2020 at several stations
#' ARPALdf_Summary_map(Data = s$Descr_by_IDStat$Mean_by_stat,
#' Title_main = "Mean NO2 by station in 2020", Variable = "NO2")
#' }
#'
#' @export

ARPALdf_Summary_map <- function(Data, Title_main, Title_legend = "Variable", Variable,
                                prov_line_type = 1, prov_line_size = 1,
                                col_scale = c("#00FF00","#FFFF00","#FF0000"),
                                val_midpoint = NULL, xlab = "Longitude", ylab = "Latitude") {

  ### Checks
  stopifnot("Data is not of class ARPALdf or it does not contain the column IDStation" =
              is_ARPALdf(Data = Data) == T & sum(colnames(Data) == "IDStation") == 1)

  if (is_ARPALdf_AQ_mun(Data = Data) == T) {
    Stats <- get_ARPA_Lombardia_AQ_municipal_registry()
    NUTS_level <- "LAU"
  } else {
    NUTS_level <- "NUTS3"
  }

  Lombardia <- get_Lombardia_geospatial(NUTS_level)

  if (is.null(Lombardia)) {
    message("The map will not include the ground layer with Lombardy's shapefile.
Only points/coordinates will be plot.") } if (is_ARPALdf_AQ(Data = Data) == T) { Stats <- get_ARPA_Lombardia_AQ_registry() } else if (is_ARPALdf_W(Data = Data) == T) { Stats <- get_ARPA_Lombardia_W_registry() } if (is_ARPALdf_AQ_mun(Data = Data) == T) { Data$var <- as.numeric(dplyr::pull(Data[,Variable])) ### Scale color midpoint if (is.null(val_midpoint)==T) { val_midpoint <- mean(Data$var,na.rm=T) } Data <- dplyr::left_join(Data,Lombardia,by=c("NameStation"="City")) Data <- Data %>% sf::st_as_sf() geo_plot <- Data %>% ggplot2::ggplot() + ggplot2::geom_sf(aes(fill = .data$var)) + ggplot2::scale_fill_gradient2(Title_legend, na.value = NA, low = col_scale[1], mid = col_scale[2], midpoint = val_midpoint, high = col_scale[3]) + ggplot2::guides(size = FALSE, scale = "none") + ggplot2::labs(title = Title_main) + ggplot2::theme_bw() + ggplot2::scale_x_continuous(labels = function(x) paste0(x, '\u00B0', "E")) + ggplot2::scale_y_continuous(labels = function(x) paste0(x, '\u00B0', "N")) } else { Stats <- Stats %>% dplyr::filter(.data$IDStation %in% unique(Data$IDStation)) %>% dplyr::distinct(.data$IDStation,.data$Latitude,.data$Longitude) Data <- dplyr::left_join(Data,Stats,by=c("IDStation")) Data$var <- as.numeric(dplyr::pull(Data[,Variable])) ### Scale color midpoint if (is.null(val_midpoint)==T) { val_midpoint <- mean(Data$var,na.rm=T) } Data <- Data %>% sf::st_as_sf(coords = c("Longitude", "Latitude"),crs = 4326) geo_plot <- Lombardia %>% ggplot2::ggplot() + ggplot2::geom_sf(linetype = prov_line_type, size = prov_line_size) + ggplot2::geom_sf(data = Data, aes(size = .data$var, col = .data$var)) + ggplot2::scale_color_gradient2(Title_legend, na.value = NA, low = col_scale[1], mid = col_scale[2], midpoint = val_midpoint, high = col_scale[3]) + ggplot2::guides(size = FALSE, scale = "none") + ggplot2::labs(title = Title_main) + ggplot2::theme_bw() + ggplot2::scale_x_continuous(labels = function(x) paste0(x, '\u00B0', "E")) + ggplot2::scale_y_continuous(labels = function(x) paste0(x, '\u00B0', "N")) } print(geo_plot) }
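
# Illustrative sketch (not part of the package): the role of the 'val_midpoint'
# argument above. ggplot2::scale_color_gradient2() anchors the middle colour of the
# diverging palette at 'midpoint'; by default the function pivots on the mean of the
# plotted variable, but any reference value (e.g. a regulatory limit) can be supplied.
# The helper below and its toy data are hypothetical and only added for illustration.
demo_midpoint_scale <- function(val_midpoint = 40) {
  toy <- data.frame(Longitude = c(9.1, 9.8, 10.3),
                    Latitude  = c(45.4, 45.8, 45.1),
                    NO2       = c(18, 42, 60))
  ggplot2::ggplot(toy, ggplot2::aes(x = Longitude, y = Latitude, colour = NO2)) +
    ggplot2::geom_point(size = 4) +
    ggplot2::scale_color_gradient2("NO2", low = "#00FF00", mid = "#FFFF00",
                                   high = "#FF0000", midpoint = val_midpoint)
  # returns a ggplot object; print() it to draw the toy map
}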
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/ARPALdf_Summary_map.R
#' @keywords internal #' @noRd Custom_summarise <- function(grouped_data,var_vec,fns_vec) { '%notin%' <- Negate('%in%') # Wind direction can only be averaged if(sum(var_vec %in% c("Wind_direction","Wind_direction_gust") & fns_vec != "mean") > 0) { stop("Error: on the Wind_direction and Wind_direction_gust is possible to calculate only the average value. Use 'mean' in 'Fns_vec.'", call. = FALSE) } # Wind speed can only be averaged, maximized or minimized if(sum(var_vec %in% c("Wind_speed","Wind_speed_gust") & fns_vec %notin% c("mean","min","max")) > 0) { stop("Error: on the Wind_speed and Wind_speed_gust is possible to calculate only mean, max or min values. Use 'mean' or 'max' or 'min' in 'Fns_vec.'", call. = FALSE) } # Checks if all the selected variables are available for the actual dataset if (all(dplyr::all_of(var_vec) %in% names(grouped_data)) == F) { stop("Error: one ore more measures are not avaiable for the selected stations! Change the values of 'Var_vec'", call. = FALSE) } vv_vec <- (duplicated(var_vec,fromLast = T) | duplicated(var_vec,fromLast = F)) summ_data <- grouped_data %>% dplyr::summarise(dplyr::across(var_vec[vv_vec & var_vec %in% c("Wind_speed","Wind_speed_gust") & fns_vec=="mean"], ~ Wind_averaging(Wind_speed,Wind_direction)$Wind_speed,.names = "{.col}_mean"), dplyr::across(var_vec[!vv_vec & var_vec %in% c("Wind_speed","Wind_speed_gust") & fns_vec=="mean"], ~ Wind_averaging(Wind_speed,Wind_direction)$Wind_speed), dplyr::across(var_vec[vv_vec & var_vec %in% c("Wind_direction","Wind_direction_gust") & fns_vec=="mean"], ~ Wind_averaging(Wind_speed,Wind_direction)$Wind_direction,.names = "{.col}_mean"), dplyr::across(var_vec[!vv_vec & var_vec %in% c("Wind_direction","Wind_direction_gust") & fns_vec=="mean"], ~ Wind_averaging(Wind_speed,Wind_direction)$Wind_direction), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="mean"], ~ mean(.x, na.rm=T),.names = "{.col}_mean"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="mean"], ~ mean(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="median"], ~ median(.x, na.rm=T),.names = "{.col}_median"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="median"], ~ median(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & grepl('\\bq[0-9]+$',fns_vec)], list(!!!quantilep(.data$.x,fns_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & grepl('\\bq[0-9]+$',fns_vec)])), .names = "{.col}_{.fn}"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & grepl('\\bq[0-9]+$',fns_vec)], list(!!!quantilep(.data$.x,fns_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & grepl('\\bq[0-9]+$',fns_vec)]))), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="sum"], ~ sum(.x, na.rm=T),.names = "{.col}_cum"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="sum"], ~ sum(.x, na.rm=T)), 
dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_direction","Wind_direction_gust") & fns_vec=="min"], ~ min(.x, na.rm=T),.names = "{.col}_min"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_direction","Wind_direction_gust") & fns_vec=="min"], ~ min(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_direction","Wind_direction_gust") & fns_vec=="max"], ~ max(.x, na.rm=T),.names = "{.col}_max"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_direction","Wind_direction_gust") & fns_vec=="max"], ~ max(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="sd"], ~ sd(.x, na.rm=T),.names = "{.col}_sd"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="sd"], ~ sd(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="var"], ~ var(.x, na.rm=T),.names = "{.col}_var"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="var"], ~ var(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="vc"], ~ sd(.x, na.rm=T)/mean(.x, na.rm=T),.names = "{.col}_vc"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="vc"], ~ sd(.x, na.rm=T)/mean(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="skew"], ~ mom_skew(.x, na.rm=T),.names = "{.col}_skew"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="skew"], ~ mom_skew(.x, na.rm=T)), dplyr::across(var_vec[vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="kurt"], ~ mom_kurt(.x, na.rm=T),.names = "{.col}_kurt"), dplyr::across(var_vec[!vv_vec & var_vec %notin% c("Wind_speed","Wind_direction","Wind_speed_gust","Wind_direction_gust") & fns_vec=="kurt"], ~ mom_kurt(.x, na.rm=T))) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.infinite(.x), NA, .x))) # Drop non-useful columns concerning the quantiles if (sum(grepl('\\q[0-9]+$',names(summ_data))) > 0) { '%notin%' <- Negate('%in%') names_full <- names(summ_data)[grepl('\\q[0-9]+$',names(summ_data))] to_drop <- names_full[names_full %notin% paste0(var_vec,"_",fns_vec)] summ_data <- summ_data %>% dplyr::select(-to_drop) } return(summ_data) }
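
# Illustrative sketch (not part of the package): the column-naming rule used above.
# When a variable appears only once in 'var_vec' its original name is kept, whereas a
# repeated variable (e.g. NO2 requested with both "mean" and "max") gets the function
# name appended through the '.names' argument of dplyr::across(). The helper below and
# its toy data are hypothetical and only added for illustration.
demo_duplicated_naming <- function() {
  toy <- tibble::tibble(IDStation = c(1, 1, 2, 2), NO2 = c(10, 20, 30, 50))
  toy_grouped <- dplyr::group_by(toy, IDStation)
  dplyr::summarise(toy_grouped,
                   dplyr::across("NO2", ~ mean(.x, na.rm = TRUE), .names = "{.col}_mean"),
                   dplyr::across("NO2", ~ max(.x, na.rm = TRUE),  .names = "{.col}_max"))
  # returns columns: IDStation, NO2_mean, NO2_max
}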
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/Custom_summarise.R
#' @keywords internal
#' @noRd
Excess_na_converter <- function(grouped_data,verbose=T) {

  # For the pollutant columns, set to NA every observation belonging to a group
  # (i.e. the current aggregation window) in which at least 25% of the values are missing
  grouped_data2 <- grouped_data %>%
    dplyr::mutate(dplyr::across(tidyselect::contains(c("NO2","NOx","NO","Ozone","CO")),
                                ~ dplyr::case_when(mean(is.na(.x)) >= 0.25 ~ NA_real_,
                                                   mean(is.na(.x)) < 0.25 ~ .x)))

  if (verbose==T) {
    cat("Before aggregation: converting to NA all the obs. belonging to a group with at least 25% missing values \n")
    for (j in 1:dim(grouped_data2)[2]) {
      cat(paste0("Number of obs. converted to NA for ",colnames(grouped_data2)[j],": ",
                 sum(is.na(grouped_data2[,j]) - is.na(grouped_data[,j])),"\n"))
    }
  }

  return(grouped_data2)
}
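
# Illustrative sketch (not part of the package): the masking rule above on a toy
# grouped tibble. A day in which at least 25% of the hourly NO2 values are missing is
# turned entirely into NA before any daily mean is computed, so the aggregate is not
# based on too few observations. The helper below and its toy data are hypothetical
# and only added for illustration.
demo_excess_na <- function() {
  toy <- tibble::tibble(
    day = rep(c(1, 2), each = 4),
    NO2 = c(10, 12, NA, 11,    # day 1: 1 of 4 values missing (25%) -> masked
            20, 21, 22, 23)    # day 2: complete                    -> kept
  )
  toy_grouped <- dplyr::group_by(toy, day)
  masked <- dplyr::mutate(toy_grouped,
                          NO2 = dplyr::case_when(mean(is.na(NO2)) >= 0.25 ~ NA_real_,
                                                 mean(is.na(NO2)) < 0.25 ~ NO2))
  dplyr::summarise(masked, NO2_daily_mean = mean(NO2, na.rm = TRUE))
  # day 1 -> NaN (all values masked), day 2 -> 21.5
}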
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/Excess_na_converter.R
#' @keywords internal
#' @noRd
Hampel_flt <- function(var) {

  I_low <- max(median(var,na.rm = T) - 3*mad(var,na.rm = T), min(var,na.rm=T))
  I_upp <- min(median(var,na.rm = T) + 3*mad(var,na.rm = T), max(var,na.rm=T))
  outlier_ind_low <- which(var < I_low)
  outlier_ind_upp <- which(var > I_upp)

  return(list(I_low = I_low,I_upp = I_upp,
              outlier_ind_upp = outlier_ind_upp, outlier_ind_low = outlier_ind_low))
}

# The Hampel filter detects outliers by means of the Hampel identifier, a robust
# variation of the three-sigma rule based on the median and the median absolute
# deviation (MAD). In the classical sliding-window formulation, the median and the MAD
# are computed over a window made of the current sample and (Len - 1)/2 adjacent
# samples on each side (Len being the window length), and flagged samples are replaced
# by the window median. The implementation above applies the same rule globally:
# values lying more than 3*MAD away from the overall median are flagged as outliers
# (their indices and the two fences are returned), without being replaced.
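
# Illustrative sketch (not part of the package): how the Hampel-type rule above flags
# extreme values on a toy vector (Hampel_flt() itself is internal, so the same
# computation is reproduced with base R only). The helper below and its toy vector are
# hypothetical and only added for illustration.
demo_hampel_rule <- function() {
  x <- c(10, 11, 9, 10, 12, 55, 10, 11, -20, 10)  # 55 and -20 are planted outliers
  I_low <- max(median(x, na.rm = TRUE) - 3 * mad(x, na.rm = TRUE), min(x, na.rm = TRUE))
  I_upp <- min(median(x, na.rm = TRUE) + 3 * mad(x, na.rm = TRUE), max(x, na.rm = TRUE))
  list(lower_fence = I_low,         # about 5.55 (median 10 - 3 * MAD 1.48)
       upper_fence = I_upp,         # about 14.45
       below = which(x < I_low),    # index 9, i.e. the value -20
       above = which(x > I_upp))    # index 6, i.e. the value 55
}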
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/Hampel_flt.R
#' Aggregate any ARPALdf object (with higher temporal frequency) to hourly, daily, weekly, monthly and yearly
#' temporal frequencies.
#'
#' @description Starting from an ARPALdf object with high frequency (e.g., 10mins or hourly), 'Time_aggregate'
#' aggregates the dataset to lower temporal frequencies (e.g., hourly, daily, weekly, monthly and yearly) by station.
#' The output is an ARPALdf object with observations having hourly, daily, weekly, monthly or yearly frequency.
#' The function can be applied only to ARPALdf objects.
#' Users can indicate specific variables to aggregate and an aggregation function among
#' mean, median, sum (cumulated), min, max, quantiles, and variability metrics for each variable.
#' It is possible to specify different aggregation functions on the same variable
#' by repeating the name of the variable in 'Var_vec' and specifying the functions in 'Fns_vec'.
#'
#' @param Dataset ARPALdf dataframe to aggregate.
#' @param Frequency Temporal aggregation frequency. It can be "hourly", "daily", "weekly",
#' "monthly" or "yearly".
#' @param Var_vec Vector of variables to aggregate. If NULL (default) all the variables are averaged,
#' except for 'Rainfall' and 'Snow_height', which are summed (cumulated).
#' @param Fns_vec Vector of aggregation functions to apply to the selected variables. Available functions
#' are 'mean', 'median', 'min', 'max', 'sum', 'qPP' (PP-th percentile), 'sd', 'var', 'vc' (variability coefficient),
#' 'skew' (skewness) and 'kurt' (kurtosis). Attention: for Wind Speed and Wind Speed Gust only mean, min
#' and max are available; for Wind Direction and Wind Direction Gust only mean is available.
#' @param verbose Logic value (TRUE or FALSE). Toggle warnings and messages. If 'verbose=T' (default) the function
#' prints on the screen some messages describing the progress of the tasks. If 'verbose=F' any message about
#' the progression is suppressed.
#'
#' @return A data frame of class 'ARPALdf' at the requested temporal frequency.
#'
#' @examples
#' \donttest{
#' ## Download hourly observed concentrations during 2020 for station 501 (Milano - Via Marche).
#' if (require("RSocrata")) {
#' data <- get_ARPA_Lombardia_AQ_data(ID_station=501, Date_begin = "2020-01-01",
#' Date_end = "2020-12-31", Frequency="hourly")
#' }
#' ## Aggregate all the data to daily frequency
#' Time_aggregate(Dataset=data,Frequency="daily",Var_vec=NULL,Fns_vec=NULL)
#' ## Aggregate NO2 to weekly maximum concentrations and NOx to weekly minimum concentrations.
#' Time_aggregate(Dataset=data,Frequency="weekly",Var_vec=c("NO2","NOx"),Fns_vec=c("max","min")) #' } #' #' @export Time_aggregate <- function(Dataset, Frequency, Var_vec = NULL, Fns_vec = NULL, verbose = T) { ### Checks stopifnot("Data is not of class 'ARPALdf'" = is_ARPALdf(Data = Dataset) == T) if (is.null(Var_vec) & is.null(Fns_vec)) { vv <- c("Ammonia","Arsenic","Benzene","Benzo_a_pyrene","BlackCarbon","Cadmium", "CO","Lead","Nikel","NO","NO2","NOx","Ozone","PM_tot","PM10","PM2.5","Sulfur_dioxide", "Rainfall","Temperature","Relative_humidity","Global_radiation","Water_height", "Snow_height","Wind_speed","Wind_speed_max","Wind_direction","Wind_direction_max", "NO2_mean","NO2_max_day","Ozone_max_8h","Ozone_max_day","PM10_mean","PM2.5_mean") vv <- vv[vv %in% names(Dataset)] fv <- ifelse(vv == "Rainfall" | vv == "Snow_height", "sum", "mean") } else { vv <- Var_vec fv <- Fns_vec } data_aggr <- switch(Frequency, yearly = { # Aggregation to yearly data Dataset %>% data.frame() %>% dplyr::mutate(Y = lubridate::year(.data$Date)) %>% dplyr::group_by(.data$Y, .data$NameStation, .data$IDStation) %>% Custom_summarise(vv, fv) %>% dplyr::ungroup() %>% dplyr::mutate(Date = lubridate::make_datetime(year = .data$Y)) %>% dplyr::select(-c(.data$Y)) %>% dplyr::relocate(.data$Date,.data$IDStation) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.nan(.), NA, .))) %>% dplyr::mutate(dplyr::across(tidyselect::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0)))}, monthly = { # Aggregation to monthly data Dataset %>% data.frame() %>% dplyr::mutate(Y = lubridate::year(.data$Date), M = lubridate::month(.data$Date)) %>% dplyr::group_by(.data$Y, .data$M, .data$NameStation, .data$IDStation) %>% Custom_summarise(vv, fv) %>% dplyr::ungroup() %>% dplyr::mutate(Date = lubridate::make_datetime(year = .data$Y, month = .data$M)) %>% dplyr::select(-c(.data$Y,.data$M)) %>% dplyr::relocate(.data$Date,.data$IDStation) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.nan(.), NA, .))) %>% dplyr::mutate(dplyr::across(tidyselect::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0)))}, weekly = { # Aggregation to weekly data Dataset %>% data.frame() %>% dplyr::mutate(Y = lubridate::year(.data$Date), M = lubridate::month(.data$Date), D = lubridate::day(.data$Date), W = aweek::date2week(ISOdate(year = .data$Y, month = .data$M, day = .data$D),factor = T)) %>% dplyr::group_by(.data$W, .data$NameStation, .data$IDStation) %>% Custom_summarise(vv, fv) %>% dplyr::ungroup() %>% dplyr::mutate(Date = aweek::week2date(.data$W), Date = lubridate::make_datetime(year = lubridate::year(.data$Date), month = lubridate::month(.data$Date), day = lubridate::day(.data$Date))) %>% dplyr::select(-c(.data$W)) %>% dplyr::relocate(.data$Date,.data$IDStation) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.nan(.), NA, .))) %>% dplyr::mutate(dplyr::across(tidyselect::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0)))}, # Aggregation to daily data daily = { Dataset %>% data.frame() %>% dplyr::mutate(Y = lubridate::year(.data$Date), M = lubridate::month(.data$Date), D = lubridate::day(.data$Date)) %>% dplyr::group_by(.data$Y, .data$M, .data$D, .data$NameStation,.data$IDStation) %>% Excess_na_converter(verbose=verbose) %>% Custom_summarise(vv, fv) %>% dplyr::ungroup() %>% dplyr::mutate(Date = lubridate::make_datetime(year = .data$Y, month = .data$M, day = .data$D)) %>% 
dplyr::select(-c(.data$Y,.data$M,.data$D)) %>% dplyr::relocate(.data$Date,.data$IDStation) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.nan(.), NA, .))) %>% dplyr::mutate(dplyr::across(tidyselect::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0)))}, # Aggregation to hourly data hourly = { Dataset %>% data.frame() %>% dplyr::mutate(Y = lubridate::year(.data$Date), M = lubridate::month(.data$Date), D = lubridate::day(.data$Date), H = lubridate::hour(.data$Date)) %>% dplyr::group_by(.data$Y, .data$M, .data$D, .data$H, .data$NameStation, .data$IDStation) %>% Custom_summarise(vv, fv) %>% dplyr::ungroup() %>% dplyr::mutate(Date = lubridate::make_datetime(year = .data$Y, month = .data$M, day = .data$D, hour = .data$H)) %>% dplyr::select(-c(.data$Y,.data$M,.data$D,.data$H)) %>% dplyr::relocate(.data$Date,.data$IDStation) %>% dplyr::mutate(dplyr::across(tidyselect::vars_select_helpers$where(is.numeric), ~ ifelse(is.nan(.), NA, .))) %>% dplyr::mutate(dplyr::across(tidyselect::matches(c("Wind_direction","Wind_direction_max")), ~ round(.x,0)))}) attr(data_aggr, "frequency") <- Frequency freq_unit <- dplyr::case_when(Frequency == "hourly" ~ "hours", Frequency == "daily" ~ "days", Frequency == "weekly" ~ "weeks", Frequency == "monthly" ~ "months", Frequency == "yearly" ~ "years") attr(data_aggr, "units") <- freq_unit structure(list(data_aggr = data_aggr)) if (is_ARPALdf_AQ(Dataset)==TRUE) { attr(data_aggr, "class") <- c("ARPALdf","ARPALdf_AQ","tbl_df","tbl","data.frame") } if (is_ARPALdf_AQ_mun(Dataset)==TRUE) { attr(data_aggr, "class") <- c("ARPALdf","ARPALdf_AQ_mun","tbl_df","tbl","data.frame") } if (is_ARPALdf_W(Dataset)==TRUE) { attr(data_aggr, "class") <- c("ARPALdf","ARPALdf_W","tbl_df","tbl","data.frame") } return(data_aggr) }
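
# Illustrative sketch (not part of the package): how the weekly branch above collapses
# time stamps. aweek::date2week() maps each date to its week label and
# aweek::week2date() maps the label back to the first day of that week, which becomes
# the time stamp of the aggregated record. The helper below and its toy data are
# hypothetical and only added for illustration.
demo_weekly_collapse <- function() {
  toy <- tibble::tibble(Date = as.Date("2020-01-06") + 0:13,  # two consecutive weeks
                        NO2  = 1:14)
  toy <- dplyr::mutate(toy, W = aweek::date2week(Date, factor = TRUE))
  toy_grouped <- dplyr::group_by(toy, W)
  out <- dplyr::summarise(toy_grouped, NO2 = mean(NO2))
  dplyr::mutate(out, Date = aweek::week2date(W))
  # returns one row per week, with the week's first day as Date
}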
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/Time_aggregate.R
#' @keywords internal #' @noRd W_metadata_reshape <- function() { '%notin%' <- Negate('%in%') ##### Check online availability for weather metadata temp <- tempfile() res <- suppressWarnings(try(curl::curl_fetch_disk("https://www.dati.lombardia.it/resource/nf78-nj6b.csv", temp), silent = TRUE)) if(res$status_code != 200) { message(paste0("The internet resource for weather stations metadata is not available at the moment. Status code: ",res$status_code,".\nPlease, try later. If the problem persists, please contact the package maintainer.")) return(invisible(NULL)) } else { Metadata <- RSocrata::read.socrata("https://www.dati.lombardia.it/resource/nf78-nj6b.csv") } Metadata <- Metadata %>% dplyr::rename(IDSensor = .data$idsensore, IDStation = .data$idstazione, Measure = .data$tipologia, NameStation = .data$nomestazione, Altitude = .data$quota, Province = .data$provincia, DateStart = .data$datastart, DateStop = .data$datastop, Latitude = .data$lat, Longitude = .data$lng) %>% dplyr::mutate(Altitude = as.numeric(.data$Altitude), DateStart = lubridate::ymd(.data$DateStart), DateStop = lubridate::ymd(.data$DateStop)) %>% dplyr::select(.data$IDSensor, .data$IDStation, .data$Measure, .data$NameStation, .data$Altitude, .data$Province, .data$DateStart, .data$DateStop, .data$Latitude, .data$Longitude) %>% dplyr::mutate(Measure = dplyr::recode(.data$Measure, "Altezza Neve" = "Snow_height", "Direzione Vento" = "Wind_direction", "Livello Idrometrico" = "Water_height", "Precipitazione" = "Rainfall", "Radiazione Globale" = "Global_radiation", "Temperatura" = "Temperature"), Measure = ifelse(.data$Measure == rlang::as_utf8_character("Umidit\u00e0 Relativa"),"Relative_humidity",.data$Measure), Measure = ifelse(.data$Measure == rlang::as_utf8_character("Velocit\u00e0 Vento"),"Wind_speed",.data$Measure)) ### Name stations # dplyr::across(c(.data$NameStation), ~ stringi::stri_trans_general(str = .x, id="Latin-ASCII")), Metadata <- Metadata %>% dplyr::mutate(dplyr::across(c(.data$NameStation), toupper), dplyr::across(c(.data$NameStation), ~ gsub("\\-", " ", .x)), dplyr::across(c(.data$NameStation), ~ stringr::str_replace_all(.x, c("S\\."="San ", "V\\."="Via ", "V\\.LE" = "Viale", " D\\`" = " D\\' ", " D\\` " = " D\\'", "D\\`" = " D\\'", "D\\'" = " D\\' ", "P\\.ZZA" = "Piazza", "C\\.SO" = "Corso", "LOC\\." 
= "Localita"))), dplyr::across(c(.data$NameStation), tm::removePunctuation), dplyr::across(c(.data$NameStation), tm::removeNumbers), dplyr::across(c(.data$NameStation), tm::stripWhitespace), dplyr::across(c(.data$NameStation), stringr::str_to_title), dplyr::across(c(.data$NameStation), ~ stringr::str_replace_all(.x, c(" D " = " D\\'", " Xi " = " XI ", " Xxv " = " XXV ", " Xxiv " = " XXIV ", " Via " = " - Via ", " Viale " = " - Viale ", " Corso " = " - Corso ", " Localita " = rlang::as_utf8_character(" - Localit\u00e0 "), " Piazza " = " - Piazza ", " Via Le " = " Viale ", "Smr" = "SMR", "Bagolino Sp" = "Bagolino SP669", "Borgoforte Ss" = "Borgoforte SS12", "Capriolo Sp" = "Capriolo SP12", "G Matteotti" = "Matteotti", "Via Novembre" = "Via IV Novembre", "Clusone Sp" = "Clusone SP671", "Cremona Sp" = "Cremona SP10", "Darfo Boario Terme Ss" = "Darfo Boario Terme SS42", "Gavardo Sp" = "Gavardo SP116", "Idro Ss" = "Idro SS237", "Lainate Sp" = "Lainate SP109", "Lomello Ss" = "Lomello SS211", "Orio Litta Ss" = "Orio Litta SS234", "Pavia Ss" = "Pavia SS35", "Pralboino Sp" = "Pralboino SP64", "Rivolta D'Adda Sp" = "Rivolta d'Adda SP4", "Salerano Sul Lambro Sp" = "Salerano sul Lambro SP115", "Sermide E Felonica Sp" = "Sermide e Felonica SP91", "Vigevano Ss" = "Vigevano SS494")))) Metadata <- Metadata %>% filter(.data$IDStation %notin% c(891)) # Isola San Antonio 891 (AL) --> Fuori regione return(Metadata) }
/scratch/gouwar.j/cran-all/cranData/ARPALData/R/W_metadata_reshape.R