#library(mvtnorm)
#library(fields)
#library(copula)
#library(MASS)
#' @importFrom mvtnorm rmvnorm
#' @importFrom mvtnorm rmvt
#' @importFrom copula normalCopula
#' @importFrom copula tCopula
#' @importFrom copula dCopula
#' @importFrom stats quantile
#' @importFrom stats ecdf
#' @importFrom stats mahalanobis
#' @importFrom stats optim
#' @importFrom stats pnorm
#' @importFrom stats pt
#' @importFrom stats qgamma
#' @importFrom stats qnorm
#' @importFrom stats qt
#' @importFrom stats rt
#' @importFrom stats sd
#' @export
Data.COST = function(n,n.total,seed1,coord,par.t)
{
set.seed(seed1)
d = nrow(coord)
V.theta = matrix(c(cos(par.t[1]),sin(par.t[1]),-sin(par.t[1]),cos(par.t[1])),2,2)
V.lambda = diag(c(1,1/par.t[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
R.m = matrix(0,2*d,2*d)
R.m[1:d,1:d] = R.m[(d+1):(2*d),(d+1):(2*d)] = exp(-par.t[3]*dd^(2*par.t[4]))
R.m[1:d,(d+1):(2*d)] = exp(-par.t[3]*dd^(2*par.t[4])/(par.t[5]^par.t[4]))/par.t[5]
R.m[(d+1):(2*d),1:d] = R.m[1:d,(d+1):(2*d)]
R.11 = R.m[1:d,1:d]
R.12 = R.m[1:d,(d+1):(2*d)]
R.21 = R.m[(d+1):(2*d),1:d]
R.11.inv = solve(R.11)
B.m = R.21%*%R.11.inv
Omega.m = R.11-R.21%*%R.11.inv%*%R.12
dfs = par.t[6]
if (dfs>=50)
{
X.var = matrix(0,d,n.total)
X.var[,1] = rmvnorm(1,sigma=R.11)
V.var = rmvnorm(n.total,sigma=Omega.m)
V.var = t(V.var)
for (i in 2:n.total)
{
X.var[,i] = B.m%*%X.var[,i-1]+V.var[,i]
}
alpha=2*coord[,1]+coord[,2]^2
beta=coord[,1]+coord[,2]
U.var = pnorm(X.var)
ytrans.var=qgamma(U.var,shape=alpha,scale=beta)
ytrans.var = t(ytrans.var)
Y = ytrans.var[(n.total-n):n.total,]
X.con = B.m%*%X.var[,n.total-1]
m1 = 500
xx = rmvnorm(m1,mean=X.con,sigma=Omega.m)
yy = qgamma(pnorm(t(xx)),shape=alpha,scale=beta)
mean.true = apply(yy,1,mean)
}
if (dfs<50)
{
X.var = matrix(0,d,n.total)
X.var[,1] = as.vector(rmvt(1,sigma=R.11,df=dfs,delta=rep(0,nrow(R.11)),type = "shifted"))
for (i in 2:n.total)
{
aa.t = t(X.var[,i-1])%*%R.11.inv%*%X.var[,i-1]
aa.t = as.numeric(aa.t)
R.11.t = (dfs+aa.t)/(dfs+d)*Omega.m
V.t = rmvt(1, sigma = R.11.t, df = dfs+d, delta = rep(0, nrow(R.11.t)), type = "shifted")
X.var[,i] = as.vector(V.t)+as.vector(B.m%*%X.var[,i-1])
}
alpha=2*coord[,1]+coord[,2]^2
beta=coord[,1]+coord[,2]
U.var = pt(X.var,df=dfs)
ytrans.var=qgamma(U.var,shape=alpha,scale=beta)
ytrans.var = t(ytrans.var)
Y = ytrans.var[(n.total-n):n.total,]
X.con = B.m%*%X.var[,n.total-1]
m1 = 500
aa = t(X.var[,n.total-1])%*%R.11.inv%*%X.var[,n.total-1]
aa = as.numeric(aa)
R.11.t = (dfs+aa)/(dfs+d)*Omega.m
V.t = rmvt(m1, sigma = R.11.t, df = dfs+d, delta = rep(0, nrow(R.11.t)), type = "shifted")
xx = t(V.t)+as.vector(B.m%*%X.var[,n.total-1])
yy = qgamma(pt(xx,df=dfs),shape=alpha,scale=beta)
mean.true = apply(yy,1,mean)
}
dat = list(Y.all=Y,mean.true=mean.true)
return(dat)
}
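# Hedged usage sketch for Data.COST (the argument values below are
# illustrative assumptions, not package defaults): simulate n.total time
# points on a 3 x 3 grid and keep the last n+1 rows in Y.all.
# s.grid <- cbind(rep(c(1,3,5)/6, each = 3), rep(c(1,3,5)/6, 3))
# sim <- Data.COST(n = 200, n.total = 210, seed1 = 1, coord = s.grid,
#                  par.t = c(0, 1, 1, 0.5, 1.5, 100))
# dim(sim$Y.all)        # (n+1) x 9: n rows for fitting, row n+1 for testing
# length(sim$mean.true) # 9: Monte Carlo one-step-ahead conditional means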
#' @export
logL.COST.t <- function(par,Y,s.ob){
d = ncol(Y)
coord = s.ob
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
R.m = matrix(0,2*d,2*d)
R.m[1:d,1:d] = R.m[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d,(d+1):(2*d)] = R.m[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
params = R.m[lower.tri(R.m)]
myCop <- tCopula(param=params, dim = 2*d, dispstr = "un", df=par[6], df.fixed=FALSE)
R.11 = R.m[1:d,1:d]
params.marginal = R.11[lower.tri(R.11)]
myCop.marginal = tCopula(param=params.marginal,dim=d,dispstr="un",df=par[6],df.fixed=FALSE)
n = nrow(Y)
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
L = 0
aa = log(dCopula(Gn[1,],myCop.marginal))
L = L+aa
for(i in 2:n)
{
c.joint = log(dCopula(c(Gn[i-1,],Gn[i,]),myCop))
c.marginal = log(dCopula(Gn[i-1,],myCop.marginal))
L <- L + c.joint - c.marginal
}
return(-L)
}
#' @export
logL.COST.G <- function(par,Y,s.ob){
d = ncol(Y)
coord = s.ob
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
R.m = matrix(0,2*d,2*d)
R.m[1:d,1:d] = R.m[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d,(d+1):(2*d)] = R.m[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
params = R.m[lower.tri(R.m)]
myCop <- normalCopula(param=params, dim = 2*d, dispstr = "un")
R.11 = R.m[1:d,1:d]
params.marginal = R.11[lower.tri(R.11)]
myCop.marginal = normalCopula(param=params.marginal, dim = d, dispstr = "un")
n = nrow(Y)
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
L = 0
aa = log(dCopula(Gn[1,],myCop.marginal))
L = L+aa
for(i in 2:n)
{
c.joint = log(dCopula(c(Gn[i-1,],Gn[i,]),myCop))
c.marginal = log(dCopula(Gn[i-1,],myCop.marginal))
L <- L + c.joint - c.marginal
}
return(-L)
}
#' @export
logL.CF <- function(par,Yk,dfs){
deltak = par
n = length(Yk)
myCop <- tCopula(deltak, df=dfs, df.fixed=TRUE)
Gnk = ecdf(Yk)(Yk)*n/(n+1)
L = 0
for(i in 2:n)
{
L = L+log(dCopula(c(Gnk[i-1],Gnk[i]),myCop))
}
return(-L)
}
#' @export
logL.GP <- function(par,Y,s.ob)
{
coord = s.ob
d = nrow(coord)
n = nrow(Y)
mu.est = apply(Y,2,mean)
sigma.est = as.vector(apply(Y,2,sd))
var.est = sigma.est%*%t(sigma.est)
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
Sigma = matrix(0,2*d,2*d)
Sigma[1:d,1:d] = Sigma[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))*var.est
Sigma[1:d,(d+1):(2*d)] = Sigma[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]*var.est
Sigma.11.inv = solve(Sigma[1:d,1:d])
Sigma.12 = Sigma[1:d,(d+1):(2*d)]
Sigma.21 = Sigma[(d+1):(2*d),1:d]
B = Sigma.21%*%Sigma.11.inv
Omega = Sigma[1:d,1:d]-Sigma.21%*%Sigma.11.inv%*%Sigma.12
Omega.inv = solve(Omega)
y.cen = t(Y[2:n,])-mu.est-B%*%(t(Y[1:(n-1),])-mu.est)
L = -(n-1)/2*log(det(Omega))-sum(apply(y.cen,2,function(x){t(x)%*%Omega.inv%*%x/2}))
return(-L)
}
#' @export
Forecasts.COST.G <- function(par,Y,s.ob,seed1,m,isotropic)
{
coord = s.ob
d = nrow(coord)
if (isotropic==TRUE)
{
par = c(0,1,par)
}
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
n = nrow(Y)
R.m = matrix(0,2*d,2*d)
R.m[1:d,1:d] = R.m[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d,(d+1):(2*d)] = R.m[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
R.11 = R.m[1:d,1:d]
R.11.inv = solve(R.11)
R.12 = R.m[1:d,(d+1):(2*d)]
R.21 = R.m[(d+1):(2*d),1:d]
B.m = R.21%*%R.11.inv
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
x.n = qnorm(Gn[n,])
qq = c(0.025,0.975,0.5)
y.qq = matrix(NA,d,length(qq))
for (k in 1:d)
{
R.m.k = R.m[c(1:d,d+k),c(1:d,d+k)]
sigma.dk = R.m.k[1:d,d+1]
mean.k = t(sigma.dk)%*%R.11.inv%*%x.n
var.k = max(1-t(sigma.dk)%*%R.11.inv%*%sigma.dk,0.001)
sd.k = var.k^0.5
q.x.qq = pnorm(qnorm(qq,mean.k,sd.k))
y.qq[k,] = as.numeric(quantile(Y[,k],q.x.qq,type=6))
}
y.draw.random = matrix(0,d,m)
mean.fore = B.m%*%x.n
sigma.fore = R.11-B.m%*%R.12
set.seed(seed1)
x.draw.random = t(rmvnorm(m,mean=mean.fore,sigma=sigma.fore))
for (k in 1:d) y.draw.random[k,] = as.numeric(quantile(Y[,k],pmin(pnorm(x.draw.random[k,]),0.999),type=6))
mean.est = apply(y.draw.random,1,mean)
result = list(y.qq=y.qq,mean.est=mean.est,y.draw.random=y.draw.random)
return(result)
}
#' @export
Forecasts.COST.t <- function(par,Y,s.ob,seed1,m,isotropic)
{
coord = s.ob
d = nrow(coord)
if (isotropic==TRUE)
{
par = c(0,1,par)
}
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
n = nrow(Y)
R.m = matrix(0,2*d,2*d)
R.m[1:d,1:d] = R.m[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d,(d+1):(2*d)] = R.m[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
R.11 = R.m[1:d,1:d]
R.11.inv = solve(R.11)
R.12 = R.m[1:d,(d+1):(2*d)]
R.21 = R.m[(d+1):(2*d),1:d]
B.m = R.21%*%R.11.inv
Omega.m = R.11-R.21%*%R.11.inv%*%R.12
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
x.n = as.vector(qt(Gn[n,],df=par[6]))
qq = c(0.025,0.975,0.5)
y.qq = matrix(NA,d,length(qq))
for (k in 1:d)
{
R.m.k = R.m[c(1:d,d+k),c(1:d,d+k)]
sigma.dk = R.m.k[1:d,d+1]
mean.k = as.numeric(t(sigma.dk)%*%R.11.inv%*%x.n)
Omega.k = max(1-t(sigma.dk)%*%R.11.inv%*%sigma.dk,0.001)
aa = as.numeric(t(x.n)%*%R.11.inv%*%x.n)
Omega.k.star = (par[6]+aa)*Omega.k/(par[6]+d)
aaa = qt(qq,df=par[6]+d)*Omega.k.star^0.5+mean.k
q.x.qq = pt(aaa,df=par[6])
y.qq[k,] = as.numeric(quantile(Y[,k],q.x.qq,type=6))
}
aa = t(x.n)%*%R.11.inv%*%x.n
aa = as.numeric(aa)
R.11.t = (par[6]+aa)/(par[6]+d)*Omega.m
set.seed(seed1)
V.t = rmvt(m, sigma = R.11.t, df = par[6]+d, delta = rep(0, nrow(R.11.t)), type = "shifted")
x.draw.random = t(V.t)+as.vector(B.m%*%x.n)
y.draw.random = matrix(0,d,m)
for (k in 1:d) y.draw.random[k,] = as.numeric(quantile(Y[,k],pmin(pt(x.draw.random[k,],df=par[6]),0.999),type=6))
mean.est = apply(y.draw.random,1,mean)
result = list(y.qq=y.qq,mean.est=mean.est,y.draw.random=y.draw.random)
return(result)
}
#' @export
Forecasts.CF <- function(par,Y,seed1,m)
{
d = ncol(Y)
n = nrow(Y)
y.draw.random = matrix(0,d,m)
dfs = par[1]
deltas = par[2:(d+1)]
qq = c(0.025,0.975,0.5)
y.qq = matrix(NA,d,length(qq))
for (k in 1:d)
{
deltak = deltas[k]
Yk = Y[,k]
Gnk = ecdf(Yk)(Yk)*n/(n+1)
x.kn = qt(Gnk[n],df=dfs)
mean.k = deltak*x.kn
var.k = max(1-deltak^2,0.001)
var.k.star = (dfs+x.kn^2)*var.k/(dfs+1)
aaa = qt(qq,df=dfs+1)*var.k.star^0.5+mean.k
q.x.qq = pt(aaa,df=dfs)
y.qq[k,] = as.numeric(quantile(Yk,q.x.qq,type=6))
set.seed(seed1)
x.draw.random = rt(m,df=dfs+1)*var.k.star^0.5+mean.k
y.draw.random[k,] = as.numeric(quantile(Yk,pmin(pt(x.draw.random,df=dfs),0.999),type=6))
}
mean.est = apply(y.draw.random,1,mean)
result = list(y.qq=y.qq,mean.est=mean.est,y.draw.random=y.draw.random)
return(result)
}
#' @export
Forecasts.GP <- function(par,Y,s.ob,seed1,m,isotropic)
{
coord = s.ob
d = nrow(coord)
n = nrow(Y)
mu.est = apply(Y,2,mean)
sigma.est = as.vector(apply(Y,2,sd))
var.est = sigma.est%*%t(sigma.est)
if (isotropic==TRUE)
{
par = c(0,1,par)
}
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d,d)
for (i in 1:d)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
Sigma = matrix(0,2*d,2*d)
Sigma[1:d,1:d] = Sigma[(d+1):(2*d),(d+1):(2*d)] = exp(-par[3]*dd^(2*par[4]))*var.est
Sigma[1:d,(d+1):(2*d)] = Sigma[(d+1):(2*d),1:d] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]*var.est
Sigma.11.inv = solve(Sigma[1:d,1:d])
Sigma.12 = Sigma[1:d,(d+1):(2*d)]
Sigma.21 = Sigma[(d+1):(2*d),1:d]
B = Sigma.21%*%Sigma.11.inv
Omega = Sigma[1:d,1:d]-Sigma.21%*%Sigma.11.inv%*%Sigma.12
Omega.inv = solve(Omega)
Y = t(t(Y)-mu.est)
y.mean0 = B%*%as.vector(Y[n,])
qq = c(0.025,0.975,0.5)
y.qq = matrix(NA,d,length(qq))
for (k in 1:d)
{
y.qq[k,] = qnorm(qq,mean=y.mean0[k],sd=Omega[k,k]^0.5)+mu.est[k]
}
set.seed(seed1)
y.draw.random = t(rmvnorm(m,mean=y.mean0,sigma=Omega))+mu.est #d*m
mean.est = apply(y.draw.random,1,mean)
result = list(mean.est=mean.est,y.qq=y.qq,y.draw.random=y.draw.random)
return(result)
}
#' @export
rank.multivariate = function(y.test,y.random,seed1)
{
yy = cbind(y.test,y.random)
mm = ncol(yy)
pre.rank = rep(0,mm)
for (j in 1:mm)
{
dif.j = (yy-yy[,j]<=0)*1
pre.rank[j] = sum(apply(dif.j,2,min))
}
s.less = sum(pre.rank<pre.rank[1])
s.eq = sum(pre.rank-pre.rank[1]==0)
set.seed(seed1)
rank.multi = s.less+sample(1:s.eq,1)
return(rank.multi)
}
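# Illustrative sketch of the randomized multivariate rank (toy numbers,
# not from the package): rank an observed d-vector among m forecast draws.
# y.obs <- c(1.0, 2.0)                       # d = 2 locations
# draws <- matrix(rnorm(2*10), nrow = 2)     # m = 10 random draws
# rank.multivariate(y.obs, draws, seed1 = 1) # integer in 1:(m+1)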
#' @export
example.forecast <- function(n,n.total,seed1)
{
m = 500 #number of random draws for forecast
s.ob = cbind(rep(c(1,3,5)/6,each=3),rep(c(1,3,5)/6,3))
coord = s.ob
d = nrow(coord) #number of locations, =9
par.t = c(0,1,1,0.5,1.5,100) #the true angle, lambda12, c, gamma, eta, and degrees of freedom
#df=100 effectively yields a Gaussian copula
dat = Data.COST(n,n.total,seed1,coord,par.t)
Y.all = dat$Y.all
Y = Y.all[1:n,] #for parameter estimation
Y.d.test = Y.all[n+1,] #test data for forecast
#t copula
#pars.t.COST<-optim(par=c(0,1,2,0.8,2,5),logL.COST.t,Y=Y,s.ob=s.ob,method="L-BFGS-B",
#lower=c(0,0.1,0.2,0.05,1.1,1),upper=c(pi/2,50,10,0.95,30,100))$par
#pars.t.COST <- round(pars.t.COST,4)
result.COST.t <- Forecasts.COST.t(par.t,Y,s.ob,seed1,m,isotropic=FALSE)
low.up.COST.t = result.COST.t$y.qq
COST.t.fore.ECP = (Y.d.test>=low.up.COST.t[,1])*(Y.d.test<=low.up.COST.t[,2])*1
COST.t.fore.ML = low.up.COST.t[,2]-low.up.COST.t[,1]
COST.t.fore.ML <- round(COST.t.fore.ML,4)
COST.t.fore.rank = rank.multivariate(Y.d.test,result.COST.t$y.draw.random,seed1)
#Gaussian copula
#pars.G.COST<-optim(par=c(0,1,2,0.8,2),logL.COST.G,Y=Y,s.ob=s.ob,method="L-BFGS-B",
#lower=c(0,0.1,0.2,0.05,1.1),upper=c(pi/2,50,10,0.95,30))$par
#pars.G.COST <- round(pars.G.COST,4)
result.COST.G <- Forecasts.COST.G(par.t,Y,s.ob,seed1,m,isotropic=FALSE)
low.up.COST.G = result.COST.G$y.qq
COST.G.fore.ECP = (Y.d.test>=low.up.COST.G[,1])*(Y.d.test<=low.up.COST.G[,2])*1
COST.G.fore.ML = low.up.COST.G[,2]-low.up.COST.G[,1]
COST.G.fore.ML <- round(COST.G.fore.ML,4)
COST.G.fore.rank = rank.multivariate(Y.d.test,result.COST.G$y.draw.random,seed1)
#Temporal fit
#dfs = pars.t.COST[6]
#pars.CF = c(dfs,rep(0,d))
#for (k in 1:d)
#{
#pars.CF[k+1]<-optim(par=0.5,logL.CF,Yk=Y[,k],dfs=dfs,method="L-BFGS-B",lower=0.05,upper=0.95)$par
#}
#pars.CF <- round(pars.CF,4)
#result.CF <- Forecasts.CF(pars.CF,Y,seed1,m)
#low.up.CF = result.CF$y.qq
#CF.fore.ECP = (Y.d.test>=low.up.CF[,1])*(Y.d.test<=low.up.CF[,2])*1
#CF.fore.ML = low.up.CF[,2]-low.up.CF[,1]
#CF.fore.ML <- round(CF.fore.ML,4)
#CF.fore.rank = rank.multivariate(Y.d.test,result.CF$y.draw.random,seed1)
#Gaussian process
#pars.GP <- optim(par=c(0,1,2,0.8,2),logL.GP,Y=Y,
#s.ob=s.ob,method="L-BFGS-B",lower=c(0,0.1,0.2,0.05,1.1),upper=c(pi/2,50,10,0.95,30))$par
#pars.GP <- round(pars.GP,4)
result.GP <- Forecasts.GP(par.t[-6],Y,s.ob,seed1,m,isotropic=FALSE)
low.up.GP = result.GP$y.qq
GP.fore.ECP = (Y.d.test>=low.up.GP[,1])*(Y.d.test<=low.up.GP[,2])*1
GP.fore.ML = low.up.GP[,2]-low.up.GP[,1]
GP.fore.ML <- round(GP.fore.ML,4)
GP.fore.rank = rank.multivariate(Y.d.test,result.GP$y.draw.random,seed1)
result.forecast = list(COST.t.fore.ECP=COST.t.fore.ECP,
COST.t.fore.ML=COST.t.fore.ML,COST.t.fore.rank=COST.t.fore.rank,
COST.G.fore.ECP=COST.G.fore.ECP,
COST.G.fore.ML=COST.G.fore.ML,COST.G.fore.rank=COST.G.fore.rank,
GP.fore.ECP=GP.fore.ECP,GP.fore.ML=GP.fore.ML,
GP.fore.rank=GP.fore.rank)
return(result.forecast)
}
##########################################################
##########################################################
##########################################################
##########################################################
##########################################################
##########################################################
#' @export
Predictions.COST.t <- function(par,Y,s.ob,s.new,isotropic)
{
if (isotropic==TRUE)
{
par = c(0,1,par)
}
coord = rbind(s.ob,s.new)
d.all = nrow(coord)
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d.all,d.all)
for (i in 1:d.all)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
dfs = par[6]
d = ncol(Y)
n = nrow(Y)
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
R.m = matrix(0,2*d.all,2*d.all)
R.m[1:d.all,1:d.all] = R.m[(d.all+1):(2*d.all),(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d.all,(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
R.m[(d.all+1):(2*d.all),1:d.all] = R.m[1:d.all,(d.all+1):(2*d.all)]
h = nrow(s.new)
pre.CP = pre.ML = rep(0,h)
qq = c(0.025,0.975,0.5)
y.qq = x.qq = matrix(0,3,h)
R.2d = R.m[c(1:d,(d.all+1):(d.all+d)),c(1:d,(d.all+1):(d.all+d))]
R.2d.inv = solve(R.2d)
R.2d.new = R.m[c(1:d,(d.all+1):(d.all+d)),c((d.all+d+1):(d.all*2))]
B.m = t(R.2d.new)%*%R.2d.inv
Omega.m = R.m[c((d.all+d+1):(d.all*2)),c((d.all+d+1):(d.all*2))]-B.m%*%R.2d.new
x.c0 = c(qt(Gn[n-1,],df=dfs),qt(Gn[n,],df=dfs))
x.mean0 = as.vector(B.m%*%x.c0)
aa0 = t(x.c0)%*%R.2d.inv%*%x.c0
aa0 = as.numeric(aa0)
R.11.t0 = (dfs+aa0)/(dfs+2*d)*Omega.m
for (k in 1:h)
{
x.qq[,k] = qt(qq,dfs+2*d)*R.11.t0[k,k]^0.5+x.mean0[k]
}
for (k in 1:h)
{
dd.2 = dd[c(1:d,d+k),c(1:d,d+k)]
neighbor = order(dd.2[1:d,d+1])[1:4]
weight.nei = rep(1/4,4)
Gn.new.low = min(Y[,neighbor])
Gn.new.up = max(Y[,neighbor])
Gn.new.gri = seq(Gn.new.low,Gn.new.up,length=n)
Gn.new.all = matrix(0,n,4)
for (j in 1:4) Gn.new.all[,j] = ecdf(Y[,neighbor[j]])(Gn.new.gri)*n/(n+1)
Gn.new = Gn.new.all%*%weight.nei
yy1 = pt(x.qq[,k],df=dfs)
zz1 = pmin(ecdf(Gn.new)(yy1)*n+1,n)
y.qq[,k] = Gn.new.gri[zz1]
}
return(y.qq)
}
#' @export
Predictions.COST.G <- function(par,Y,s.ob,s.new,isotropic)
{
if (isotropic==TRUE)
{
par = c(0,1,par)
}
coord = rbind(s.ob,s.new)
d.all = nrow(coord)
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d.all,d.all)
for (i in 1:d.all)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
dfs = par[6]
d = ncol(Y)
n = nrow(Y)
Gn = matrix(0,n,d)
for (k in 1:d) Gn[,k] = ecdf(Y[,k])(Y[,k])*n/(n+1)
R.m = matrix(0,2*d.all,2*d.all)
R.m[1:d.all,1:d.all] = R.m[(d.all+1):(2*d.all),(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4]))
R.m[1:d.all,(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]
R.m[(d.all+1):(2*d.all),1:d.all] = R.m[1:d.all,(d.all+1):(2*d.all)]
h = nrow(s.new)
pre.CP = pre.ML = rep(0,h)
qq = c(0.025,0.975,0.5)
y.qq = x.qq = matrix(0,3,h)
R.2d = R.m[c(1:d,(d.all+1):(d.all+d)),c(1:d,(d.all+1):(d.all+d))]
R.2d.inv = solve(R.2d)
R.2d.new = R.m[c(1:d,(d.all+1):(d.all+d)),c((d.all+d+1):(d.all*2))]
B.m = t(R.2d.new)%*%R.2d.inv
Omega.m = R.m[c((d.all+d+1):(d.all*2)),c((d.all+d+1):(d.all*2))]-B.m%*%R.2d.new
x.c0 = c(qnorm(Gn[n-1,]),qnorm(Gn[n,]))
x.mean0 = as.vector(B.m%*%x.c0)
for (k in 1:h)
{
x.qq[,k] = qnorm(qq)*Omega.m[k,k]^0.5+x.mean0[k]
dd.2 = dd[c(1:d,d+k),c(1:d,d+k)]
neighbor = order(dd.2[1:d,d+1])[1:4]
weight.nei = rep(1/4,4)
Gn.new.low = min(Y[,neighbor])
Gn.new.up = max(Y[,neighbor])
Gn.new.gri = seq(Gn.new.low,Gn.new.up,length=n)
Gn.new.all = matrix(0,n,4)
for (j in 1:4) Gn.new.all[,j] = ecdf(Y[,neighbor[j]])(Gn.new.gri)*n/(n+1)
Gn.new = Gn.new.all%*%weight.nei
yy1 = pnorm(x.qq[,k])
zz1 = pmin(ecdf(Gn.new)(yy1)*n+1,n)
y.qq[,k] = Gn.new.gri[zz1]
}
return(y.qq)
}
#' @export
Predictions.GP <- function(par,Y,s.ob,s.new,isotropic)
{
h = nrow(s.new)
if (isotropic==TRUE)
{
par = c(0,1,par)
}
coord = rbind(s.ob,s.new)
d.all = nrow(coord)
n = nrow(Y)
V.theta = matrix(c(cos(par[1]),sin(par[1]),-sin(par[1]),cos(par[1])),2,2)
V.lambda = diag(c(1,1/par[2]))
V.m = V.theta%*%V.lambda%*%t(V.theta)
dd = matrix(0,d.all,d.all)
for (i in 1:d.all)
{
dd[,i] = sqrt(mahalanobis(x=coord,center=coord[i,],V.m))
}
d = ncol(Y)
sigma.est = as.vector(apply(Y,2,sd))
sigma.new = rep(0,h)
for (k in 1:h)
{
dd.2 = dd[c(1:d,d+k),c(1:d,d+k)]
neighbor = order(dd.2[1:d,d+1])[1:4]
weight.nei = rep(1/4,4)
sigma.new[k] = (t(weight.nei)%*%(sigma.est[neighbor])^2)^0.5
}
sigma.est.all = as.vector(c(sigma.est,sigma.new))
var.est = sigma.est.all%*%t(sigma.est.all)
Sigma = matrix(0,2*d.all,2*d.all)
Sigma[1:d.all,1:d.all] = Sigma[(d.all+1):(2*d.all),(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4]))*var.est
Sigma[1:d.all,(d.all+1):(2*d.all)] = exp(-par[3]*dd^(2*par[4])/(par[5]^par[4]))/par[5]*var.est
Sigma[(d.all+1):(2*d.all),1:d.all] = Sigma[1:d.all,(d.all+1):(2*d.all)]
Sigma.2d = Sigma[c(1:d,(d.all+1):(d.all+d)),c(1:d,(d.all+1):(d.all+d))]
Sigma.2d.inv = solve(Sigma.2d)
Sigma.2d.new = Sigma[c(1:d,(d.all+1):(d.all+d)),c((d.all+d+1):(d.all*2))]
B.m = t(Sigma.2d.new)%*%Sigma.2d.inv
Omega.m = Sigma[c((d.all+d+1):(d.all*2)),c((d.all+d+1):(d.all*2))]-B.m%*%Sigma.2d.new
qq = c(0.025,0.975,0.5)
mean.est = apply(Y,2,mean)
Y = t(t(Y)-mean.est)
y.mean0 = B.m%*%c(Y[n-1,],Y[n,])
mean.new = rep(0,h)
y.qq = matrix(0,3,h)
for (k in 1:h)
{
dd.2 = dd[c(1:d,d+k),c(1:d,d+k)]
neighbor = order(dd.2[1:d,d+1])[1:4]
weight.nei = rep(1/4,4)
mean.new[k] = weight.nei%*%mean.est[neighbor]
y.qq[,k] = qnorm(qq,y.mean0[k],Omega.m[k,k]^0.5)+mean.new[k]
}
return(y.qq)
}
#########################################
##Example
#########################################
#' @export
example.prediction <- function(n,n.total,seed1)
{
s.ob = cbind(rep(c(1,3,5)/6,each=3),rep(c(1,3,5)/6,3))
s.new = cbind(rep(c(1,2)/3,each=2),rep(c(1,2)/3,2))
coord = rbind(s.ob,s.new)
d.ob = nrow(s.ob) #number of locations, =9
par.t = c(0,1,1,0.5,1.5,100) #the true angle, lambda12, c, gamma, eta, and degrees of freedom
#df=100 effectively yields a Gaussian copula
Y.all = Data.COST(n,n.total,seed1,coord,par.t)$Y.all
Y = Y.all[1:n,1:d.ob] #for parameter estimation
Y.newloc.ob = as.vector(Y.all[n,-(1:d.ob)])
#pars.t.COST<-optim(par=c(0.1,1,2,0.8,2,5),logL.COST.t,Y=Y,s.ob=s.ob,method="L-BFGS-B",
#lower=c(0,0.1,0.2,0.05,1.1,1),upper=c(pi/2,20,10,0.95,10,100))$par
low.up.COST.t <- Predictions.COST.t(par.t,Y,s.ob,s.new,isotropic=FALSE)
COST.t.pre.ECP = (Y.newloc.ob>=low.up.COST.t[1,])*(Y.newloc.ob<=low.up.COST.t[2,])*1
COST.t.pre.ML = low.up.COST.t[2,]-low.up.COST.t[1,]
COST.t.pre.med.error = low.up.COST.t[3,]-Y.newloc.ob
#pars.G.COST<-optim(par=c(0.1,1,2,0.8,2),logL.COST.G,Y=Y,s.ob=s.ob,method="L-BFGS-B",
#lower=c(0,0.1,0.2,0.05,1.1),upper=c(pi/2,20,10,0.95,10))$par
low.up.COST.G <- Predictions.COST.G(par.t[-6],Y,s.ob,s.new,isotropic=FALSE)
COST.G.pre.ECP = (Y.newloc.ob>=low.up.COST.G[1,])*(Y.newloc.ob<=low.up.COST.G[2,])*1
COST.G.pre.ML = low.up.COST.G[2,]-low.up.COST.G[1,]
COST.G.pre.med.error = low.up.COST.G[3,]-Y.newloc.ob
#pars.GP <- optim(par=c(0,1,2,0.8,2),logL.GP,Y=Y,
#s.ob=s.ob,method="L-BFGS-B",lower=c(0,0.1,0.2,0.05,1.1),upper=c(pi/2,50,10,0.95,30))$par
low.up.GP <- Predictions.GP(par.t[-6],Y,s.ob,s.new,isotropic=FALSE)
GP.pre.ECP = (Y.newloc.ob>=low.up.GP[1,])*(Y.newloc.ob<=low.up.GP[2,])*1
GP.pre.ML = low.up.GP[2,]-low.up.GP[1,]
GP.pre.med.error = low.up.GP[3,]-Y.newloc.ob
result.prediction = list(COST.t.pre.ECP=COST.t.pre.ECP,
COST.t.pre.ML=COST.t.pre.ML,COST.t.pre.med.error=COST.t.pre.med.error,
COST.G.pre.ECP=COST.G.pre.ECP,
COST.G.pre.ML=COST.G.pre.ML,COST.G.pre.med.error=COST.G.pre.med.error,
GP.pre.ECP=GP.pre.ECP,GP.pre.ML=GP.pre.ML,
GP.pre.med.error=GP.pre.med.error)
return(result.prediction)
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COST/R/COST.R
# Generic synthetic binomial logistic regression 24Sept 2010
# Hilbe, Negative Binomial Regression, 2nd ed, Cambridge Univ Press
# Hilbe, Logistic Regression Models, Chapman & Hall/CRC
require(MASS) # logit_syn.r
logit_syn <- function(nobs=50000, d = 1, xv = c(1, 0.5, -1.5)) {
p <- length(xv) - 1
X <- cbind(1, matrix(rnorm(nobs * p), ncol = p))
xb <- X %*% xv
exb <- 1/(1+exp(-xb))
by <- rbinom(nobs, size = d, prob =exb)
dby <- d - by
out <- data.frame(cbind(cbind(by,dby), X[,-1]))
names(out) <- c("by","dby", paste("x", 1:p, sep=""))
return(out)
}
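# Hedged usage sketch: refit the synthetic data with glm(); the estimated
# coefficients should be close to xv (all values below are assumptions).
# sml <- logit_syn(nobs = 10000, d = 1, xv = c(1, 0.5, -1.5))
# fit <- glm(cbind(by, dby) ~ x1 + x2, family = binomial(link = "logit"),
#            data = sml)
# coef(fit)   # approximately (1, 0.5, -1.5)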
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/logit_syn.r
# NB1 maximum likelihood function J Hilbe and A Robinson 11Apr 2010, 10Jul 2011
ml.nb1 <- function(formula, data, offset = 0, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nb1X <- model.matrix(formula, data = data)
nb1.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1] + offset
mu.hat <- exp(xb.hat)
r.hat <- (1/a.hat) * mu.hat
sum(dnbinom(y,
size = r.hat,
mu = mu.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nb1X) - 1))
fit <- optim(start,
nb1.reg.ml,
X = nb1X,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nb1X))
results <- results[c(2:nrow(results), 1),]
return(results)
}
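# Hedged example (assumes the COUNT package's medpar data are available):
# library(COUNT); data(medpar)
# ml.nb1(los ~ hmo + white + type2 + type3, data = medpar)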
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/ml.nb1.r
# NB2 maximum likelihood function J Hilbe and A Robinson 11Apr 2010, 10Jul 2011
ml.nb2 <- function(formula, data, offset = 0, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nb2X <- model.matrix(formula, data = data)
nb2.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1] + offset
mu.hat <- exp(xb.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
mu = mu.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nb2X) - 1))
fit <- optim(start,
nb2.reg.ml,
X = nb2X,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nb2X))
results <- results[c(2:nrow(results), 1),]
return(results)
}
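# Hedged check: MASS::glm.nb fits the same NB2 model, with theta = 1/alpha
# (assumes the COUNT package's medpar data are available).
# library(COUNT); library(MASS); data(medpar)
# ml.nb2(los ~ hmo + white + type2 + type3, data = medpar)
# summary(glm.nb(los ~ hmo + white + type2 + type3, data = medpar))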
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/ml.nb2.r
ml.nbc <- function(formula, data, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nbcX <- model.matrix(formula, data = data)
nbc.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- 1 / ((exp(-xb.hat)-1)*a.hat)
p.hat <- 1 / (1 + a.hat*mu.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
prob = p.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nbcX) - 1))
fit <- optim(start,
nbc.reg.ml,
X = nbcX,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nbcX))
results <- results[c(2:nrow(results), 1),]
return(results)
}
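# Hedged sketch pairing ml.nbc with the synthetic NB-C generator nbc_syn
# defined later in this package; estimates should be near alpha and xv.
# sim <- nbc_syn(nobs = 10000, alpha = 1.15, xv = c(-1.5, -1.25, -0.1))
# ml.nbc(nbcy ~ x1 + x2, data = sim)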
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/ml.nbc.r
# Poisson maximum likelihood function J Hilbe and A Robinson 11Apr 2010, 10Jul 2011
ml.pois <- function(formula, data, offset = 0, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
poisX <- model.matrix(formula, data = data)
pois.reg.ml <- function(b.hat, X, y) {
xb.hat <- X %*% b.hat + offset
mu.hat <- exp(xb.hat)
sum(dpois(y,
lambda = mu.hat,
log = TRUE))
}
if (is.null(start))
start <- c(-1, rep(0, ncol(poisX) - 1))
fit <- optim(start,
pois.reg.ml,
X = poisX,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- colnames(poisX)
results <- results[c(2:nrow(results), 1),]
return(results)
}
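# Hedged check: glm() with family=poisson fits the same model
# (assumes the COUNT package's medpar data are available).
# library(COUNT); data(medpar)
# ml.pois(los ~ hmo + white + type2 + type3, data = medpar)
# summary(glm(los ~ hmo + white + type2 + type3, family = poisson, data = medpar))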
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/ml.pois.r
# AIC and BIC statistics following glm, glm.nb, nbinomial
# Joseph Hilbe, Modeling Count Data, Cambridge Univ Press
# version 2 13 Aug, 2014. Amend xvars line
modelfit <- function(x) {
obs <- x$df.null + 1
aic <- x$aic
xvars <- x$df.null - x$df.residual + 1
rdof <- x$df.residual
aic_n <- aic/obs
ll <- xvars - aic/2
bic_r <- x$deviance - (rdof * log(obs))
bic_l <- -2*ll + xvars * log(obs)
bic_qh <- -2*(ll - xvars * log(xvars))/obs
return(list("AIC" = aic, "AICn" = aic_n, "BIC" = bic_l, "BICqh" = bic_qh))
}
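# Hedged usage sketch: pass any fitted glm or glm.nb object.
# library(COUNT); data(medpar)
# fit <- glm(los ~ hmo + white + type2 + type3, family = poisson, data = medpar)
# modelfit(fit)   # returns AIC, AIC/n, BIC, and BICqh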
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/modelfit.r
# Frequency count and percentage table
# Table 9.40 : Hilbe, JM (2011) Negative Binomial Regression, 2nd ed, Cambridge Univ Press
myTable <- function(x) {
myDF <- data.frame( table(x) )
myDF$Prop <- prop.table( myDF$Freq )
myDF$CumProp <- cumsum( myDF$Prop )
myDF
}
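# Illustrative example: frequency, proportion, and cumulative proportion.
# myTable(c(1, 1, 2, 3, 3, 3))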
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/myTable.r
# Generic synthetic NB1 model
# In Hilbe, J.M., Negative Binomial Regression, 2nd ed, Cambridge Univ Press
require(MASS) # nb1_syn.r
nb1_syn <- function(nobs = 50000,
delta = 1,
xv = c(1, 0.75, -1.25)) {
p <- length(xv) - 1
X <- cbind(1, matrix(rnorm(nobs * p), ncol = p))
xb <- X %*% xv
d <- delta
exb <- exp(xb)
idelta <- (1/delta)*exb
xg <- rgamma(n = nobs, shape = idelta, rate = idelta)
xbg <- exb*xg
nb1y <- rpois(nobs, xbg)
out <- data.frame(cbind(nb1y, X[,-1]))
names(out) <- c("nb1y", paste("x", 1:p, sep=""))
return(out)
}
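# Hedged sketch: recover the parameters with ml.nb1 (defined in this package).
# sim <- nb1_syn(nobs = 10000, delta = 1, xv = c(1, 0.75, -1.25))
# ml.nb1(nb1y ~ x1 + x2, data = sim)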
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/nb1_syn.r
# library(COUNT)
# data(medpar)
# mdpar <- glm.nb(los ~ hmo+white+type2+type3, data=medpar, y=TRUE, model=TRUE)
nb2.obs.pred <- function(len, model) {
mu <- fitted(model)
trun.y <- model$y[model$y < len+1]
obs <- as.data.frame(table(trun.y) / length(model$y) * 100)
names(obs) <- c("Count","propObsv")
alpha <- 1/model$theta
amu <- alpha*mu
pred <- data.frame(Count = 0:len,
propPred = sapply(0:len, function(i)
mean(exp(i*log(amu/(1+amu)) - (1/alpha) * log(1+amu) +
log(gamma(i + 1/alpha) ) - log(gamma(i + 1) ) -
log(gamma(1 / alpha))))) * 100)
out <- merge(pred, obs, all=TRUE)
out$propObsv[is.na(out$propObsv)] <- 0
out$Diff <- with(out, propObsv - propPred)
return(out[,c(1,3,2,4)])
}
# nb2.obs.pred(len=25, model=mdpar)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/nb2.obs.pred.r
# Generic synthetic negative binomial (NB2) data and model
# Hilbe, J.M Negative Binomial Regression, 2nd ed, Cambridge University Press
nb2_syn <- function(nobs = 50000, off = 0,
alpha = 1,
xv = c(1, 0.75, -1.5)) {
p <- length(xv) - 1
X <- cbind(1, matrix(rnorm(nobs * p), ncol = p))
xb <- X %*% xv
a <- alpha
ia <- 1/a
exb <- exp(xb + off)
xg <- rgamma(n = nobs, shape = a, rate = a)
xbg <-exb*xg
nby <- rpois(nobs, xbg)
out <- data.frame(cbind(nby, X[,-1]))
names(out) <- c("nby", paste("x", 1:p, sep=""))
return(out)
}
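# Hedged check with MASS::glm.nb (theta estimates 1/alpha):
# sim <- nb2_syn(nobs = 10000, off = 0, alpha = 1, xv = c(1, 0.75, -1.5))
# library(MASS)
# fit <- glm.nb(nby ~ x1 + x2, data = sim)
# coef(fit); 1/fit$theta   # coefficients near xv; dispersion near alpha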
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/nb2_syn.r
# Generic synthetic NB-C model
# Table 10.9, Hilbe, J.M., Negative Binomial Regression, 2nd ed.
# Cambridge Univ Press nbc_syn.r
require(MASS)
nbc_syn <- function(nobs = 50000,
alpha = 1.15,
xv = c(-1.5, -1.25, -.1)) {
q <- length(xv) - 1
X <- cbind(1, matrix(runif(nobs * q), ncol = q))
xb <- X %*% xv
a <- alpha
mu <- 1/((exp(-xb)-1)*a)
p <- 1/(1+a*mu)
r <- 1/a
nbcy <- rnbinom(nobs, size=r, prob = p)
out <- data.frame(cbind(nbcy, X[,-1]))
names(out) <- c("nbcy", paste("x", 1:q, sep=""))
return(out)
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/nbc_syn.r
# Create a table of observed vs predicted Poisson counts, and difference
# following glm() : see poi.obs.pred.Rd for usage
# See Hilbe, J.M (2011), Negative Binomial Regression, 2nd ed, Cambridge Univ Press
poi.obs.pred <- function(len, model) {
mu <- fitted(model)
trun.y <- model$y[model$y < len+1]
obs <- as.data.frame(table(trun.y) / length(model$y) * 100)
names(obs) <- c("Count","propObsv")
pred <- data.frame(Count = 0:len,
propPred = sapply(0:len, function(i)
mean(exp(-mu)*(mu^i)/factorial(i))) * 100)
out <- merge(pred, obs, all=TRUE)
out$propObsv[is.na(out$propObsv)] <- 0
out$Diff <- with(out, propObsv - propPred[1:(len+1)])
return(out[,c(1,3,2,4)])
}
#library(COUNT)
#data(medpar)
#
#mdpar <- glm(los ~ hmo+white+type2+type3, family=poisson, data=medpar,
# y=TRUE, model=TRUE)
#poi.obs.pred(len=25, model=mdpar)
#
#data(affairs)
#myglm <- glm(naffairs ~ kids, family=poisson, data=affairs, y=TRUE, model=TRUE)
#poi.obs.pred(len=8, model=myglm)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/poi.obs.pred.r
# Generic Synthetic Rate Parameterized Poisson
# Table 6.20, Hilbe, J.M. Negative Binomial Regression. 2nd ed, Cambridge Univ Press
require(MASS)
poisson_syn <- function(nobs = 50000, off = 0, xv = c(1, -.5, 1)) {
p <- length(xv) - 1
X <- cbind(1, matrix(rnorm(nobs * p), ncol = p))
xb <- X %*% xv
exb <- exp(xb + off)
py <- rpois(nobs, exb)
out <- data.frame(cbind(py, X[,-1]))
names(out) <- c("py", paste("x", 1:p, sep=""))
return(out)
}
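# Hedged sketch: recover xv with a Poisson GLM.
# sim <- poisson_syn(nobs = 10000, off = 0, xv = c(1, -0.5, 1))
# coef(glm(py ~ x1 + x2, family = poisson, data = sim))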
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/poisson_syn.r
# Generic synthetic probit regression 24Sept 2010
# Hilbe, Negative Binomial Regression, 2nd ed, Cambridge Univ Press
# Hilbe, Logistic Regression Models, Chapman & Hall/CRC
require(MASS) # probit_syn.r
probit_syn <- function(nobs=50000, d = 1, xv = c(1, 0.5, -1.5)) {
p <- length(xv) - 1
X <- cbind(1, matrix(rnorm(nobs * p), ncol = p))
xb <- X %*% xv
pxb <- pnorm(xb)
py <- rbinom(nobs, size = d, prob =pxb)
dpy <- d - py
out <- data.frame(cbind(cbind(py,dpy), X[,-1]))
names(out) <- c("py","dpy", paste("x", 1:p, sep=""))
return(out)
}
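# Hedged sketch: recover xv with a probit GLM.
# sim <- probit_syn(nobs = 10000, d = 1, xv = c(1, 0.5, -1.5))
# coef(glm(cbind(py, dpy) ~ x1 + x2, family = binomial(link = "probit"),
#          data = sim))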
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/R/probit_syn.r
# hilbe.NBR2.F10.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# Figure 10.1 default NB2/Geometric: means at 0.5, 1, 2, 5, 10 and alpha=1
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- 1
amu <- mu*alpha
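# The loop below evaluates the NB2 pmf on the log scale, i.e.
#   P(Y=y) = Gamma(y+1/alpha) / (Gamma(y+1)*Gamma(1/alpha))
#            * (1/(1+alpha*mu))^(1/alpha) * (alpha*mu/(1+alpha*mu))^y
# (explanatory comment; the same kernel recurs in the Figure 8.x scripts)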
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F10.1.r
# hilbe.NBR2.F11.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# FIGURE 11.1
load("c://source/mdvis.RData")
histogram(mdvis$numvisit)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F11.1.r
# hilbe.NBR2.F14.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# FIGURE 14.1
library(gamlss.mx)
load("c://source/medpar.RData")
rinb <- gamlssNP(los~ hmo +white+ type2 +type3, random=~1|provnum,
data=medpar, family=NBI, mixture="gq", K=20)
summary(rinb)
summary(rinb$sigma.fv)
m<-rinb$mu.fv # fitted values for extended model
s<-rinb$sigma.fv # sigma
presid <- (medpar$los-m)/sqrt(m+s*m*m)
summary(presid)
hist(presid)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F14.1.r
# hilbe_NBR2_F3_1.r
# Table 3.1
# Script for creating Poisson and negative binomial (NB2) distributions
# on single graph with user specified number of observations (obs),
# mean (mu), counts (0:specified), and NB2 ancillary parameter value (alpha).
# Additional enhanced graphic can be displayed; -ggplot2- must be installed.
# From Hilbe, Negative Binomial Regression, 2nd ed, Cambridge Univ. Press
# Table 3.1; Figure 3.1
#
# ------------------------------------------
# User specified values; defaults displayed
obs <- 11
mu <- 2
y <- 0:10
alpha <- 1.5
# ------------------------------------------
amu <- mu*alpha
ynb2 = exp(
y*log(amu/(1+amu))
- (1/alpha)*log(1+amu)
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
xbar = "mu"
a = "alpha"
plot( y, ynb2, col="red", pch=5,
main="Poisson vs Negative Binomial PDFs")
lines( y, ynb2, col="red")
points(y, yp2, col="blue", pch=2)
lines( y, yp2, col="blue")
legend(4.3,.40,
c("Negative Binomial: mean=2, a=1.5",
"Poisson: mean=2"),
col=( c("red","blue") ),
pch=( c(5,2) ),
lty=1)
#========= FOR NICER GRAPHIC =======================
zt <- 0:10 #zt is Zero to Ten
x <- c(zt,zt) #two zt's stacked for use with ggplot2
newY <- c(yp2, ynb2) #Now stacking these two vars
Distribution <- gl(n=2,k=11,length=22,
label=c("Poisson","Negative Binomial")
)
NBPlines <- data.frame(x,newY,Distribution)
library("ggplot2")
ggplot( NBPlines, aes(x,newY,shape=Distribution,col=Distribution ) ) +
geom_line() + geom_point()
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F3.1.r
# hilbe.NBR2.F6.1.r
# Table 6.4 plus added code
# Synthetic Poisson model with user-defined values;
# graphic of predicted mean values
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.4; Figure 6.1
#
nobs <- 50000
x1 <- qnorm(runif(nobs))
x2 <- qnorm(runif(nobs))
py <-rpois(nobs, exp(2 + .75*x1 -1.25*x2))
mpy <- mean(py)
ypoi <- (exp(-mpy)*mpy^py)/gamma(py+1)
plot(ypoi~ py, xlim=c(0,50), main="Synthetic Poisson Model: Mean=21")
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F6.1.r
# hilbe.NBR2.F6.2.r
# Table 6.15 + added code
# Table of Poisson observed vs predicted mean counts and difference;
# graphic of observed vs predicted counts
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.15; Figure 6.2
# User to amend default dataset, response, and number of counts
#
rm(list=ls())
load("c://source/medpar.RData")
mdpar <- glm(los ~ hmo+white+type2+type3, family=poisson, data=medpar)
mu <- fitted(mdpar)
avgp <- sapply(0:25, function(i) mean(exp(-mu)*(mu^i)/factorial(i)))
propObsv <- with(subset(medpar, los < 26), table(los) / nrow(medpar))
Diff <- c(0,propObsv)*100 - avgp*100
data.frame(LOS=0:25, ObsProp=c(0,propObsv)*100, avgp*100, Diff)
plot(0:25, avgp, type="b", xlim=c(0,25),
main = "Observed vs Predicted Days",
xlab = "Days in Hospital", ylab = "Probability of LOS")
lines(0:25, c(0,propObsv), type = "b", pch = 2)
legend("topright", legend = c("Predicted Days","Observed Days"),
lty = c(1,1), pch = c(1,2))
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F6.2.r
# hilbe.NBR2.F6.2alt.r
# Table 6.15 + added code
# Table of Poisson observed vs predicted mean counts and difference;
# graphic of observed vs predicted counts
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.15; Figure 6.2 Alternative
# User to amend default dataset, response, and number of counts
#
load("c://source/medpar.RData")
mdpar <- glm(los ~ hmo+white+type2+type3,family=poisson, data=medpar)
mu <- fitted.values(mdpar)
p <- NULL
avgp <- NULL
for (i in 0:25) {
p[[i+1]] <- exp(-mu)*(mu^i)/factorial(i)
avgp[i+1] <- mean(p[[i+1]])
}
nCases <- dim(medpar)
n<- NULL
propObs<- NULL
probFit<- NULL
yFitMean<- NULL
for (i in 0:25) { #possible values for LOS
bLos<- medpar$los==i #selector for los=i
n[i+1]<- sum(bLos) #number for los=i
propObs[i+1]<- n[i+1]/nCases[1] #observed proportion for LOS=i
}
Diff <- propObs*100 - avgp*100
data.frame(LOS=0:25, ObsProp=propObs*100, avgp*100, Diff)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F6.2alt.r
# hilbe.NBR2.F6.3.r
# Table 6.16
# Graphic of Poisson distributions with user specified mean values
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.16; Figure 6.3
#
m<- c(0.5,1,3,5,7,9) #Poisson means
y<- 0:19 #Observed counts
layout(1)
for (i in 1:length(m)) {
p<- dpois(y, m[i]) #poisson pmf
if (i==1) {
plot(y, p, col=i, type='l', lty=i)
} else {
lines(y, p, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F6.3.r
# hilbe.NBR2.F6.4.r
# Conditional effects Poisson plot with user specified mean values
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.17; Figure 6.4
# User to amend data, model variables, and effects for graphing
load("c://source/rwm5yr.RData")
eppoi <- glm(docvis ~ outwork+age+female+married+edlevel2+edlevel3+ edlevel4, family=poisson, data=rwm5yr)
rest <- eppoi$coef[4]*mean(rwm5yr$female) + eppoi$coef[5]*mean(rwm5yr$married) +
eppoi$coef[6]*mean(rwm5yr$edlevel2) + eppoi$coef[7]*mean(rwm5yr$edlevel3) +
eppoi$coef[8]*mean(rwm5yr$edlevel4)
out0 <- eppoi$coef[1] + eppoi$coef[3]*rwm5yr$age + rest
out1 <- eppoi$coef[1] + eppoi$coef[2]*1 + eppoi$coef[3]*rwm5yr$age + rest
eout1 <- exp(out1)
eout0 <- exp(out0)
matplot(cbind(rwm5yr$age, rwm5yr$age), cbind(eout0, eout1),
pch=1:2, col=1:2, xlab='Age', ylab='Predicted doctor visits')
matplot(cbind(rwm5yr$age, rwm5yr$age), cbind(eout0, eout1), type='l',
lty=1:2, col=1:2, xlab='Age', ylab='Predicted doctor visits')
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F6.4.r
# hilbe.NBR2.F8.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values; alpha=0 gives the Poisson limit
# Figure 8.1 default: means at .5, 1, 2, 5, 10 and alpha=0
#
m<- c(0.5,1,2,5,10) #mean values
y<- 0:10 #Observed counts
layout(1)
for (i in 1:length(m)) {
p<- dpois(y, m[i]) #poisson pmf
if (i==1) {
plot(y, p, col=i, type='l', lty=i)
} else {
lines(y, p, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.1.r
# hilbe.NBR2.F8.10.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.10 default: alpha at 0, .33, .67, 1, 1.5, 3 and mean=5
#
obs <- 11
alpha <- c(.009, .33, .67, 1, 1.5, 3)
y <- 0:10
mu <- 5
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.10.r
# hilbe.NBR2.F8.11.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.11 default: alpha at 0, .33, .67, 1, 1.5, 3 and mean=10
#
obs <- 11
alpha <- c(.009, .33, .67, 1, 1.5, 3)
y <- 0:10
mu <- 10
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.11.r
# hilbe.NBR2.F8.12.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.12 default: alpha at .009, .1, .3, .5 and mean=10
#
obs <- 11
alpha <- c(.009, .1, .3, .5)
y <- 0:30
mu <- 10
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.12.r
# hilbe.NBR2.F8.13.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.13 default: alpha at 0.6, .8, 1, 1.2 and mean=10
#
obs <- 11
alpha <- c(.6, .8, 1, 1.2)
y <- 0:30
mu <- 10
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.13.r
# hilbe.NBR2.F8.2.r
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 8.2-6 default: means at .5, 1, 2, 5, 10 and alpha=.33
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- .33
amu <- mu*alpha
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.2.r
# hilbe.NBR2.F8.3.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# Figures 8.3 default: means at 0.5, 1, 2, 5, 10 and alpha=.67
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- .67
amu <- mu*alpha
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.3.r
# hilbe.NBR2.F8.4.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# Figures 8.4 default: means at 0.5, 1, 2, 5, 10 and alpha=1
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- 1
amu <- mu*alpha
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.4.r
# hilbe.NBR2.F8.5.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# Figures 8.5 default: means at 0.5, 1, 2, 5, 10 and alpha=1.5
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- 1.5
amu <- mu*alpha
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.5.r
# hilbe.NBR2.F8.6.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of mean values for a specified alpha value
# Figures 8.6 default: means at 0.5, 1, 2, 5, 10 and alpha=3
#
obs <- 11
mu <- c(0.5,1,2,5,10)
y <- 0:10
alpha <- 3
amu <- mu*alpha
layout(1)
for (i in 1:length(mu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.6.r
# hilbe.NBR2.F8.7.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.7 default: alpha at 0, .33, .67, 1, 1.5, 3 and mean=.5
#
obs <- 11
alpha <- c(.009, .33, .67, 1, 1.5, 3)
y <- 0:10
mu <- .5
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.7.r
# hilbe.NBR2.F8.8.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.8 default: alpha at 0, .33, .67, 1, 1.5, 3 and mean=1
#
obs <- 11
alpha <- c(.009, .33, .67, 1, 1.5, 3)
y <- 0:10
mu <- 1
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.8.r
# hilbe.NBR2.F8.9.r
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figures 8.9 default: alpha at 0, .33, .67, 1, 1.5, 3 and mean=2
#
obs <- 11
alpha <- c(.009, .33, .67, 1, 1.5, 3)
y <- 0:10
mu <- 2
amu <- mu*alpha
layout(1)
for (i in 1:length(amu)) {
ynb2 = exp(
y*log(amu[i]/(1+amu[i]))
- (1/alpha[i])*log(1+amu[i])
+ log( gamma(y +1/alpha[i]) )
- log( gamma(y+1) )
- log( gamma(1/alpha[i]) )
)
if (i==1) {
plot(y, ynb2, col=i, type='l', lty=i)
} else {
lines(y, ynb2, col=i, lty=i)
}
}
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.9.r
# hilbe.NBR2.F9.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.1 user may amend as required for own model
#
load("c://source/affairs.RData")
affmodel <- glm(naffairs~ kids + avgmarr + hapavg + vryhap + notrel + slghtrel + smerel + vryrel + yrsmarr3 + yrsmarr4 + yrsmarr5 + yrsmarr6, family=poisson, data=affairs)
mu <- fitted(affmodel)
p <- NULL
avgp <- NULL
for (i in 0:15) {
p[[i+1]] <- exp(-mu)*(mu^i)/factorial(i)
avgp[i+1] <- mean(p[[i+1]])
}
nCases <- dim(affairs)
n<- NULL
propObs<- NULL
probFit<- NULL
yFitMean<- NULL
for (i in 0:15) { #possible values for naffairs
bLos<- affairs$naffairs==i #selector for naffairs=i
n[i+1]<- sum(bLos) #number for naffairs=i
propObs[i+1]<- n[i+1]/nCases[1] #observed proportion for naffairs=i
}
Diff <- propObs*100 - avgp*100
data.frame(naffairs=0:15, ObsProp=propObs*100, avgp*100, Diff)
plot(0:15, avgp, type="b", xlim=c(0,15),
main = "Observed vs Predicted Affairs",
xlab = "Number Affairs", ylab = "Probability of naffairs")
lines(0:15, propObs, type = "b", pch = 2)
legend("topright", legend = c("Predicted Affairs","Observed Affairs"),
lty = c(1,1), pch = c(1,2))
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.1.r
# hilbe.NBR2.F9.1alt.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.1 Table Creation - no graphic Alternative
#
rm(list=ls())
load("c://source/affairs.RData")
affmodel <- glm(naffairs~ kids + avgmarr + hapavg + vryhap + notrel + slghtrel + smerel + vryrel + yrsmarr3 + yrsmarr4 + yrsmarr5 + yrsmarr6, family=poisson, data=affairs)
mu <- fitted.values(affmodel)
avgp <- sapply(0:13, function(i) mean(exp(-mu)*(mu^i)/factorial(i)))
propObsv <- table(factor(affairs$naffairs, levels = 0:13)) / nrow(affairs)
Diff <- as.numeric(propObsv)*100 - avgp*100
data.frame(naffairs=0:13, ObsProp=as.numeric(propObsv)*100, avgp*100, Diff)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.1alt.r
# hilbe.NBR2.F9.2.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.2 user may amend as required for own model
#
load("c://source/affairs.RData")
affmodelnb <- glm.nb(naffairs~ kids + avgmarr + hapavg + vryhap + notrel + slghtrel + smerel + vryrel + yrsmarr3 + yrsmarr4 + yrsmarr5 + yrsmarr6, data=affairs)
mu <- fitted.values(affmodelnb)
p <- NULL
avgp <- NULL
for (i in 0:15) {
p[[i+1]] <- exp(-mu)*(mu^i)/factorial(i)
avgp[i+1] <- mean(p[[i+1]])
}
nCases <- dim(affairs)
n<- NULL
propObs<- NULL
probFit<- NULL
yFitMean<- NULL
for (i in 0:15) { #possible values for naffairs
bLos<- affairs$naffairs==i #selector for naffairs=i
n[i+1]<- sum(bLos) #number for naffairs=i
propObs[i+1]<- n[i+1]/nCases[1] #observed proportion for naffairs=i
}
Diff <- propObs*100 - avgp*100
data.frame(naffairs=0:15, ObsProp=propObs*100, avgp*100, Diff)
plot(0:15, avgp, type="b", xlim=c(0,15),
main = "Observed vs Predicted Affairs",
xlab = "Number Affairs", ylab = "Probability of naffairs")
lines(0:15, propObs, type = "b", pch = 2)
legend("topright", legend = c("Predicted Affairs","Observed Affairs"),
lty = c(1,1), pch = c(1,2))
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.2.r
# hilbe.NBR2.F9.2alt.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.2 Table Creation - no graphic Alternative
#
rm(list=ls())
load("c://source/affairs.RData")
affmodel <- glm.nb(naffairs~ kids + avgmarr + hapavg + vryhap + notrel + slghtrel + smerel + vryrel + yrsmarr3 + yrsmarr4 + yrsmarr5 + yrsmarr6, data=affairs)
mu <- fitted.values(affmodel)
avgp <- sapply(0:13, function(i) mean(exp(-mu)*(mu^i)/factorial(i)))
propObsv <- table(factor(affairs$naffairs, levels = 0:13)) / nrow(affairs)
Diff <- as.numeric(propObsv)*100 - avgp*100
data.frame(naffairs=0:13, ObsProp=as.numeric(propObsv)*100, avgp*100, Diff)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.2alt.r
# hilbe.NBR2.F9.3.r
# Negative binomial regression postestimation graphic
# Standardized deviance vs fitted value, mu.
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.3 user may amend as required for own model
#
load("c://source/affairs.RData")
affnb2r <- glm.nb(naffairs~ avgmarr + hapavg + vryhap + smerel
+ vryrel + yrsmarr4 + yrsmarr5 + yrsmarr6, data=affairs)
summary(affnb2r)
confint(affnb2r)
exp(coef(affnb2r))
exp(confint(affnb2r))
deviance <- residuals(affnb2r, type="deviance")
dev <- sum(deviance*deviance)
pred <- predict(affnb2r, se.fit=TRUE, type="response")
mu <- pred$fit
stdp <- pred$se.fit # Std error of prediction
variance <- mu
h <- stdp * stdp*variance # hat matrix diagonal
sdeviance <- rstandard(affnb2r) # Std deviance
plot(mu, sdeviance)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.3.r
# hilbe.NBR2.F9.4.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Figure 9.4 user may amend as required for own model
# Table 9.32
#
load("c://source/azpro.RData")
attach(azpro)
myTable <- function(x) {
myDF <- data.frame( table(x) )
myDF$Prop <- prop.table( myDF$Freq )
myDF$CumProp <- cumsum( myDF$Prop )
myDF
}
by(los, procedure, myTable)
windows(record=TRUE) # set so graphs both show
by(los, procedure, hist) # produces histograms
# OR
library("ggplot2")
qplot(los,geom="histogram",facets=procedure~.)
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.4.r
# hilbe.NBR2.F9.5.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# observed vs predicted number of affairs
# Figure 9.5 user may amend as required for own model
#
load("c://source/mdvis.RData")
poimd <- glm(numvisit ~ reform+ badh+age+educ+loginc,family=poisson, data=mdvis)
mu <- fitted.values(poimd)
p <- NULL
avgp <- NULL
for (i in 0:20) {
p[[i+1]] <- exp(-mu)*(mu^i)/factorial(i)
avgp[i+1] <- mean(p[[i+1]])
}
nCases <- dim(mdvis)
n<- NULL
propObs<- NULL
probFit<- NULL
yFitMean<- NULL
for (i in 0:20) { #possible values for numvisit
bVis<- mdvis$numvisit==i #selector for numvisit=i
n[i+1]<- sum(bVis) #number for numvisit=i
propObs[i+1]<- n[i+1]/nCases[1] #observed proportion for numvisit=i
}
Diff <- propObs*100 - avgp*100
data.frame(Visits=0:20, ObsProp=propObs*100, avgp*100, Diff)
plot(0:20, avgp, type="b", xlim=c(0,20),
main = "Observed vs Predicted Visits",
xlab = "Number Visits", ylab = "Probability of Visits")
lines(0:20, propObs, type = "b", pch = 2)
legend("topright", legend = c("Predicted Visits","Observed Visits"),
lty = c(1,1), pch = c(1,2))
# Source file: /scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.5.r
# hilbe.NBR2.F9.6ab.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# mu vs standardized deviance graphs
# Figure 9.6a and 9.6b user may amend as required for own model
# Table 9.46
#
# mu and Std deviance from ex4poie in T 9.44 - Poisson model
deviancep <- residuals(ex4poie, type="deviance")
devp <- sum(deviancep*deviancep)
predp <- predict(ex4poie, se.fit=TRUE, type="response")
mup <- predp$fit
stdpp <- predp$se.fit # Std error of prediction
variancep <- mup # Poisson variance evaluated at mu
hp <- stdpp * stdpp*variancep # hat matrix diagonal
mup <- 2*sqrt(mup) # x-axis on the 2*sqrt(mu) scale
sdeviancep <- rstandard(ex4poie) # Std deviance
plot(mup, sdeviancep)
# mu and Std deviance from ex4nbe in T 9.45 - NB2 model
deviancenb <- residuals(ex4nbe, type="deviance")
devnb <- sum(deviancenb*deviancenb)
prednb <- predict(ex4nbe, se.fit=TRUE, type="response")
munb <- prednb$fit
variancenb <- munb # take variance term before rescaling
munb <- 2*sqrt(munb) # rescale mu for plotting
stdpnb <- prednb$se.fit # Std error of prediction
hnb <- stdpnb * stdpnb*variancenb
sdeviancenb <- rstandard(ex4nbe) # Std dev
plot(munb, sdeviancenb) # Figure 9.6B
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F9.6ab.r
|
# Modeling Count Data, Cambridge University Press (2014)
# Joseph M Hilbe [email protected]
# ===============================================================
# CHAPTER 1 Varieties of Count Data
# ===============================================================
#
# 1.2.1
# ------
#
# R CODE
sbp <- c(131,132,122,119,123,115)
male <- c(1,1,1,0,0,0)
smoker <- c(1,1,0,0,1,0)
age <- c(34,36,30,32,26,23)
summary(reg1 <- lm(sbp~ male+smoker+age))
mu <- predict(reg1)
mu
cof <- reg1$coef
cof
xb <- cof[1] + cof[2]*male + cof[3]*smoker + cof[4]*age
xb
diff <- sbp - mu
diff
# 1.2.3
# --------
# Table 1.2a R: Code for Figure 1.2a
# =====================================================
obs <- 15; mu <- 4; y <- (0:140)/10; alpha <- .5
amu <- mu*alpha; layout(1)
all.lines <- vector(mode = 'list', length = 5)
for (i in 1:length(mu)) {
yp = exp(-mu[i])*(mu[i]^y)/factorial(y)
ynb1 = exp( log(gamma(mu[i]/alpha + y))
- log(gamma(y+1))
- log(gamma(mu[i]/alpha))
+ (mu[i]/alpha)*log(1/(1+alpha))
+ y*log(1-1/(1+alpha)))
ynb2 = exp( y*log(amu[i]/(1+amu[i]))
- (1/alpha)*log(1+amu[i])
+ log( gamma(y +1/alpha) )
- log( gamma(y+1) )
- log( gamma(1/alpha) ))
ypig = exp( (-(y-mu[i])^2)/(alpha*2*y*mu[i]^2)) * (sqrt(1/(alpha*2*pi*y^3)))
ygp = exp( log((1-alpha)*mu[i])
+ (y-1)*log((1-alpha) * mu[i]+alpha*y)
- (1-alpha)*mu[i]
- alpha*y
- log(gamma(y+1)))
all.lines = list(yp = yp, ynb1 = ynb1, ynb2 = ynb2, ypig = ypig, ygp = ygp)
ymax = max(unlist(all.lines), na.rm=TRUE)
cols = c("red","blue","black","green","purple")
plot(y, all.lines[[1]], ylim =
c(0, ymax), type = "n", main="5 Count Distributions: mean=4; alpha=0.5")
for (j in 1:5)
lines(y, all.lines[[j]], ylim = c(0, ymax), col=cols[j],type='b',pch=19, lty=j)
legend("topright",cex = 1.5, pch=19,
legend=c("NB2","POI","PIG","NB1","GP"),
col = c(1,2,3,4,5),
lty = c(1,1,1,1,1),
lwd = c(1,1,1,1,3))
}
# =======================================================
# 1.4.2
# -------
# Table 1.4 R: Poisson probabilities for y from 0 through 4
# ===========================================================
y <- c(4, 2, 0,3, 1, 2)
y0 <- exp(-2)* (2^0)/factorial(0)
y1 <- exp(-2)* (2^1)/factorial(1)
y2 <- exp(-2)* (2^2)/factorial(2)
y3 <- exp(-2)* (2^3)/factorial(3)
y4 <- exp(-2)* (2^4)/factorial(4)
poisProb <- c(y0, y1, y2, y3, y4); poisProb
# OR
dpois(0:4, lambda=2)
# CUMULATIVE
ppois(0:4, lambda=2)
# to plot a histogram
py <- 0:4
plot(poisProb ~ py, xlim=c(0,4), type="o", main="Poisson Prob 0-4: Mean=2")
# ============================================================
# Table 1.5 R : Code for Figure 1.3
# ===============================
m<- c(0.5,1,3,5) #Poisson means
y<- 0:11 #Observed counts
layout(1)
for (i in 1:length(m)) {
p<- dpois(y, m[i]) #poisson pdf
if (i==1) {
plot(y, p, col=i, type='l', lty=i)
} else {
lines(y, p, col=i, lty=i)
}
}
# ===============================
# ===============================================================
# CHAPTER 2 Poisson Regression
# ===============================================================
#
# 2.3
# ----
#
# Table 2.4 R: Synthetic Poisson Model
# =============================================
library(MASS); library(COUNT); set.seed(4590); nobs <- 50000
x1 <- runif(nobs); x2 <- runif(nobs); x3 <- runif(nobs)
py <- rpois(nobs, exp(1 + 0.75*x1 - 1.25*x2 + .5*x3))
cnt <- table(py)
dataf <- data.frame(prop.table(table(py) ) )
dataf$cumulative <- cumsum(dataf$Freq)
datafall <- data.frame(cnt, dataf$Freq*100, dataf$cumulative * 100)
datafall; summary(py)
summary(py1 <- glm(py ~ x1 + x2 + x3, family=poisson))
confint.default(py1); py1$aic/(py1$df.null+1)
r <- resid(py1, type = "pearson")
pchi2 <- sum(residuals(py1, type="pearson")^2)
disp <- pchi2/py1$df.residual; pchi2; disp
# =====================================================
# Table 2.6 R: Monte Carlo Poisson
# ==================================================
mysim <- function()
{
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
x3 <- runif(nobs)
py <- rpois(nobs, exp(2 + .75*x1 - 1.25*x2 + .5*x3))
poi <- glm(py ~ x1 + x2 + x3, family=poisson)
pr <- sum(residuals(poi, type="pearson")^2)
prdisp <- pr/poi$df.residual
beta <- poi$coef
list(beta,prdisp)
}
B <- replicate(100, mysim())
apply(matrix(unlist(B[1,]),4,100),1,mean)
# ===================================================
# Dispersion
mean(unlist(B[2,]))
# 2.4
# -----
# Table 2.7 R: Example Poisson Model and Associated Statistics
# ===========================================================
library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
cage <- rwm1984$age - mean(rwm1984$age)
summary(poic <- glm(docvis ~ outwork + cage, family=poisson, data=rwm1984))
pr <- sum(residuals(poic, type="pearson")^2) # Pearson Chi2
pr/poic$df.residual # dispersion statistic
modelfit(poic)
cnt <- table(rwm1984$docvis)
dataf <- data.frame(prop.table(table(rwm1984$docvis) ) )
dataf$cumulative <- cumsum(dataf$Freq)
datafall <- data.frame(cnt, dataf$Freq*100, dataf$cumulative * 100)
datafall
# ===========================================================
# Table 2.8 R: Change Levels in Categorical Predictor
# ======================================================
levels(rwm1984$edlevel) # levels of edlevel
elevel <- rwm1984$edlevel # new variable
levels(elevel)[2] <- "Not HS grad" # assign level 1 to 2
levels(elevel)[1] <- "HS" # rename level 1 to "HS"
levels(elevel) # levels of elevel
summary(tst2 <- glm(docvis ~ outwork + cage + female + married + kids
+ factor(elevel), family=poisson, data=rwm1984))
# =======================================================
# 2.5.1
# -------
summary(pyq <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
# Likelihood Profiling of SE
confint(pyq)
# Traditional Model-based SE
confint.default(pyq)
# 2.5.2
# ------
# Table 2.11 R: Poisson Model - Rate Ratio Parameterization
# ============================================================
library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(poi1 <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
pr <- sum(residuals(poi1, type="pearson")^2) # Pearson Chi2
pr/poi1$df.residual # dispersion statistic
poi1$aic / (poi1$df.null+1) # AIC/n
exp(coef(poi1)) # IRR
exp(coef(poi1))*sqrt(diag(vcov(poi1))) # delta method
exp(confint.default(poi1)) # CI of IRR
# ============================================================
# 2.6
# ----
# Table 2.12 R: Poisson with Exposure
# ===========================================
data(fasttrakg)
summary(fast <- glm(die ~ anterior + hcabg + factor(killip),
family=poisson,
offset=log(cases),
data=fasttrakg))
exp(coef(fast))
exp(coef(fast))*sqrt(diag(vcov(fast)))
exp(confint.default(fast))
modelfit(fast)
# ============================================
# 2.7
# ----
# R prediction
myglm <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984)
lpred <- predict(myglm, newdata=rwm1984, type="link", se.fit=TRUE)
up <- lpred$fit + 1.96*lpred$se.fit; lo <- lpred$fit - 1.96*lpred$se.fit
eta <- lpred$fit ; mu <- myglm$family$linkinv(eta)
upci <- myglm$family$linkinv(up); loci <- myglm$family$linkinv(lo)
# 2.8.1
# -----
# Table 2.14 R: Marginal Effects at Mean
# ===========================================================
library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(pmem <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
mout <- mean(rwm1984$outwork); mage <- mean(rwm1984$age)
xb <- coef(pmem)[1] + coef(pmem)[2]*mout + coef(pmem)[3]*mage
dfdxb <- exp(xb) * coef(pmem)[3]
mean(dfdxb)
# ===========================================================
# 2.8.2
# -------
# R CODE
mean(rwm1984$docvis) * coef(pmem)[3]
# 2.8.3
# -------
# R CODE discrete change
summary(pmem <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
mu0 <- exp(pmem$coef[1] + pmem$coef[3]*mage)
mu1 <- exp(pmem$coef[1] + pmem$coef[2] + pmem$coef[3]*mage)
pe <- mu1 - mu0
mean(pe)
# R CODE avg partial effects
summary(pmem <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
bout = coef(pmem)[2]
mu = fitted.values(pmem)
xb = pmem$linear.predictors
pe_out = 0
pe_out = ifelse(rwm1984$outwork == 0, exp(xb + bout)-exp(xb), NA)
pe_out = ifelse(rwm1984$outwork == 1, exp(xb)-exp(xb-bout),pe_out)
mean(pe_out)
# ===============================================================
# CHAPTER 3 Testing Overdispersion
# ===============================================================
#
# 3.1
# ----
# Table 3.1 R: Deviance Goodness-of-Fit Test
# ================================================================
library(COUNT); data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
mymod <-glm(docvis ~ outwork + age, family=poisson, data=rwm1984)
mymod
dev<-deviance(mymod); df<-df.residual(mymod)
p_value<-1-pchisq(dev,df)
print(matrix(c("Deviance GOF","D","df","p-value", " ",
round(dev,4),df, p_value), ncol=2))
# ==================================================================
# R CODE
mymod <-glm(docvis ~ outwork + age, family=poisson, data=rwm1984)
pr <- sum(residuals(mymod, type="pearson")^2) # get Pearson Chi2
pchisq(pr, mymod$df.residual, lower=F) # calc p-value
pchisq(mymod$deviance, mymod$df.residual, lower= F) # calc p-vl
# P__disp is now available when the COUNT package is loaded.
# Table 3.2 R: Function to Calculate Pearson Chi2 and Dispersion Statistics
# =============================================================
P__disp <- function(x) {
pr <- sum(residuals(x, type="pearson")^2)
dispersion <- pr/x$df.residual
cat("\n Pearson Chi2 = ", pr ,
"\n Dispersion = ", dispersion, "\n")
}
# =============================================================
P__disp(mymod)
# R CODE
library(COUNT)
data(rwm5yr)
rwm1984 <- subset(rwm5yr, year==1984)
mymod <-glm(docvis ~ outwork + age, family=poisson, data=rwm1984)
P__disp(mymod)
# 3.3.1
# ----
# Table 3.4 R: Z-Score Test
# ===========================================================
library(COUNT); data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(poi <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
mu <-predict(poi, type="response")
z <- ((rwm1984$docvis - mu)^2 - rwm1984$docvis)/ (mu * sqrt(2))
summary(zscore <- lm(z ~ 1))
# ==========================================================
# 3.3.2
# -----
# Table 3.5 R: Lagrange Multiplier Test
# ===============================================
obs <- nrow(rwm1984) # continue from Table 3.4 (mu defined there)
mmu <- mean(mu); nybar <- obs*mmu; musq <- mu*mu
mu2 <- mean(musq)*obs
chival <- (mu2 - nybar)^2/(2*mu2); chival
pchisq(chival,1,lower.tail = FALSE)
# ===============================================
# 3.3.3
# -----
# Table 3.7 R: Poisson Model with Ancillary Statistics
# ============================================================
library(COUNT); data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(poi1 <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
pr <- sum(residuals(poi1, type="pearson")^2) # Pearson Chi2
pr/poi1$df.residual # dispersion statistic
poi1$aic / (poi1$df.null+1) # AIC/n
exp(coef(poi1)) # IRR
exp(coef(poi1))*sqrt(diag(vcov(poi1))) # delta method
exp(confint.default(poi1)) # CI of IRR
modelfit(poi1) # same as Stata abic
sd(rwm1984$docvis)^2 # observed variance
xbp <- predict(poi1) # xb, linear predictor
mup <- exp(xbp) # mu, fitted Poisson
mean(mup) # expected variance: mean=variance
# Table of observed vs expected counts
rbind(obs=table(rwm1984$docvis)[1:18],
exp = round(sapply(0:17, function(x)sum(dpois(x, fitted(poi1))))))
meany <- mean(rwm1984$docvis) # mean docvis
expect0 <- exp(-meany)*meany^0 / exp(log(factorial(0))) # expected prob of 0
zerodays <- (poi1$df.null+1) *expect0 # expected zero days
obs=table(rwm1984$docvis)[1:18] # observed number values in each count 0-17
exp = round(sapply(0:17, function(x)sum(dpois(x, fitted(poi1))))) # expected each count
chisq.test(obs, exp) # ChiSq test if obs & exp from same pop
# ============================================================
# 3.4.1
# -----
data(medpar)
# R: quasipoisson
# =============================================================
summary(poiql <- glm(los ~ hmo + white + factor(type),
family=quasipoisson, data=medpar))
# =============================================================
# Table 3.8 R: Scaling SE Medpar Data
# =====================================================
library(COUNT); data(medpar); attach(medpar)
summary(poi <- glm(los ~ hmo + white + factor(type), family=poisson,
data=medpar))
confint(poi) # profile confidence interval
pr <- sum(residuals(poi,type="pearson")^2) # Pearson statistic
dispersion <- pr/poi$df.residual; dispersion # dispersion
sse <- sqrt(diag(vcov(poi))) * sqrt(dispersion); sse # model SE
# OR
poiQL <- glm(los ~ hmo + white + factor(type), family=quasipoisson,
data=medpar)
coef(poiQL); confint(poiQL) # coeff & scaled SEs
modelfit(poi) # AIC,BIC statistics
# ======================================================
# 3.4.2
# ------
# Table 3.10 R: Quasi-likelihood Poisson Standard Errors
# ===============================================
poiQL <- glm(los ~ hmo+white+type2+type3, family=poisson, data=medpar)
summary(poiQL)
pr <-sum(residuals(poiQL, type="pearson")^2 )
disp <- pr/poiQL$df.residual # Pearson dispersion
se <-sqrt(diag(vcov(poiQL)))
QLse <- se/sqrt(disp); QLse
# ===============================================
# 3.4.3
# ------
# Table 3.12 R: Robust Standard Errors of medpar Model
# ====================================================
library(sandwich)
poi <- glm(los ~ hmo + white + factor(type), family=poisson, data=medpar)
vcovHC(poi)
sqrt(diag(vcovHC(poi, type="HC0"))) # final HC0 = H-C-zero
# Clustering
poi <- glm(los ~ hmo + white + factor(type), family=poisson, data=medpar)
library(haplo.ccs)
sandcov(poi, medpar$provnum)
sqrt(diag(sandcov(poi, medpar$provnum)))
# ====================================================
summary(poi1 <- glm(los ~ hmo+white+factor(type), family=poisson, data=medpar))
# 3.4.4
# ------
# Table 3.13 R: Bootstrap Standard Errors
# ======================================================
library(COUNT); library(boot); data(medpar)
poi <- glm(los ~ hmo + white + factor(type), family=poisson, data=medpar)
summary(poi)
t <- function (x, i) {
xx <- x[i,]
bsglm <- glm( los ~ hmo + white + factor(type), family=poisson, data=xx)
return(sqrt(diag(vcov(bsglm))))
}
bse <- boot(medpar, t, R=1000)
sqrt(diag(vcov(poi))); apply(bse$t,2, mean)
# =======================================================
# ===============================================================
# CHAPTER 4 Assessment of Fit
# ===============================================================
#
# 4.1
# R Pearson Chi2 and statistic and graph
summary(pexp <- glm(docvis ~ outwork + cage, family=poisson, data=rwm1984))
presid <- residuals(pexp, type="pearson")
pchi2 <- sum(residuals(pexp, type="pearson")^2) # Pearson Chi2
summary(rwm <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984))
P__disp(rwm)
mu <- predict(rwm, type="response")
grd <- par(mfrow = c(2,2))
plot(x=mu, y= rwm1984$docvis - mu, main = "Response residuals")
plot(x=mu, y= presid, main = "Pearson residuals")
# 4.2.1
# -------
# Table 4.2 R: Likelihood Ratio Test
# ====================================================
library(COUNT); library(lmtest); data(rwm5yr)
rwm1984 <- subset(rwm5yr, year==1984)
poi1 <- glm(docvis ~ outwork + age, family=poisson, data=rwm1984)
poi1a <- glm(docvis ~ outwork, family=poisson, data=rwm1984)
lrtest(poi1, poi1a)
drop1(poi1, test="Chisq")
# ====================================================
# 4.2.2
# ------
# Chi2 test boundary LR
pchisq(2.705,1, lower.tail=FALSE)/2
# 4.3.2
# ------
# Table 4.4 R: Version of Stata User Command, abic
# ========================================================
modelfit <- function(x) {
obs <- x$df.null + 1
aic <- x$aic
xvars <- x$df.null - x$df.residual + 1
rdof <- x$df.residual
aic_n <- aic/obs
ll <- xvars - aic/2
bic_r <- x$deviance - (rdof * log(obs))
bic_l <- -2*ll + xvars * log(obs)
bic_qh <- -2*(ll - xvars * log(xvars))/obs
c(AICn=aic_n, AIC=aic, BICqh=bic_qh, BICl=bic_l)
}
# modelfit(x) # substitute fitted model name for x
# ========================================================
library(COUNT)
data(medpar)
mymodel <- glm(los ~ hmo + white + factor(type), family=poisson, data=medpar)
modelfit(mymodel)
# ===============================================================
# CHAPTER 5 Negative Binomial Regression
# ===============================================================
#
# 5.3.1
# Table 5.4 R: rwm1984 Modeling Example
# ============================================================
# make certain the appropriate packages are loaded
library(COUNT); library(sandwich); data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
# USING glm.nb
summary(nbx <- glm.nb(docvis ~ outwork + age + married + female +
edlevel2 + edlevel3 + edlevel4, data=rwm1984))
exp(coef(nbx)); exp(coef(nbx))*sqrt(diag(vcov(nbx)))
exp(confint.default(nbx))
alpha <- 1/nbx$theta; alpha; P__disp(nbx)
modelfit(nbx)
xbnb <- predict(nbx); munb <- exp(xbnb)
# expected variance of NB model (using alpha where alpha=1/theta)
mean(munb)+ (1/nbx$theta)*mean(munb)^2
round(sqrt(rbind(diag(vcov(nbx)), diag(sandwich(nbx)))), digits=4)
# USING nbinomial
nb1 <- nbinomial(docvis ~ outwork + age + married + female +
edlevel2 + edlevel3 + edlevel4, data=rwm1984)
summary(nb1)
modelfit(nb1)
# ============================================================
# Table 5.5 R: rwm1984 Poisson and NB2 Models
# ============================================================
library(COUNT);library(msme)
data(rwm5yr);rwm1984 <- subset(rwm5yr, year==1984)
# POISSON
poi <- glm(docvis ~ outwork + age + married + female +
edlevel2 + edlevel3 + edlevel4,
family = poisson, data = rwm1984)
summary(poi)
#NB2
nb1 <- nbinomial(docvis ~ outwork + age + married + female +
edlevel2 + edlevel3 + edlevel4, data=rwm1984)
summary(nb1)
# NB1
library(gamlss)
summary(gamlss(formula = docvis ~ outwork + age + married + female +
edlevel2 + edlevel3 + edlevel4, family = NBII, data = rwm1984))
# ============================================================
# 5.4.3
# ------
# R CODE
# POISSON (quasipoisson family, for Pearson-scaled SEs)
library(COUNT) ; data(nuts)
nut <- subset(nuts, dbh<.6)
sntrees <- scale(nut$ntrees)
sheight <- scale(nut$height)
scover <- scale(nut$cover)
summary(PO <- glm(cones ~ sntrees + sheight + scover, family=quasipoisson, data=nut))
table(nut$cones)
summary(nut$cones)
# NEGATIVE BINOMIAL
library(msme)
NB <- nbinomial(cones ~ sntrees + sheight + scover, data=nut)
summary(NB)
# HETEROGENEOUS NEGATIVE BINOMIAL
summary(HNB <- nbinomial(cones ~ sntrees + sheight + scover,
formula2 =~ sntrees + sheight + scover, data=nut, family = "negBinomial",
scale.link = "log_s"))
exp(coef(HNB))
# ===============================================================
# CHAPTER 6 Poisson Inverse Gaussian Regression
# ===============================================================
#
# 6.2.2
# ------
# Table 6.3 R: Poisson Inverse Gaussian - rwm1984
# ========================================================
library(gamlss); library(COUNT); library(msme); library(sandwich)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(nbmod <- glm.nb(docvis ~ outwork + age, data=rwm1984))
vcovHC(nbmod)
sqrt(diag(vcovHC(nbmod, type="HC0")))
pigmod <- gamlss(docvis ~ outwork + age, data=rwm1984, family=PIG)
summary(pigmod)
exp(coef(pigmod))
# =========================================================
exp(1.344)
# Table 6.5 R: Poisson Inverse Gaussian - medpar
# ========================================================
library(gamlss); library(COUNT); library(msme); library(sandwich)
data(medpar)
summary(nbmod1 <- glm.nb(los ~ hmo + white + factor(type), data=medpar))
vcovHC(nbmod1)
sqrt(diag(vcovHC(nbmod1, type="HC0")))
pigmod1 <- gamlss(los ~ hmo + white + factor(type), data=medpar, family=PIG)
summary(pigmod1)
exp(coef(pigmod1))
# =========================================================
# ===============================================================
# CHAPTER 7 Problems with Zero Counts
# ===============================================================
#
#
# R
# ================================================
exp(-3) * 3^0 / exp(log(factorial(0)))
100* (exp(-3) * 3^0 / exp(log(factorial(0))))
# ================================================
# 7.1.1
# ------
# Table 7.1 R: Poisson and Zero-truncated Poisson
# ====================================================
library(msme)
library(gamlss.tr)
data(medpar)
poi <- glm(los ~ white + hmo + factor(type), family=poisson, data=medpar)
summary(poi)
ztp <- gamlss(los ~ white + hmo + factor(type), data=medpar, family="PO")
gen.trun(0, "PO", type="left", name = "lefttr")
lt0poi <- gamlss(los~white+hmo+ factor(type), data=medpar, family="POlefttr")
summary(lt0poi)
# ====================================================
# 7.1.2
# ------
# Table 7.2 Zero-Truncated Negative Binomial
# ==========================================================
library(msme); library(gamlss.tr)
data(medpar)
nb <- nbinomial(los~ white + hmo + factor(type), data=medpar)
summary(nb)
ztnb <- gamlss(los~ white + hmo + factor(type),data=medpar, family="NBI")
gen.trun(0, "NBI", type="left", name = "lefttr")
lt0nb <- gamlss(los~white+hmo+ factor(type), data=medpar, family="NBIlefttr")
summary(lt0nb)
# ==========================================================
# R: Calculate NB2 expected 0's for given alpha and mu
# ==========================================
a <- 1; mu <- 2 ; y <- 0
exp(y*log(a*mu/(1+a*mu))-(1/a)*log(1+a*mu)+
log(gamma(y +1/a))-log(gamma(y+1))-log( gamma(1/a)))
# ==========================================
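# Cross-check (added; not in the book): the same zero probability via R's
# built-in negative binomial pmf, parameterized by size = 1/a and mu.
dnbinom(0, size = 1/a, mu = mu) # matches the expression above (1/3 here)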
# R: Proof that sum of y probabilities from 0 to 100 is 1
# ================================================
a <- 1 ; mu <- 2 ; y <- 0:100
ff <- exp(y*log(a*mu/(1+a*mu))-(1/a)*log(1+a*mu)+
log(gamma(y +1/a))-log(gamma(y+1))-log( gamma(1/a)))
sum(ff)
# ================================================
# 7.2.1
# ------
# Table 7.3 R: Poisson-Logit Hurdle
# ====================================================
library(pscl); library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
hpl <- hurdle(docvis ~ outwork + age, dist="poisson", data=rwm1984,
zero.dist="binomial", link="logit")
summary(hpl); AIC(hpl)
# ====================================================
# Table 7.4: R Components to Poisson-Logit Hurdle
# ===================================================
visit <- ifelse(rwm1984$docvis >0, 1, 0)
table(visit)
logis <- glm(visit ~ outwork + age, data=rwm1984,
family=binomial(link="logit"))
summary(logis)
library(pscl)
hpl2 <- hurdle(docvis ~ outwork + age, data=rwm1984,
dist = "poisson", zero.dist="binomial", link="logit")
summary(hpl2)
logit <- glm(visit ~ outwork + age, data=rwm1984,
family=binomial(link="logit"))
summary(logit)
# ===================================================
# Table 7.5 R NB2-logit Hurdle <Assume Model from 7.3 Loaded>
# ======================================================
hnbl <- hurdle(docvis ~ outwork + age, dist="negbin", data=rwm1984,
zero.dist="binomial", link="logit")
summary(hnbl); AIC(hnbl)
alpha <- 1/hnbl$theta ; alpha
exp(coef(hnbl))
predhnbl <- hnbl$fitted.values
# ======================================================
# Table 7.6. R - ZIP
# ====================================================================
library(pscl); library(COUNT)
data(rwm5yr) ; rwm1984 <- subset(rwm5yr, year==1984)
poi <- glm(docvis ~ outwork + age, data=rwm1984, family=poisson)
zip <- zeroinfl(docvis ~ outwork + age | outwork + age, data=rwm1984, dist="poisson")
summary(zip)
print(vuong(zip,poi))
exp(coef(zip))
round(colSums(predict(zip, type="prob")[,1:17])) # expected counts
rbind(obs=table(rwm1984$docvis)[1:18]) # observed counts
# =================================================================
# 7.3.5
# -----
# R CODE
pred <- round(colSums(predict(zip, type="prob") [,1:13]))
obs <- table(rwm1984$docvis)[1:13]
rbind(obs, pred)
# 7.3.6
# ------
# Table 7.7. R - ZINB
# ===============================================================
library(pscl); library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
nb2 <- glm.nb(docvis ~ outwork + age, data=rwm1984)
zinb <- zeroinfl(docvis ~ outwork + age | outwork + age, data=rwm1984, dist="negbin")
summary(zinb)
print(vuong(zinb,nb2))
exp(coef(zinb))
pred <- round(colSums(predict(zinb, type="prob")[,1:13])) # expected counts
obs <- table(rwm1984$docvis)[1:13] # observed counts
rbind(obs, pred)
# ====================================================================
# 7.3.7
# -----
# R ZIPIG
# ======================================================
library(gamlss); library(COUNT); data(rwm1984); attach(rwm1984)
zpig <- gamlss(docvis ~ outwork + age, sigma.fo= ~ -1,
family="ZIPIG", data=rwm1984)
# summary(zpig) Throws error; uncomment to see
# code for calculating vuong, LR test, etc on book's website
# ======================================================================
# ===============================================================
# CHAPTER 8 Generalized Poisson
# ===============================================================
# ===============================================================
# CHAPTER 9 More Advanced Models
# ===============================================================
#
# 9.1 exact models
# R
# ==============================================
library(COUNT)
data(azcabgptca); attach(azcabgptca)
table(los); table(procedure, type); table(los, procedure)
summary(los)
summary(c91a <- glm(los ~ procedure+ type, family=poisson, data=azcabgptca))
modelfit(c91a)
summary(c91b <- glm(los ~ procedure+ type, family=quasipoisson, data=azcabgptca))
modelfit(c91b)
library(sandwich); sqrt(diag(vcovHC(c91a, type="HC0")))
# ==========================================================
# R
# ==============================================================
library(gamlss.tr)
gen.trun(0,"PO", type="left", name="leftr")
summary(c91c <- gamlss(los~ procedure+type, data=azcabgptca, family="POleftr"))
# ==============================================================================
# 9.2.1 Truncated models
# -----
# Table 9.2 R Left-Truncated at 3 Poisson
# ==================================================
library(COUNT); library(gamlss); library(gamlss.tr)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(plt <- gamlss(docvis~outwork + age,data=rwm1984,family="PO"))
pltvis<-subset(rwm1984, rwm1984$docvis>3)
summary(ltpo <- gamlss(docvis~outwork+age, family=trun(3, "PO", "left"), data=pltvis))
# -----------------
pltvis<-subset(rwm1984, rwm1984$docvis>3) # alternative method
gen.trun(3, "PO", "left") # saved globally for session
summary(lt3po <- gamlss(docvis~outwork+age, family=POleft, data=pltvis))
# ==================================================
# Table 9.3 R: Right-Truncated Poisson : cut=10
# ==================================================
rtp<-subset(rwm1984, rwm1984$docvis<10)
summary(rtpo <- gamlss(docvis~outwork + age, data=rtp,
family=trun(10, "PO", type="right"))) # LT, not LE
# ==================================================
# 9.2.2
# ------
# Table 9.4 R Left Censored Poisson at Cut=3
# =================================================
library(gamlss.cens); library(survival); library(COUNT)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
lcvis <- rwm1984
cy <- with(lcvis, ifelse(docvis<3, 3, docvis))
ci <- with(lcvis, ifelse(docvis<=3, 0, 1))
Surv(cy,ci, type="left")[1:100]
cbind(Surv(cy,ci, type="left")[1:50], rwm1984$docvis[1:50])
lcmdvis <- data.frame(lcvis, cy, ci )
rm(cy,ci); gen.cens("PO",type="left")
lcat30<-gamlss(Surv(cy, ci, type="left") ~ outwork + age,
data=lcmdvis, family=POlc)
summary(lcat30)
# ================================================
# Table 9.5 R Right censored Poisson at 10
# =========================================
library(gamlss.cens); library(survival); rcvis <- rwm1984
cy <- with(rcvis, ifelse(docvis>=10, 9, docvis))
ci <- with(rcvis, ifelse(docvis>=10, 0, 1))
rcvis <- data.frame(rcvis, cy, ci )
rm(cy,ci) ; gen.cens("PO",type="right")
summary(rcat30<-gamlss(Surv(cy, ci) ~ outwork + age,
data=rcvis, family=POrc, n.cyc=100))
# ==========================================
# 9.3
# ------
# Table 9.6 R: Poisson-Poisson Finite Mixture Model
# =================================================
library(COUNT)
library(flexmix)
data(fishing)
attach(fishing)
## FIXME the following code gives an error that the model argument should
## be one of "gaussian", "binomial", "poisson", "Gamma"
fmm_pg <- flexmix(totabund~meandepth + offset(log(sweptarea)),
          data=fishing, k=2,
          model=list(FLXMRglm(totabund~., family="poisson"),
          FLXMRglm(totabund~., family="poisson")))
parameters(fmm_pg, component=1, model=1)
parameters(fmm_pg, component=2, model=1)
summary(fmm_pg)
# =================================================
# 9.4
# ------
#Table 9.7 R: GAM
# ===========================================================
library(COUNT); library(mgcv)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
summary(pglm <- glm(docvis ~ outwork + age + female + married +
edlevel2 + edlevel3 + edlevel4, family=poisson, data=rwm1984))
summary(pgam <- gam(docvis ~ outwork + s(age) + female + married +
edlevel2 + edlevel3 + edlevel4, family=poisson, data=rwm1984))
plot(pgam)
# ===========================================================
# 9.6.1
#-----
# Table 9.8 R: GEE
# ================================================
library(COUNT); library(gee); data(medpar)
summary(pgee <- gee(los ~ hmo + white + age80 + type2 + type3,
data=medpar, id=medpar$provnum,
corstr='exchangeable', family=poisson))
# =============================================
# 9.6.2
# -----
# Table 9.9 R Random Intercept Poisson
# ================================================
library(gamlss.mx)
summary(rip <- gamlssNP(los ~ hmo + white + type2 + type3,
random=~1|provnum, data=medpar,
family="PO", mixture="gq", K=20))
# ================================================
# 9.7
# ----
# Table 9.10 R Generalized Waring Regression
# ========================================================
library(COUNT); library(GWRM)
data(rwm5yr); rwm1984 <- subset(rwm5yr, year==1984)
war <- GWRM.fit(docvis ~ outwork + age + female + married, data=rwm1984)
GWRM.display(war)
# =========================================================
# 9.8
# ----
# Table 9.11 R: Bayesian Poisson MCMC
# ======================================================
library(COUNT); library(MCMCpack); data(medpar)
summary(poi <- glm(los ~ hmo + white + type2 + type3,
family=poisson, data=medpar))
confint.default(poi)
summary(poibayes <- MCMCpoisson(los ~ hmo + white + type2 + type3,
burnin = 5000, mcmc = 100000, data=medpar))
# ======================================================
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/HILBE-MCD-Rcode.r
|
# ---------------------------------------------------------------------------------------
# zero altered negative binomial type I (with probability y=0 is nu) 01/03/10
# ---------------------------------------------------------------------------------------
ZANBI = function (mu.link = "log", sigma.link = "log", nu.link = "logit")
{
mstats <- checklink("mu.link", "ZANBI", substitute(mu.link),
c("inverse", "log", "identity"))
dstats <- checklink("sigma.link", "ZANBI", substitute(sigma.link),
c("inverse", "log", "identity"))
vstats <- checklink("nu.link", "ZANBI", substitute(nu.link),
c("logit", "probit", "cloglog", "log", "own"))
structure(list(family = c("ZANBI", "Zero altered negative binomial type I"),
parameters = list(mu = TRUE, sigma = TRUE, nu = TRUE),
nopar = 3,
type = "Discrete",
mu.link = as.character(substitute(mu.link)),
sigma.link = as.character(substitute(sigma.link)),
nu.link = as.character(substitute(nu.link)),
mu.linkfun = mstats$linkfun,
sigma.linkfun = dstats$linkfun,
nu.linkfun = vstats$linkfun,
mu.linkinv = mstats$linkinv,
sigma.linkinv = dstats$linkinv,
nu.linkinv = vstats$linkinv,
mu.dr = mstats$mu.eta,
sigma.dr = dstats$mu.eta,
nu.dr = vstats$mu.eta,
dldm = function(y,mu,sigma,nu) {dldm0 <- NBI()$dldm(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldm <- ifelse(y==0, 0 , dldm0)
dldm},
d2ldm2 = function(y,mu,sigma,nu) {dldm0 <- NBI()$dldm(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldm <- ifelse(y==0, 0 , dldm0)
d2ldm2 <- -dldm*dldm
d2ldm2 <- ifelse(d2ldm2 < -1e-15, d2ldm2,-1e-15)
d2ldm2},
dldd = function(y,mu,sigma,nu) {dldd0 <- NBI()$dldd(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldd <- ifelse(y==0, 0 , dldd0)
dldd},
d2ldd2 = function(y,mu,sigma,nu) {dldd0 <- NBI()$dldd(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldd <- ifelse(y==0, 0 , dldd0)
d2ldd2 <- -dldd^2
d2ldd2 <- ifelse(d2ldd2 < -1e-15, d2ldd2,-1e-15)
d2ldd2},
dldv = function(y,mu,sigma,nu) {dldv <- ifelse(y==0, 1/nu, -1/(1-nu))
dldv},
d2ldv2 = function(y,mu,sigma,nu) {d2ldv2 <- -1/(nu*(1-nu))
d2ldv2 <- ifelse(d2ldv2 < -1e-15, d2ldv2,-1e-15)
d2ldv2},
d2ldmdd = function(y) {dldm0 <- NBI()$dldm(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldm <- ifelse(y==0, 0 , dldm0)
dldd0 <- NBI()$dldd(y,mu,sigma) + dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)/(1-dNBI(0,mu,sigma))
dldd <- ifelse(y==0, 0 , dldd0)
d2ldm2<--dldm*dldd
d2ldm2},
d2ldmdv = function(y) {d2ldmdv=0
d2ldmdv},
d2ldddv = function(y) {d2ldddv=0
d2ldddv},
G.dev.incr = function(y,mu,sigma,nu,...) -2*dZANBI(y, mu = mu, sigma = sigma, nu=nu, log = TRUE),
rqres = expression(
rqres(pfun="pZANBI", type="Discrete", ymin=0, y=y, mu=mu, sigma=sigma, nu=nu)
),
mu.initial = expression(mu <- (y + mean(y))/2),
## mu.initial = expression(mu <- y+0.5),
sigma.initial = expression(
sigma <- rep( max( ((var(y)-mean(y))/(mean(y)^2)),0.1),length(y))),
nu.initial = expression(nu <- rep(0.3, length(y))),
mu.valid = function(mu) all(mu > 0) ,
sigma.valid = function(sigma) all(sigma > 0),
nu.valid = function(nu) all(nu > 0 & nu < 1),
y.valid = function(y) all(y >= 0)
),
class = c("gamlss.family","family"))
}
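# Usage sketch (added for illustration; hypothetical data, not part of the
# original script): with gamlss loaded and this file sourced, fit as e.g.
# m <- gamlss(y ~ x, sigma.fo = ~ 1, nu.fo = ~ 1, family = ZANBI, data = mydata)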
#-------------------------------------------------------------------------------------------
dZANBI<-function(x, mu = 1, sigma = 1, nu = 0.3, log = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(x < 0) ) stop(paste("x must be >=0", "\n", ""))
if (length(sigma)>1) fy <- ifelse(sigma>0.0001, dnbinom(x, size=1/sigma, mu = mu, log = T),
dPO(x, mu = mu, log = T) )
else fy <- if (sigma<0.0001) dPO(x, mu = mu, log = T)
else dnbinom(x, size=1/sigma, mu = mu, log = T)
if (length(sigma)>1) fy0 <- ifelse(sigma>0.0001, dnbinom(0, size=1/sigma, mu = mu, log = T),
dPO(0, mu = mu, log = T) )
else fy0 <- if (sigma<0.0001) dPO(0, mu = mu, log = T)
else dnbinom(0, size=1/sigma, mu = mu, log = T)
logfy <- rep(0, length(x))
logfy <- ifelse((x==0), log(nu), log(1-nu) + fy - log(1-exp(fy0)))
if(log == FALSE) fy2 <- exp(logfy) else fy2 <- logfy
fy2
}
#------------------------------------------------------------------------------------------
pZANBI <- function(q, mu = 1, sigma = 1, nu = 0.3, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(q < 0) ) stop(paste("y must be >=0", "\n", ""))
if (length(sigma)>1) cdf1 <- ifelse(sigma>0.0001, pnbinom(q, size=1/sigma, mu=mu),
ppois(q, lambda = mu) )
else cdf1 <- if (sigma<0.0001) ppois(q, lambda = mu)
else pnbinom(q, size=1/sigma, mu=mu)
if (length(sigma)>1) cdf0 <- ifelse(sigma>0.0001, pnbinom(0, size=1/sigma, mu=mu),
ppois(0, lambda = mu) )
else cdf0 <- if (sigma<0.0001) ppois(0, lambda = mu)
else pnbinom(0, size=1/sigma, mu=mu)
# cdf <- rep(0,length(q))
# cdf1 <- ppois(q, lambda = mu, lower.tail = TRUE, log.p = FALSE)
# cdf2 <- ppois(0, lambda = mu, lower.tail = TRUE, log.p = FALSE)
cdf3 <- nu+((1-nu)*(cdf1-cdf0)/(1-cdf0))
cdf <- ifelse((q==0),nu, cdf3)
if(lower.tail == TRUE) cdf <- cdf else cdf <-1-cdf
if(log.p==FALSE) cdf <- cdf else cdf <- log(cdf)
cdf
}
#------------------------------------------------------------------------------------------
qZANBI <- function(p, mu = 1, sigma = 1, nu = 0.3, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(p < 0) | any(p > 1)) stop(paste("p must be between 0 and 1", "\n", ""))
if (log.p == TRUE) p <- exp(p) else p <- p
if (lower.tail == TRUE) p <- p else p <- 1 - p
pnew <- (p-nu)/(1-nu)
if (length(sigma)>1) cdf0 <- ifelse(sigma>0.0001, pnbinom(0, size=1/sigma, mu=mu),
ppois(0, lambda = mu) )
else cdf0 <- if (sigma<0.0001) ppois(0, lambda = mu)
else pnbinom(0, size=1/sigma, mu=mu)
pnew2 <- cdf0*(1-pnew) + pnew
pnew2 <- ifelse((pnew2 > 0 ),pnew2, 0)
if (length(sigma)>1) q <- ifelse(sigma>0.0001, qnbinom(pnew2, size=1/sigma, mu=mu),
qpois(pnew2, lambda = mu) )
else q <- if (sigma<0.0001) qpois(pnew2, lambda = mu)
else qnbinom(pnew2, size=1/sigma, mu=mu)
# q2 <- suppressWarnings(ifelse((pnew > 0 ), q, 0))
# suppressWarnings(q <- ifelse((pnew > 0 ), qpois(pnew, lambda = mu, ), 0))
q
}
#------------------------------------------------------------------------------------------
rZANBI <- function(n, mu = 1, sigma = 1, nu = 0.3)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(n <= 0)) stop(paste("n must be a positive integer", "\n", ""))
n <- ceiling(n)
p <- runif(n)
r <- qZANBI(p, mu=mu, sigma=sigma, nu=nu)
r
}
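# Sanity check (illustrative addition, not part of the original script):
# the ZANBI pmf puts mass nu at zero and should sum to ~1 over a wide support.
pp <- dZANBI(0:500, mu = 2, sigma = 1, nu = 0.3)
pp[1] # 0.3, the zero probability
sum(pp) # approximately 1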
#------------------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ZANBI.r
|
#------------------------------------------------------------------------------------------
# this is new ZAP, the Poisson Zero Adjusted distribution with extra probability for 0, in generic form
ZAP <- function (mu.link = "log", sigma.link = "logit")
{
mstats <- checklink("mu.link", "ZAP", substitute(mu.link),
c("1/mu^2", "log", "identity"))
dstats <- checklink("sigma.link", "ZAP", substitute(sigma.link),
c("logit", "probit", "cloglog", "cauchit", "log", "own"))
structure(
list(family = c("ZAP", "Poisson Zero Inflated"),
parameters = list(mu=TRUE, sigma=TRUE),
nopar = 2,
type = "Discrete",
mu.link = as.character(substitute(mu.link)),
sigma.link = as.character(substitute(sigma.link)),
mu.linkfun = mstats$linkfun,
sigma.linkfun = dstats$linkfun,
mu.linkinv = mstats$linkinv,
sigma.linkinv = dstats$linkinv,
mu.dr = mstats$mu.eta,
sigma.dr = dstats$mu.eta,
dldm = function(y,mu,sigma) {dldm0 <- PO()$dldm(y,mu) + dPO(0,mu)*PO()$dldm(0,mu)/(1-dPO(0,mu))
dldm <- ifelse(y==0, 0 , dldm0)
dldm},
d2ldm2 = function(y,mu,sigma) {dldm0 <- PO()$dldm(y,mu) + dPO(0,mu)*PO()$dldm(0,mu)/(1-dPO(0,mu))
dldm <- ifelse(y==0, 0 , dldm0)
d2ldm2 <- -dldm*dldm
d2ldm2},
dldd = function(y,mu,sigma) {dldd <- ifelse(y==0, 1/sigma, -1/(1-sigma))
dldd},
d2ldd2 = function(y,mu,sigma) {d2ldd2 <- -1/(sigma*(1-sigma))
d2ldd2},
d2ldmdd = function(y,mu,sigma) {d2ldmdd <- 0
d2ldmdd
},
G.dev.incr = function(y,mu,sigma,...) -2*dZAP(y,mu,sigma,log=TRUE),
rqres = expression(rqres(pfun="pZAP", type="Discrete", ymin=0, y=y, mu=mu, sigma=sigma)) ,
mu.initial = expression(mu <- (y+mean(y))/2), #rep(mean(y),length(y)) ),
sigma.initial = expression(sigma <-rep(0.3, length(y))),
mu.valid = function(mu) all(mu > 0) ,
sigma.valid = function(sigma) all(sigma > 0 & sigma < 1),
y.valid = function(y) all(y >= 0)
),
class = c("gamlss.family","family"))
}
#------------------------------------------------------------------------------------------
dZAP<-function(x, mu = 5, sigma = 0.1, log = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0", "\n", ""))
if (any(sigma <= 0) | any(sigma >= 1) ) stop(paste("sigma must be between 0 and 1", "\n", ""))
if (any(x < 0) ) stop(paste("x must be 0 or greater than 0", "\n", ""))
logfy <- rep(0, length(x))
logfy <- ifelse((x==0), log(sigma), log(1-sigma) + dPO(x,mu,log=T) - log(1-dPO(0,mu)) )
if(log == FALSE) fy <- exp(logfy) else fy <- logfy
fy
}
#------------------------------------------------------------------------------------------
pZAP <- function(q, mu = 5, sigma = 0.1, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0", "\n", ""))
if (any(sigma <= 0) | any(sigma >= 1) ) stop(paste("sigma must be between 0 and 1", "\n", ""))
if (any(q < 0) ) stop(paste("y must be 0 or greater than 0", "\n", ""))
cdf <- rep(0,length(q))
cdf1 <- ppois(q, lambda = mu, lower.tail = TRUE, log.p = FALSE)
cdf2 <- ppois(0, lambda = mu, lower.tail = TRUE, log.p = FALSE)
cdf3 <- sigma+((1-sigma)*(cdf1-cdf2)/(1-cdf2))
cdf <- ifelse((q==0),sigma, cdf3)
if(lower.tail == TRUE) cdf <- cdf else cdf <-1-cdf
if(log.p==FALSE) cdf <- cdf else cdf <- log(cdf)
cdf
}
#-----------------------------------------------------------------------------------------
qZAP <- function(p, mu = 5, sigma = 0.1, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0", "\n", ""))
if (any(sigma <= 0) | any(sigma >= 1) ) stop(paste("sigma must be between 0 and 1", "\n", ""))
if (any(p <= 0) | any(p >= 1)) stop(paste("p must be between 0 and 1", "\n", ""))
if (log.p == TRUE) p <- exp(p) else p <- p
if (lower.tail == TRUE) p <- p else p <- 1 - p
pnew <- (p-sigma)/(1-sigma)
pnew2 <- ppois(0, lambda = mu, lower.tail = TRUE, log.p = FALSE)*(1-pnew) + pnew
suppressWarnings(q <- ifelse((pnew > 0 ), qpois(pnew2, lambda = mu), 0))
q
}
#-----------------------------------------------------------------------------------------
rZAP <- function(n, mu=5, sigma=0.1)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0", "\n", ""))
if (any(sigma <= 0) | any(sigma >= 1) ) stop(paste("sigma must be between 0 and 1", "\n", ""))
if (any(n <= 0)) stop(paste("n must be a positive integer", "\n", ""))
n <- ceiling(n)
p <- runif(n)
r <- qZAP(p, mu = mu, sigma = sigma)
r
}
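# Sanity check (illustrative addition, not part of the original script;
# assumes gamlss.dist is loaded for dPO): the ZAP pmf puts mass sigma at
# zero and should sum to ~1 over a wide support.
pp <- dZAP(0:200, mu = 5, sigma = 0.1)
pp[1] # 0.1, the zero probability
sum(pp) # approximately 1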
#-----------------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ZAP.r
|
# ---------------------------------------------------------------------------------------
# zero inflated negative binomial type I (with probability y=0 is nu) 01/03/10
# ---------------------------------------------------------------------------------------
ZINBI = function (mu.link = "log", sigma.link = "log", nu.link = "logit")
{
mstats <- checklink("mu.link", "ZINBI", substitute(mu.link),
c("inverse", "log", "identity"))
dstats <- checklink("sigma.link", "ZINBI", substitute(sigma.link),
c("inverse", "log", "identity"))
vstats <- checklink("nu.link", "ZINBI", substitute(nu.link),
c("logit", "probit", "cloglog", "log", "own"))
structure(list(family = c("ZINBI", "Zero inflated negative binomial type I"),
parameters = list(mu = TRUE, sigma = TRUE, nu = TRUE),
nopar = 3,
type = "Discrete",
mu.link = as.character(substitute(mu.link)),
sigma.link = as.character(substitute(sigma.link)),
nu.link = as.character(substitute(nu.link)),
mu.linkfun = mstats$linkfun,
sigma.linkfun = dstats$linkfun,
nu.linkfun = vstats$linkfun,
mu.linkinv = mstats$linkinv,
sigma.linkinv = dstats$linkinv,
nu.linkinv = vstats$linkinv,
mu.dr = mstats$mu.eta,
sigma.dr = dstats$mu.eta,
nu.dr = vstats$mu.eta,
dldm = function(y,mu,sigma,nu) {dldm0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)
dldm <- ifelse(y==0, dldm0, NBI()$dldm(y,mu,sigma))
dldm},
d2ldm2 = function(y,mu,sigma,nu) {dldm0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)
dldm <- ifelse(y==0, dldm0, NBI()$dldm(y,mu,sigma))
d2ldm2 <- -dldm*dldm
d2ldm2 <- ifelse(d2ldm2 < -1e-15, d2ldm2,-1e-15)
d2ldm2},
dldd = function(y,mu,sigma,nu) {dldd0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)
dldd <- ifelse(y==0, dldd0, NBI()$dldd(y,mu,sigma))
dldd},
d2ldd2 = function(y,mu,sigma,nu) {dldd0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)
dldd <- ifelse(y==0, dldd0, NBI()$dldd(y,mu,sigma))
d2ldd2 <- -dldd^2
d2ldd2 <- ifelse(d2ldd2 < -1e-15, d2ldd2,-1e-15)
d2ldd2},
dldv = function(y,mu,sigma,nu) {dldv0 <- ((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*(1-dNBI(0,mu,sigma))
dldv <- ifelse(y==0, dldv0, -1/(1-nu))
dldv},
d2ldv2 = function(y,mu,sigma,nu) {dldv0 <- ((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*(1-dNBI(0,mu,sigma))
dldv <- ifelse(y==0, dldv0, -1/(1-nu))
d2ldv2 <- -dldv^2
d2ldv2 <- ifelse(d2ldv2 < -1e-15, d2ldv2,-1e-15)
d2ldv2},
d2ldmdd = function(y) {dldm0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)
dldm <- ifelse(y==0, dldm0, NBI()$dldm(y,mu,sigma))
dldd0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)
dldd <- ifelse(y==0, dldd0, NBI()$dldd(y,mu,sigma))
d2ldm2<--dldm*dldd
d2ldm2},
d2ldmdv = function(y) { #partial derivate of log-density respect to mu and alpha
dldm0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldm(0,mu,sigma)
dldm <- ifelse(y==0, dldm0, NBI()$dldm(y,mu,sigma))
dldv0 <- ((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*(1-dNBI(0,mu,sigma))
dldv <- ifelse(y==0, dldv0, -1/(1-nu))
d2ldmdv <- -dldm*dldv
d2ldmdv
},
d2ldddv = function(y) { #partial derivate of log-density respect to sigma and alpha
dldd0 <- (1-nu)*((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*dNBI(0,mu,sigma)*NBI()$dldd(0,mu,sigma)
dldd <- ifelse(y==0, dldd0, NBI()$dldd(y,mu,sigma))
dldv0 <- ((nu+(1-nu)*dNBI(0,mu,sigma))^(-1))*(1-dNBI(0,mu,sigma))
dldv <- ifelse(y==0, dldv0, -1/(1-nu))
d2ldddv <- -dldd*dldv
d2ldddv
},
G.dev.incr = function(y,mu,sigma,nu,...) -2*dZINBI(y, mu = mu, sigma = sigma, nu=nu, log = TRUE),
rqres = expression(
rqres(pfun="pZINBI", type="Discrete", ymin=0, y=y, mu=mu, sigma=sigma, nu=nu)
),
mu.initial = expression(mu <- (y + mean(y))/2),
## mu.initial = expression(mu <- y+0.5),
sigma.initial = expression(
sigma <- rep( max( ((var(y)-mean(y))/(mean(y)^2)),0.1),length(y))),
nu.initial = expression(nu <- rep(0.3, length(y))),
mu.valid = function(mu) all(mu > 0) ,
sigma.valid = function(sigma) all(sigma > 0),
nu.valid = function(nu) all(nu > 0 & nu < 1),
y.valid = function(y) all(y >= 0)
),
class = c("gamlss.family","family"))
}
#-------------------------------------------------------------------------------------------
dZINBI<-function(x, mu = 1, sigma = 1, nu = 0.3, log = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(x < 0) ) stop(paste("x must be >=0", "\n", ""))
if (length(sigma)>1) fy <- ifelse(sigma>0.0001, dnbinom(x, size=1/sigma, mu = mu, log = T),
dPO(x, mu = mu, log = T) )
else fy <- if (sigma<0.0001) dPO(x, mu = mu, log = T)
else dnbinom(x, size=1/sigma, mu = mu, log = T)
logfy <- rep(0, length(x))
logfy <- ifelse((x==0), log(nu+(1-nu)*exp(fy)), (log(1-nu) + fy ))
if(log == FALSE) fy2 <- exp(logfy) else fy2 <- logfy
fy2
}
#------------------------------------------------------------------------------------------
pZINBI <- function(q, mu = 1, sigma = 1, nu = 0.3, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(q < 0) ) stop(paste("y must be >=0", "\n", ""))
if (length(sigma)>1) cdf <- ifelse(sigma>0.0001, pnbinom(q, size=1/sigma, mu=mu),
ppois(q, lambda = mu) )
else cdf <- if (sigma<0.0001) ppois(q, lambda = mu)
else pnbinom(q, size=1/sigma, mu=mu)
cdf <- nu + (1-nu)*cdf
if(lower.tail == TRUE) cdf <- cdf else cdf <-1-cdf
if(log.p==FALSE) cdf <- cdf else cdf <- log(cdf)
cdf
}
#------------------------------------------------------------------------------------------
qZINBI <- function(p, mu = 1, sigma = 1, nu = 0.3, lower.tail = TRUE, log.p = FALSE)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(p < 0) | any(p > 1)) stop(paste("p must be between 0 and 1", "\n", ""))
if (log.p == TRUE) p <- exp(p) else p <- p
if (lower.tail == TRUE) p <- p else p <- 1 - p
pnew <- (p-nu)/(1-nu)
pnew <- ifelse((pnew > 0 ),pnew, 0)
if (length(sigma)>1) q <- ifelse(sigma>0.0001, qnbinom(pnew, size=1/sigma, mu=mu),
qpois(pnew, lambda = mu) ) # p already transformed above; do not re-apply lower.tail/log.p
else q <- if (sigma<0.0001) qpois(pnew, lambda = mu)
else qnbinom(pnew, size=1/sigma, mu=mu)
# q2 <- suppressWarnings(ifelse((pnew > 0 ), q, 0))
# suppressWarnings(q <- ifelse((pnew > 0 ), qpois(pnew, lambda = mu, ), 0))
q
}
#------------------------------------------------------------------------------------------
rZINBI <- function(n, mu = 1, sigma = 1, nu = 0.3)
{
if (any(mu <= 0) ) stop(paste("mu must be greater than 0 ", "\n", ""))
if (any(sigma <= 0) ) stop(paste("sigma must be greater than 0 ", "\n", ""))
if (any(nu <= 0)|any(nu >= 1)) #In this parametrization nu = alpha
stop(paste("nu must be between 0 and 1 ", "\n", ""))
if (any(n <= 0)) stop(paste("n must be a positive integer", "\n", ""))
n <- ceiling(n)
p <- runif(n)
r <- qZINBI(p, mu=mu, sigma=sigma, nu=nu)
r
}
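# Sanity check (illustrative addition, not part of the original script):
# the ZINBI zero mass is nu + (1-nu)*NB(0) and the pmf should sum to ~1.
pp <- dZINBI(0:500, mu = 2, sigma = 1, nu = 0.3)
pp[1] # nu + (1-nu)*dnbinom(0, size=1, mu=2), about 0.533
sum(pp) # approximately 1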
#------------------------------------------------------------------------------------------
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ZINBI.r
|
# R script for Stata abic command
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# and Hilbe, Logistic Regression Models, Chapman & Hall/CRC
model <- <insert model name>
obs <- model$df.null + 1
aic <- model$aic
xvars <- model$rank
rdof <- model$df.residual
aic_n <- aic/obs
ll <- xvars - aic/2
bic_r <- model$deviance - (rdof * log(obs))
bic_l <- -2*ll + xvars * log(obs)
bic_qh <- -2*(ll - xvars * log(xvars))/obs
aic; aic_n; bic_l; bic_qh
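# Usage sketch (added for illustration; the medpar model is an assumption,
# not part of the original script): replace the placeholder above with a
# fitted glm, e.g.
# model <- glm(los ~ hmo + white + factor(type), family=poisson, data=medpar)
# then run the lines above to obtain AIC, AIC/n, BIC, and BICqh.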
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/abic.r
|
binegbin.glm=function(y1,y2,x1,x2){
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# author: Masakazu Iwasaki, Clinical Statistics Group, Schering-Plough K.K., Tokyo
#
n=length(y1)
p1=ncol(x1)
p2=ncol(x2)
p=p1+p2
print(p)
eta1=eta(y1)
eta2=eta(y2)
beta1=qr.solve(t(x1)%*%x1)%*%t(x1)%*%eta1
beta2=qr.solve(t(x2)%*%x2)%*%t(x2)%*%eta2
beta=rbind(beta1,beta2)
print(beta)
mu1=inveta(x1%*%beta1)
mu2=inveta(x2%*%beta2)
# initial value
phi=sum((y1-mu1)^2+(y2-mu2)^2-(mu1+mu2))/sum(mu1^2+mu2^2)
alpha=sum((y1-mu1)*(y2-mu2))/(phi*sum(mu1*mu2))
for(j in 1:100){
FI=matrix(0,p,p)
Q=matrix(0,p,1)
for(i in 1:n){
xi1=c(x1[i,],rep(0,p2))
xi2=c(rep(0,p1),x2[i,])
xi=cbind(xi1,xi2)
yi=c(y1[i],y2[i])
mui=c(mu1[i],mu2[i])
#phii=((y1[i]-mu1[i])^2+(y2[i]-mu2[i])^2-(mu1[i]+mu2[i]))/(mu1[i]^2+mu2[i]^2)
#alphai=c(y1[i]-mu1[i])*(y2[i]-mu2[i])/(phii*mu1[i]*mu2[i])
fi=detadmu(mui)
FI=FI+xi%*%qr.solve(fi%*%sigma(mui,phi,alpha)%*%fi,tol=1e-20)%*%t(xi)
Q=Q+xi%*%qr.solve(sigma(mui,phi,alpha)%*%fi,tol=1e-20)%*%(yi-mui)
}
dbeta=qr.solve(FI,tol=1e-20)%*%Q
beta=beta+dbeta
beta1=beta[1:p1]
beta2=beta[(p1+1):(p1+p2)]
mu1=inveta(x1%*%beta1)
mu2=inveta(x2%*%beta2)
phi=sum((y1-mu1)^2+(y2-mu2)^2-(mu1+mu2))/sum(mu1^2+mu2^2)
alpha=sum((y1-mu1)*(y2-mu2))/(phi*sum(mu1*mu2))
lambda=(1+alpha)/(phi*alpha)
#print(phi)
#print(alpha)
#print(lambda)
}
#print(y1)
#print(mu1)
#print(y2)
#print(mu2)
#cormu=cor(mu1,mu2)
#cory=cor(y1,y2)
#print(cormu)
#print(cory)
dy1=y1-mu1
dy2=y2-mu2
#meany1_mean(y1)
#meany2_mean(y2)
meand1=mean(dy1)
meand2=mean(dy2)
stdd1=sd(dy1)
stdd2=sd(dy2)
meanmu1=mean(mu1)
meanmu2=mean(mu2)
mse1=sum(dy1^2)/(n-1)
mse2=sum(dy2^2)/(n-1)
#print(meany1)
#print(meany2)
#print(meand1)
#print(meand2)
#print(stdd1)
#print(stdd2)
asymcov=solve(FI)
list(beta1,beta2,phi,alpha,lambda,asymcov,meand1,meand2,mse1,mse2,meanmu1,meanmu2)
#list(beta1,beta2,phi,alpha,lambda,mu1,mu2,asymcov,meand1,meand2,mse1,mse2,meanmu1,meanmu2)
}
eta=function(mu){
log(mu)
}
inveta=function(eta){
exp(eta)
}
detadmu=function(mu){
diag(c(1/mu[1],1/mu[2]))
}
sigma=function(mu,phi,alpha){
matrix(c(mu[1]+phi*mu[1]^2,alpha*phi*mu[1]*mu[2],alpha*phi*mu[1]*mu[2],mu[2]+phi*mu[2]^2),2,2)
}
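# Usage sketch (hypothetical simulated data, added for illustration; not
# from the book). x1 and x2 are design matrices including an intercept:
# n <- 200
# x <- cbind(1, runif(n))
# y1 <- rpois(n, exp(x %*% c(1.0, 0.5)))
# y2 <- rpois(n, exp(x %*% c(0.8, -0.3)))
# fit <- binegbin.glm(y1, y2, x, x)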
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/binegbin_new.r
|
# geo_rng.r
# Table 10.2: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# Synthetic geometric regression
#
library(MASS)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb <- 2 + .75*x1 - 1.25*x2 # parameter values
exb <- exp(xb) # Poisson predicted value
xg <- rgamma(n = nobs, shape = 1, rate = 1) # gamma variate, param 1,1
xbg <-exb*xg # mix Poisson and gamma variates
gy <- rpois(nobs, xbg) # generate NB2 variates
geo <-glm.nb(gy ~ x1 + x2) # model geometric
summary(geo)
nbg <- glm(gy ~ x1 + x2, family=negative.binomial(1))
summary(nbg) # GLM NB2 with alpha=1
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/geo_rng.r
|
# hilbe.obspred.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Table 6.15 LOS : observed vs predicted and difference days 0-25
# amend as needed for own data
#
library(MASS)
rm(list=ls())
load("c://source/medpar.RData")
mdpar <- glm(los ~ hmo+white+type2+type3, family=poisson, data=medpar)
mu <- fitted(mdpar)
avgp <- sapply(0:25, function(i) mean(exp(-mu)*(mu^i)/factorial(i)))
propObsv <- with(subset(medpar, los < 26), table(los) / nrow(medpar))
Diff <- c(0,propObsv)*100 - avgp*100
data.frame(LOS=0:25, ObsProp=c(0,propObsv)*100, avgp*100, Diff)
# ==== GRAPH OF OBSERVED VS PREDICTED LOS =======================
plot(0:25, avgp, type="b", xlim=c(0,25),
main = "Observed vs Predicted Days",
xlab = "Days in Hospital", ylab = "Probability of LOS")
lines(0:25, c(0,propObsv), type = "b", pch = 2)
legend("topright", legend = c("Predicted Days","Observed Days"),
lty = c(1,1), pch = c(1,2))
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/hilbe.obspred.r
|
# NB1 maximum likelihood function J Hilbe 11Apr 2010
ml.nb1 <- function(formula, data, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nb1X <- model.matrix(formula, data = data)
nb1.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- exp(xb.hat)
r.hat <- (1/a.hat) * mu.hat
sum(dnbinom(y,
size = r.hat,
mu = mu.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nb1X) - 1))
fit <- optim(start,
nb1.reg.ml,
X = nb1X,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nb1X))
results <- results[c(2:nrow(results), 1),]
return(results)
}
load("c://source/medpar.RData")
medpar$type <- factor(medpar$type)
med.nb1 <- ml.nb1(los ~ hmo + white + type, data = medpar)
med.nb1
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ml.nb1.r
|
# NB2 maximum likelihood function J Hilbe 11Apr 2010
ml.nb2 <- function(formula, data, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nb2X <- model.matrix(formula, data = data)
nb2.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- exp(xb.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
mu = mu.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nb2X) - 1))
fit <- optim(start,
nb2.reg.ml,
X = nb2X,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nb2X))
results <- results[c(2:nrow(results), 1),]
return(results)
}
load("c://source/medpar.RData")
medpar$type <- factor(medpar$type)
med.nb2 <- ml.nb2(los ~ hmo + white + type, data = medpar)
med.nb2
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ml.nb2.r
|
# NB-C (canonical negative binomial) maximum likelihood function
ml.nbc <- function(formula, data, start = NULL, verbose = FALSE) {
mf <- model.frame(formula, data)
mt <- attr(mf, "terms")
y <- model.response(mf, "numeric")
nbcX <- model.matrix(formula, data = data)
nbc.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- 1 / ((exp(-xb.hat)-1)*a.hat)
p.hat <- 1 / (1 + a.hat*mu.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
prob = p.hat,
log = TRUE))
}
if (is.null(start))
start <- c(0.5, -1, rep(0, ncol(nbcX) - 1))
fit <- optim(start,
nbc.reg.ml,
X = nbcX,
y = y,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
if (verbose | fit$convergence > 0) print(fit)
beta.hat <- fit$par
se.beta.hat <- sqrt(diag(solve(-fit$hessian)))
results <- data.frame(Estimate = beta.hat,
SE = se.beta.hat,
Z = beta.hat / se.beta.hat,
LCL = beta.hat - 1.96 * se.beta.hat,
UCL = beta.hat + 1.96 * se.beta.hat)
rownames(results) <- c("alpha", colnames(nbcX))
results <- results[c(2:nrow(results), 1),]
return(results)
}
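# A usage sketch mirroring the ml.nb1/ml.nb2 examples above; the call below
# assumes the same medpar data used elsewhere in these scripts:
# load("c://source/medpar.RData")
# medpar$type <- factor(medpar$type)
# med.nbc <- ml.nbc(los ~ hmo + white + type, data = medpar)
# med.nbc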
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/ml.nbc.r
|
# modelfit function to calc AIC and BIC statistics post estimation
# Joseph M. Hilbe 12January, 2010
modelfit <- function(x) {
obs <- x$df.null + 1
aic <- x$aic
xvars <- x$rank
rdof <- x$df.residual
aic_n <- aic/obs
ll <- xvars - aic/2
bic_r <- x$deviance - (rdof * log(obs))
bic_l <- -2*ll + xvars * log(obs)
bic_qh <- -2*(ll - xvars * log(xvars))/obs
return(list("AIC" = aic, "AICn" = aic_n, "BIC" = bic_l, "BICqh" = bic_qh))
}
modelfit(x) # substitute fitted model name for x
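# Illustrative example (any glm-type fit works; model below mirrors the
# medpar Poisson model used elsewhere in these scripts):
# poi <- glm(los ~ hmo + white + type2 + type3, family=poisson, data=medpar)
# modelfit(poi)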
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/modelfit.r
|
# myTable.r - Frequency count and percentage table
# Table 9.40 : Hilbe, JM (2011) Negative Binomial Regression, 2nd ed, Cambridge Univ Press
library(MASS)
# load("c://source/mdvis.RData")
# numvisit <- mdvis$numvisit
myTable <- function(x) {
myDF <- data.frame( table(x) )
myDF$Prop <- prop.table( myDF$Freq )
myDF$CumProp <- cumsum( myDF$Prop )
myDF
}
# myTable(numvisit)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/myTable.r
|
# mysim.r
# Table 6.5 : Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# Monte Carlo simulation - Poisson
mysim <- function()
{
nobs <- 50000
x1 <-runif(nobs)
x2 <-runif(nobs)
py <- rpois(nobs, exp(2 + .75*x1 - 1.25*x2))
poi <- glm(py ~ x1 + x2, family=poisson)
pr <- sum(residuals(poi, type="pearson")^2)
prdisp <- pr/poi$df.residual
beta <- poi$coef
list(beta,prdisp)
}
B <- replicate(100, mysim())
apply(matrix(unlist(B[1,]),3,100),1,mean)
mean(unlist(B[2,]))
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/mysim.r
|
# nb.reg.ml.r
# Synthetic MLE Negative Binomial NB2 data and model
# Table 9.11: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# with assistance of: Andrew Robinson, University of Melbourne, Australia
#
set.seed(85132)
b <- c(5, 2, 3, 0.5) ## Population parameters
n <- 10000
X <- cbind(rlnorm(n), rlnorm(n)) ## Design matrix
y <- rnbinom(n = n, ## Choice of parameterization
mu = b[1] + b[2] * X[,1],
size = b[3] + b[4] * X[,2])
nb.reg.ml <- function(b.hat, X, y) { ## JCLL
sum(dnbinom(y,
mu = b.hat[1] + b.hat[2] * X[,1],
size = b.hat[3] + b.hat[4] * X[,2],
log = TRUE))
}
p.0 <- c(1,1,1,1) ## initial estimates
fit <- optim(p.0, ## Maximize the JCLL
nb.reg.ml,
X = X,
y = y,
control = list(fnscale = -1),
hessian = TRUE
)
stderr <- sqrt(diag(solve(-fit$hessian))) ## Asymptotic SEs
nbresults <- data.frame(fit$par, stderr)
nbresults
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nb.reg.ml.r
|
rm(list=ls())
load("c://source/medpar.RData")
medpar$type <- factor(medpar$type)
nbc.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- 1 / ((exp(-xb.hat)-1)*a.hat)
p.hat <- 1 / (1 + a.hat*mu.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
prob = p.hat,
log = TRUE))
}
# Create the design matrix
nbcX <- model.matrix(~ hmo + white + type, data = medpar)
# Starting points (discovered by trial and error!)
p.0 <- c(alpha = 0.5,
cons = -1,
hmo = 0,
white = 0,
type2 = 0,
type3 = 0)
# Maximize the joint conditional LL
nbc.fit <- optim(p.0,
nbc.reg.ml,
X = nbcX,
y = medpar$los,
control = list(
fnscale = -1,
maxit = 10000),
hessian = TRUE
)
# and obtain the parameter estimates and asymptotic SE's by
(nbc.beta.hat <- nbc.fit$par)
(nbc.se.beta.hat <- sqrt(diag(solve(-nbc.fit$hessian))))
nbc.results <- data.frame(Estimate = nbc.beta.hat,
SE = nbc.se.beta.hat,
Z = nbc.beta.hat / nbc.se.beta.hat,
LCL = nbc.beta.hat - 1.96 * nbc.se.beta.hat,
UCL = nbc.beta.hat + 1.96 * nbc.se.beta.hat)
rownames(nbc.results) <- c("alpha", colnames(nbcX))
nbc.results <- nbc.results[c(2:nrow(nbc.results), 1),]
nbc.results
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nbc.reg.ml.r
|
# nbr2_6_2_2.r Changes to univariable models
# Table 6.6 Hilbe,JM (2011), Negative Binomial Regression, 2 ed, Cambridge Univ Press
nobs <- 50000
x1 <- runif(nobs)
y <-rpois(nobs, exp(1 + 0.5*x1))
poi <- glm(y ~ x1, family=poisson)
summary(poi)
mu <- predict(poi, type="response")
# change response y
y10 <- y*10
poi10 <- glm(y10 ~ x1, family=poisson)
summary(poi10)
mu10 <- predict(poi10, type="response")
# change predictor x1
x10 <- x1*10
poix <- glm(y ~ x10, family=poisson)
summary(poix)
mux10 <- predict(poix, type="response")
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nbr2_6_2_2.r
|
# nbr2_6_2_3.r Changes to multivariable models
# Table 6.7 Hilbe,JM (2011), Negative Binomial Regression, 2 ed, Cambridge Univ Press
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
ym <- rpois(nobs, exp(1 + 0.5*x1 - .25*x2))
poim <- glm(ym ~ x1 + x2, family=poisson)
summary(poim)
mum <- predict(poim, type="response")
# change response ym
ym10 <- ym*10
poim10 <- glm(ym10 ~ x1 + x2, family=poisson)
summary(poim10)
mum10 <- predict(poim10, type="response")
# change predictor x1
xm10 <- x1*10
poix10 <- glm(ym ~ xm10 + x2, family=poisson)
summary(poix10)
mumx10 <- predict(poix10, type="response")
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nbr2_6_2_3.r
|
# nbr2_7_1.r
# Table 7.1: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# Poisson with frequency table of observed counts
library(MASS)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
x3 <- runif(nobs)
py <-rpois(nobs, exp(1 + 0.5*x1 - 0.75*x2 + 0.25*x3))
cnt <- table(py)
df <- data.frame( prop.table( table(py) ) )
df$cumulative <- cumsum( df$Freq )
dfall <- data.frame(cnt, df$Freq, df$cumulative)
dfall
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nbr2_7_1.r
|
# nbsim.r
# Table 9.5: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# Synthetic NB2 Monte Carlo estimation
#
library(MASS)
mysim <- function()
{
nobs <- 50000
x1 <-runif(nobs)
x2 <-runif(nobs)
xb <- 2 + .75*x1 - 1.25*x2
a <- .5
ia <- 1/.5
exb <- exp(xb)
xg <- rgamma(n = nobs, shape = a, rate = a)
xbg <-exb*xg
nby <- rpois(nobs, xbg)
nbsim <-glm.nb(nby ~ x1 + x2)
alpha <- nbsim$theta
pr <- sum(residuals(nbsim, type="pearson")^2)
prdisp <- pr/nbsim$df.residual
beta <- nbsim$coef
list(alpha,prdisp,beta)
}
B <- replicate(100, mysim())
mean(unlist(B[1,]))
mean(unlist(B[2,]))
apply(matrix(unlist(B[3,]),3,100),1,mean)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/nbsim.r
|
# p.reg.ml.r
# Poisson optimization
# Ch 6.2.1 : Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
set.seed(3357)
b <- c(5, 1, 0.5) ## Population parameters
n <- 10000
X <- cbind(1, rnorm(n), rnorm(n)) ## Design matrix
y <- rpois(n = n, lambda = X %*% b)
p.reg.ml <- function(b.hat, X, y) { ## Joint Conditional LL
sum(dpois(y, lambda = X %*% b.hat, log = TRUE))
}
p.0 <- lm.fit(X, y)$coef ## Obtain initial estimates
fit <- optim(p.0, ## Maximize JCLL
p.reg.ml,
X = X,
y = y,
control = list(fnscale = -1),
hessian = TRUE
)
stderr <- sqrt(diag(solve(-fit$hessian))) ## Asymptotic SEs
poiresults <- data.frame(fit$par, stderr)
poiresults
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/p.reg.ml.r
|
# syn.bin_logit.r Synthetic grouped logistic regression
# Table 9.22: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
d <- rep(1:5, each=10000, times=1)*100 # denominator
xb <- 2 + .75*x1 - 1.25*x2 # linear predictor; values
exb <- 1/(1+exp(-xb)) # fit; predicted prob
by <- rbinom(nobs, size = d, p = exb) # random binomial variate
dby <- d - by # denominator - numerator
gby <- glm(cbind(by,dby) ~ x1 + x2, family=binomial(link="logit"))
summary(gby) # displays model output
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.bin_logit.r
|
# syn.cgeo.r Synthetic canonical geometric regression
# Table 10.3: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
nobs <- 50000
x2 <- runif(nobs)
x1 <- runif(nobs)
xb <- 1.25*x1 + .1*x2 - 1.5
mu <- 1/(exp(-xb)-1)
p <- 1/(1+mu)
r <- 1
gcy <- rnbinom(nobs, size=r, prob = p)
source("c://source/ml.nbc.r") # NB-C function
nbcdata <- data.frame(gcy, x1, x2)
g2y <- ml.nbc(gcy ~ x1 + x2, data = nbcdata)
g2y
library(gamlss)
hnbii <- gamlss(gcy ~ x1 + x2, family=NBII)
summary(hnbii)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.cgeo.r
|
# syn.geo.r Synthetic log-geometric regression
# Table 10.4: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
nobs <- 50000
x2 <- runif(nobs)
x1 <- runif(nobs)
xb <- 2*x1 - .5*x2 - 1
exb <- exp(xb)
xg <- rgamma(n = nobs, shape = 1, rate = 1)
xbg <-exb*xg
gy <- rpois(nobs, xbg)
gnb2 <-glm.nb(gy ~ x1 + x2)
summary(gnb2)
gpy <- glm(gy ~ x1 + x2, family=poisson)
summary(gpy)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.geo.r
|
# syn.hurdle_lnb2.r synthetic logit-NB2 hurdle model
# Table 11.4 Hilbe,JM (2011), Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
library(pscl)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb <- 2 + .75*x1 - 1.25*x2
a <- .5
ia <- 1/.5
exb <- exp(xb)
xg <- rgamma(n = nobs, shape = a, rate = a)    # gamma variates with mean 1
xbg <-exb*xg
nby <- rpois(nobs, xbg)
nbdata <- data.frame(nby, x1, x2)
pi <- 1/(1+exp(-(.9*x1 + .1*x2 + .2)))         # zero-generating probability
nbdata$bern <- as.numeric(runif(nobs) > pi)    # Bernoulli indicator
jhObs <- which(nbdata$bern == 0)
nbdata$nby[jhObs] <- 0                         # set counts to zero where indicator is 0
nbdata$hy <- nbdata$nby
hlnb2 <- hurdle(hy ~ x1 + x2, dist="negbin",
   zero.dist= "binomial", link="logit", data=nbdata)
summary(hlnb2)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.hurdle_lnb2.r
|
# syn.logit.r Synthetic logistic regression
# Table 9.20: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb <- 2 + .75*x1 - 1.25*x2 # linear predictor
exb <- 1/(1+exp(-xb)) # fit; predicted prob
by <- rbinom(nobs, size = 1, prob =exb) # random logit variates
lry <- glm(by ~ x1 + x2, family=binomial(link="logit"))
summary(lry)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.logit.r
|
# syn.nb1.r Synthetic NB1 regression
# Table 10.6: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# Synthetic NB1 regression amend as needed
#
library(MASS)
library(gamlss)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb <- 0.5 + 1.25*x1 - 1.5*x2
delta <- .5 # value assigned to delta
exb <-exp(xb)
idelta <- (1/delta)*exb
xg <-rgamma(n = 50000, shape = idelta, rate = idelta)
xbg <- exb*xg
nb1y <- rpois(50000, xbg)
jhgm <- gamlss(nb1y ~ x1 + x2, family=NBII)
summary(jhgm)
# nb1 <- ml.nb1(los ~ hmo + white + type) # Table 10.8
# nb1
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.nb1.r
|
# syn.nb2.r Synthetic NB2
# Table 9.3: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
nobs <- 50000
x1 <- qnorm(runif(nobs)) # random normal N[0,1] variate
x2 <- qnorm(runif(nobs)) # random normal N[0,1] variate
xb <- 2 + .75*x1 - 1.25*x2 # parameter values
a <- .5 # assign value to ancillary parameter
ia <- 1/.5 # invert alpha
exb <- exp(xb) # Poisson predicted value
xg <- rgamma(n = nobs, shape = a, rate = a)  # generate gamma variates given alpha
xbg <-exb*xg # mix Poisson and gamma variates
nby <- rpois(nobs, xbg) # generate NB2 variates
jhnb2 <-glm.nb(nby ~ x1 + x2) # model NB2
summary(jhnb2)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.nb2.r
|
# syn.nb2nb2fm.r Synthetic NB2-NB2 Finite Mixture model
# Table 13.3: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(gamlss.mx)
nobs <- 50000
x1 <- (runif(nobs))
x2 <- qnorm(runif(nobs))
xb1 <- 1 + .25*x1 - .75*x2
xb2 <- 2 + .75*x1 - 1.25*x2
a1 <- .5
a2 <- 1.5
ia1 <- 1/a1
ia2 <- 1/a2
exb1 <- exp(xb1)
exb2 <- exp(xb2)
xg1 <- rgamma(n = nobs, shape = a1, rate = a1)
xg2 <- rgamma(n = nobs, shape = a2, rate = a2)
xbg1 <-exb1*xg1
xbg2 <-exb2*xg2
nby1 <- rpois(nobs, xbg1)
nby2 <- rpois(nobs, xbg2)
nbxnb <- nby2
nbxnb <- ifelse(runif(nobs) > .9, nby1, nbxnb)
nxn <- gamlssNP(nbxnb~x1+x2, random=~1,family=NBI, K=2)
summary(nxn)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.nb2nb2fm.r
|
# syn.nb2o.r Synthetic NB2 with offset
# Table 9.9: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
x1 <- qnorm(runif(50000))
x2 <- qnorm(runif(50000))
off <- rep(1:5, each=10000, times=1)*100 # offset
loff <- log(off) # log of offset
xb<-2 + .75*x1 -1.25*x2 + loff # linear predictor
exb <-exp(xb) # inverse link
a <- .5 # assign value to alpha
ia <- 1/.5 # invert alpha
xg <- rgamma(n = 50000, shape = a, rate = a) # generate gamma variates w alpha
xbg <-exb*xg # mix Poisson and gamma variates
nbyo <- rpois(50000, xbg) # generate NB2 variates - w offset
nb2o <-glm.nb(nbyo ~ x1 + x2 + offset(loff)) # model NB2
summary(nb2o)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.nb2o.r
|
# syn.nbc.r
# Table 10.9: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
# with assistance of: Andrew Robinson, University of Melbourne, Australia
# Synthetic NB-C estimated using optimization
#
library(MASS)
nobs <- 50000
x2 <- runif(nobs)
x1 <- runif(nobs)
a <- 1.15 # value of alpha: 1.15
xb <- 1.25*x1 + .1*x2 - 1.5
mu <- 1/((exp(-xb)-1)*a)
p <- 1/(1+a*mu)
r <- 1/a
nbcy <- rnbinom(50000, size=r, prob = p)
nbc.reg.ml <- function(b.hat, X, y) {
a.hat <- b.hat[1]
xb.hat <- X %*% b.hat[-1]
mu.hat <- 1 / ((exp(-xb.hat)-1)*a.hat)
p.hat <- 1 / (1 + a.hat*mu.hat)
r.hat <- 1 / a.hat
sum(dnbinom(y,
size = r.hat,
prob = p.hat,
log = TRUE))
}
nbcX <- cbind(1, x1, x2)
p.0 <- c(a.hat = 1,
b.0 = -2,
b.1 = 1,
b.2 = 1)
fit <- optim(p.0, ## Maximize the JCLL
nbc.reg.ml,
X = nbcX,
y = nbcy,
control = list(fnscale = -1),
hessian = TRUE
)
fit$par ## ML estimates
sqrt(diag(solve(-fit$hessian))) ## SEs
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.nbc.r
|
# syn.poisson.r
# Table 6.4 : Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
nobs <- 50000
x1 <- qnorm(runif(nobs))
x2 <- qnorm(runif(nobs))
py <-rpois(nobs, exp(2 + .75*x1 -1.25*x2))
jhpoi <-glm(py ~ x1 + x2, family=poisson)
summary(jhpoi)
confint(jhpoi)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.poisson.r
|
# syn.poissono.r Poisson with offset
# Table 6.20 : Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
off <- rep(1:5, each=10000, times=1)*100 # offset
loff <- log(off)
py <-rpois(nobs, exp(2 + .75*x1 -1.25*x2 + loff))
poir <-glm(py ~ x1 + x2 + offset(loff), family=poisson)
summary(poir)
confint(poir)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.poissono.r
|
# syn.ppfm.r Synthetic Poisson-Poisson Finite Mixture model
# Table 13.2: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
#
library(flexmix)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb1 <- 1 + 0.25*x1 - 0.75*x2
xb2 <- 2 + 0.75*x1 - 1.25*x2
exb1 <- exp(xb1)
exb2 <- exp(xb2)
py1 <- rpois(nobs, exb1)
py2 <- rpois(nobs, exb2)
poixpoi <- py2
poixpoi <- ifelse(runif(nobs) > .9, py1, poixpoi)
pxp <- flexmix(poixpoi ~ x1 + x2, k=2,
model=FLXMRglm(family="poisson"))
summary(pxp)
parameters(pxp, component=1, model=1)
parameters(pxp, component=2, model=1)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.ppfm.r
|
# syn.probit.r Synthetic probit regression
# Table 9.21: Hilbe, Negative Binomial Regression, 2 ed, Cambridge Univ Press
library(MASS)
nobs <- 50000
x1 <- runif(nobs)
x2 <- runif(nobs)
xb <- 2 + .75*x1 - 1.25*x2
exb <- pnorm(xb)
by <- rbinom(nobs, size = 1, prob =exb)
pry <- glm(by ~ x1 + x2, family=binomial(link="probit"))
summary(pry)
|
/scratch/gouwar.j/cran-all/cranData/COUNT/inst/HILBE_SCRIPTS/syn.probit.r
|
apple <- function(x, level, url, dir, verbose){
# check
if(is.null(x$key_apple_mobility))
return(x)
# sanitize url
if(is.logical(url)){
if(!url) return(x)
jurl <- "https://covid19-static.cdn-apple.com/covid19-mobility-data/current/v3/index.json"
json <- readLines(jurl, warn = FALSE)
base <- gsub('^(.*)basePath":"(.*?)"(.*)$', "\\2", json)
csv <- gsub('^(.*)csvPath":"(.*?)"(.*)$', "\\2", json)
url <- paste0("https://covid19-static.cdn-apple.com", base, csv)
}
# read
a <- data.table::fread(url, encoding = "UTF-8", na.strings = "", header = TRUE, showProgress = verbose)
# format
id.vars <- c("region", "sub-region", "transportation_type")
measure.vars <- which(grepl("^\\d{4}-\\d{2}-\\d{2}$", colnames(a)))
a <- suppressWarnings(data.table::melt(a, id.vars = id.vars, measure.vars = measure.vars, variable.name = "date"))
a <- data.table::dcast(a, region + `sub-region` + date ~ transportation_type, value.var = "value")
# date
a$date <- as.Date(a$date)
# key
a$key_apple_mobility <- a$region
idx <- which(!is.na(a$`sub-region`))
a$key_apple_mobility[idx] <- paste(a$region[idx], a$`sub-region`[idx], sep = ", ")
# subset
a <- a[,c("date", "key_apple_mobility", "driving", "transit", "walking")]
# return
join(x, a, on = c("date" = "date", "key_apple_mobility" = "key_apple_mobility"))
}
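# Internal usage sketch (illustrative, not exported API): covid19() forwards
# the user-supplied 'amr' value as 'url', e.g.
# x <- apple(x, level = 1, url = "path/to/file.csv", dir = tempdir(), verbose = FALSE)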
|
/scratch/gouwar.j/cran-all/cranData/COVID19/R/apple.R
|
#' COVID-19 Data Hub
#'
#' Download COVID-19 data from \url{https://covid19datahub.io}
#'
#' @param country vector of country names or \href{https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes}{ISO codes} (ISO 3166-1 Alpha-2 code, Alpha-3 code, or numeric code). By default, downloads data for all countries.
#' @param level integer. Granularity level. 1: country-level data. 2: state-level data. 3: lower-level data.
#' @param start,end the start and the end date of the period of interest. The data are subsetted to match this time range.
#' @param vintage date. Retrieves the snapshot of the dataset that was available on the given date. This typically differs from subsetting the latest data, as most governments update their data retroactively. Available since 2020-04-14.
#' @param wb character vector of \href{https://data.worldbank.org}{World Bank} indicator codes. See details.
#' @param gmr link to the \href{https://www.google.com/covid19/mobility/}{Google Mobility Report} dataset, or \code{TRUE}. See details.
#' @param amr link to the \href{https://covid19.apple.com/mobility}{Apple Mobility Report} dataset, or \code{TRUE}. See details.
#' @param dir folder where the data files are to be downloaded.
#' @param verbose logical. Print on progress? Default \code{TRUE}.
#' @param ... backward compatibility, not used.
#'
#' @details
#'
#' Country-level covariates by \href{https://data.worldbank.org}{World Bank Open Data} can be added via the argument \code{wb}.
#' This is a character vector of indicator codes to download.
#' The codes can be found by inspecting the corresponding URL.
#' For example, the code of the indicator "Hospital beds (per 1,000 people)" available at \url{https://data.worldbank.org/indicator/SH.MED.BEDS.ZS} is \code{SH.MED.BEDS.ZS}.
#' The indicators are typically available at a yearly frequency.
#' This function returns the latest data available between the \code{start} and the \code{end} date.
#'
#' Mobility data by \href{https://www.google.com/covid19/mobility/}{Google Mobility Reports} can be added via the argument \code{gmr}.
#' This is the link to the Google "CSV by geographic area" ZIP folder.
#' At the time of writing, the link is \url{https://www.gstatic.com/covid19/mobility/Region_Mobility_Report_CSVs.zip}.
#' As the link has been stable since the beginning of the pandemic, the function accepts \code{gmr=TRUE} to automatically use this link.
#'
#' As of April 14, 2022, Apple is no longer providing COVID-19 \href{https://covid19.apple.com/mobility}{mobility trends reports}.
#' If you have downloaded the data file previously, you can still use it by setting \code{amr="path/to/file.csv"}.
#'
#' Refer to \href{https://covid19datahub.io/reference/index.html}{this webpage} for the details on the data sources, and
#' \href{https://covid19datahub.io/news/index.html}{see the changelog} for the latest news about the dataset.
#'
#' @return \code{data.frame}. See the \href{https://covid19datahub.io/articles/docs.html}{dataset documentation}
#'
#' @examples
#' \dontrun{
#'
#' # Worldwide data by country
#' x <- covid19()
#'
#' # Worldwide data by state
#' x <- covid19(level = 2)
#'
#' # Data for specific countries by county/province
#' x <- covid19(c("Italy", "US"), level = 3)
#'
#' # Retrieve the data that were available on 15 May, 2020
#' x <- covid19(vintage = "2020-05-15")
#'
#' # Download the files in the folder "data"
#' dir.create("data")
#' x <- covid19(dir = "data")
#'
#' # World Bank data
#' wb <- c("gdp" = "NY.GDP.MKTP.CD", "hosp_beds" = "SH.MED.BEDS.ZS")
#' x <- covid19(wb = wb)
#'
#' # Google Mobility Reports
#' x <- covid19(gmr = TRUE)
#'
#' # Apple Mobility Reports
#' x <- covid19(amr = "path/to/file.csv")
#'
#' }
#'
#' @source \url{https://covid19datahub.io}
#'
#' @references
#' Guidotti, E., Ardia, D., (2020), "COVID-19 Data Hub", Journal of Open Source Software 5(51):2376, \doi{10.21105/joss.02376}.
#'
#' Guidotti, E., (2022), "A worldwide epidemiological database for COVID-19 at fine-grained spatial resolution", Sci Data 9(1):112, \doi{10.1038/s41597-022-01245-1}.
#'
#' @note
#' We have invested a lot of time and effort in creating \href{https://covid19datahub.io}{COVID-19 Data Hub}, please:
#'
#' \itemize{
#' \item cite \href{https://joss.theoj.org/papers/10.21105/joss.02376}{Guidotti and Ardia (2020)} when using \href{https://covid19datahub.io}{COVID-19 Data Hub}.
#' \item place the URL \url{https://covid19datahub.io} in a footnote to help others find \href{https://covid19datahub.io}{COVID-19 Data Hub}.
#' \item you assume full risk for the use of \href{https://covid19datahub.io}{COVID-19 Data Hub}.
#' We try our best to guarantee the data quality and consistency and the continuous filling of the Data Hub.
#' However, it is free software and comes with ABSOLUTELY NO WARRANTY.
#' Reliance on \href{https://covid19datahub.io}{COVID-19 Data Hub} for medical guidance or use of \href{https://covid19datahub.io}{COVID-19 Data Hub} in commerce is strictly prohibited.
#' }
#'
#' @importFrom data.table :=
#'
#' @export
#'
covid19 <- function(country = NULL,
level = 1,
start = "2010-01-01",
end = Sys.Date(),
vintage = NULL,
wb = NULL,
gmr = NULL,
amr = NULL,
dir = tempdir(),
verbose = TRUE,
...){
oo <- options(timeout = 0)
on.exit(options(oo))
if(any(!level %in% 1:3))
stop("'level' must be one of 1, 2, 3 or a combination of those.")
if(is.logical(vintage)){
if(!vintage){
vintage <- NULL
}
else{
vintage <- end
warning(sprintf("Using vintage='%s' (end date). See ?covid19 for the new usage of the 'vintage' parameter.", vintage))
}
}
if(is.null(vintage)){
if(is.null(country) | all(level==1)){
x <- data.table::rbindlist(fill = TRUE, lapply(level, function(i){
url <- endpoint("level/", i, ".csv.gz")
read.gz(url, dir = dir, verbose = verbose)
}))
x <- filter(x, country = country, level = level, start = start, end = end)
}
else{
url <- endpoint("country/index.csv.gz")
map <- read.gz(url, dir = dir, verbose = verbose)
iso <- map$iso_alpha_3[
map$name %in% country |
map$iso_alpha_3 %in% country |
map$iso_alpha_2 %in% country |
map$iso_numeric %in% country ]
x <- data.table::rbindlist(fill = TRUE, lapply(iso, function(i){
url <- endpoint("country/", i, ".csv.gz")
read.gz(url, dir = dir, verbose = verbose)
}))
x <- filter(x, country = country, level = level, start = start, end = end)
}
}
else{
url <- sprintf("%s/%s%s", baseurl, vintage, ifelse(vintage>="2021-11-15", ".db.gz", ".zip"))
ext <- tools::file_ext(url)
if(ext=="zip"){
x <- read.zip(url, dir = dir, level = level, verbose = verbose)
x <- filter(x, country = country, level = level, start = start, end = end)
}
if(ext=="gz"){
x <- read.db(url, dir = dir, country = country, level = level, start, end, verbose = verbose)
}
}
x$date <- as.Date(x$date)
x <- data.table::data.table(x)
if(!is.null(wb))
x <- worldbank(x, indicator = wb, start = start, end = end)
if(!is.null(gmr))
x <- google(x, level = level, url = gmr, dir = dir, verbose = verbose)
if(!is.null(amr))
x <- apple(x, level = level, url = amr, dir = dir, verbose = verbose)
if(verbose){
print(utils::citation("COVID19"), bibtex = FALSE)
cat("To print citations in BibTeX format use:\n")
cat(" > print(citation('COVID19'), bibtex=TRUE)\n\n")
cat("To hide this message use 'verbose = FALSE'.\n")
}
return(data.frame(x))
}
|
/scratch/gouwar.j/cran-all/cranData/COVID19/R/covid19.R
|
google <- function(x, level, url, dir, verbose){
# sanitize url
if(is.logical(url)){
if(!url) return(x)
url <- "https://www.gstatic.com/covid19/mobility/Region_Mobility_Report_CSVs.zip"
}
# backward compatibility: use names instead of place_id
backward <- any(nchar(x$key_google_mobility)!=27, na.rm = TRUE)
if(backward){
if(is.null(x$key_google_mobility)){
idx <- which(x$administrative_area_level==1)
if(!length(idx)) return(x)
x$key_google_mobility[idx] <- x$iso_alpha_2[idx]
}
if(length(idx <- which(x$administrative_area_level>1))){
x$key_google_mobility[idx] <- paste(x$iso_alpha_2[idx], x$key_google_mobility[idx], sep = ", ")
x$key_google_mobility[idx] <- gsub("(, (NA)?)+$", "", x$key_google_mobility[idx])
}
}
# download
path <- download(url, dir = dir, verbose = verbose, timestamp = TRUE)
# files to read
pattern <- sprintf("\\_%s\\_", paste0(unique(x$iso_alpha_2), collapse = "|"))
files <- list.files(path, pattern = pattern, full.names = TRUE)
# read
g <- data.table::rbindlist(fill = TRUE, lapply(files, function(file){
g <- data.table::fread(file, encoding = "UTF-8", na.strings = "", showProgress = verbose)
if(backward){
g$place_id <- paste(g$country_region_code, g$sub_region_1, g$sub_region_2, g$metro_area, sep = ", ")
g$place_id <- gsub("(, (NA)?)+$", "", g$place_id)
}
g[g$place_id %in% x$key_google_mobility & !is.na(g$place_id),]
}))
# check
if(!nrow(g))
return(x)
# convert date
g$date <- as.Date(g$date)
# subset
g <- g[,c(
"place_id", "date",
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline")]
# return
join(x, g, on = c("date" = "date", "key_google_mobility" = "place_id"))
}
|
/scratch/gouwar.j/cran-all/cranData/COVID19/R/google.R
|
# Site URL
baseurl <- "https://storage.covid19datahub.io"
# Build endpoint
endpoint <- function(...){
ep <- paste0(list(...), collapse = "")
paste(baseurl, ep, sep = "/")
}
# Generate local filename
local <- function(..., dir, timestamp){
file <- paste0(list(...), collapse = "/")
file <- gsub("^https?://[^/]*/", "", file)
file <- gsub("\\.(gz|zip)$", "", file)
if(timestamp)
dir <- paste(dir, format(Sys.time(), "%Y-%m-%d"), sep = "/")
paste(dir, file, sep = "/")
}
# Read GZ files
read.gz <- function(url, dir, verbose){
file <- download(url, dir = dir, verbose = verbose, timestamp = TRUE)
data.table::fread(file, showProgress = verbose, encoding = "UTF-8", na.strings = "", keepLeadingZeros = TRUE)
}
# Read vintage ZIP
read.zip <- function(url, dir, level, verbose){
file <- download(url, dir = dir, verbose = verbose, timestamp = FALSE)
data.table::rbindlist(fill = TRUE, lapply(level, function(i){
rawdata <- sprintf("%s/rawdata-%s.csv", file, i)
dt <- try(data.table::fread(
rawdata, showProgress = verbose, encoding = "UTF-8", na.strings = "", keepLeadingZeros = TRUE),
silent = !verbose)
if("try-error" %in% class(dt)) return(NULL)
return(dt)
}))
}
# Read SQLite database
read.db <- function(url, dir, country, level, start, end, verbose){
if(!requireNamespace("RSQLite", quietly = TRUE))
stop("Package 'RSQLite' needed but not installed. Install with: install.packages('RSQLite')")
level <- paste(level, collapse = "','")
country <- paste(country, collapse = "','")
sql <- sprintf("
SELECT *
FROM
timeseries NATURAL JOIN location
WHERE
date BETWEEN '%s' AND '%s' AND
administrative_area_level IN ('%s')", start, end, level)
if(country!="")
sql <- sprintf("%s AND (
administrative_area_level_1 IN ('%s') OR
iso_alpha_3 IN ('%s') OR
iso_alpha_2 IN ('%s') OR
iso_numeric IN ('%s')
)", sql, country, country, country, country)
file <- download(url, dir = dir, verbose = verbose, timestamp = FALSE)
con <- RSQLite::dbConnect(RSQLite::SQLite(), file)
x <- RSQLite::dbGetQuery(con, sql)
RSQLite::dbDisconnect(con)
return(x)
}
# Download and return local filename
download <- function(url, dir, verbose, timestamp){
file <- local(url, dir = dir, timestamp = timestamp)
if(file.exists(file))
return(file)
tmp <- tempfile()
utils::download.file(url, destfile = tmp, mode = "wb", quiet = !verbose)
ext <- tools::file_ext(url)
if(ext=="gz")
R.utils::gunzip(tmp, file)
else if(ext=="zip")
utils::unzip(tmp, exdir = file)
else
file <- NULL
return(file)
}
# Subset the data by country, level, and time range
filter <- function(x, country, level, start, end){
x <- x[x$date>=start & x$date<=end & x$administrative_area_level %in% level,]
if(!is.null(country)){
x <- x[x$administrative_area_level_1 %in% country |
x$iso_alpha_3 %in% country |
x$iso_alpha_2 %in% country |
x$iso_numeric %in% country,]
}
return(x)
}
# Efficient left join
join <- function(x, y, on){
data.table::setnames(y, old = on, new = names(on))
data.table::setcolorder(y[x, on = names(on)], unique(names(x), names(y)))
}
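# Minimal sketch of the join() helper with hypothetical toy tables:
# x <- data.table::data.table(id = 1:3, v = c("a", "b", "c"))
# y <- data.table::data.table(key = c(1, 3), w = c(10, 30))
# join(x, y, on = c("id" = "key"))  # left-joins y onto x by id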
|
/scratch/gouwar.j/cran-all/cranData/COVID19/R/utils.R
|
worldbank <- function(x, indicator, start, end){
# check
if(!requireNamespace("wbstats", quietly = TRUE))
stop("Package 'wbstats' needed but not installed. Install with: install.packages('wbstats')")
# date
start <- format(as.Date(start), "%Y")
end <- format(as.Date(end), "%Y")
# set names
if(is.null(names(indicator)))
names(indicator) <- indicator
# download
w <- wbstats::wb_data(indicator = indicator, start_date = start, end_date = end, return_wide = FALSE)
# convert to data.table and sort
w <- data.table::data.table(w)
data.table::setkey(w, iso3c, date)
# bind variables for CRAN
value <- iso3c <- indicator_id <- NULL
# fill most recent value
w[, value := data.table::nafill(value, type = "locf"), by = list(iso3c, indicator_id)]
# pivot wider
w <- data.table::dcast(w, iso3c + date ~ indicator_id, value.var = "value", fill = NA)
# get most recent value for each country
w <- unique(w, by = "iso3c", fromLast = TRUE)
# drop year
w$date <- NULL
# rename
data.table::setnames(w, old = indicator, new = names(indicator))
# return
join(x, w, on = c("iso_alpha_3" = "iso3c"))
}
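# Internal usage sketch (illustrative): covid19() forwards the 'wb' argument,
# e.g. attaching GDP as in the exported examples:
# x <- worldbank(x, indicator = c("gdp" = "NY.GDP.MKTP.CD"),
#                start = "2020-01-01", end = "2020-12-31")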
|
/scratch/gouwar.j/cran-all/cranData/COVID19/R/worldbank.R
|
#' Add deflator variables to PNAD COVID19 microdata
#' @description This function adds deflator variables to PNAD COVID19 microdata. For deflation of income variables, the documentation provided through the following address must be used: \url{https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/Documentacao/COVIDIBGE_Deflator.pdf}.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param data_covid A tibble of PNAD COVID19 microdata read with \code{read_covid} function.
#' @param deflator.file The deflator file for the selected survey, available on the official website (select the deflator zip file): \url{https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/Documentacao/}.
#' @return A tibble with the data provided from PNAD COVID19 survey and the deflator variables added for use.
#' @note For more information, visit the survey official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{get_covid} for downloading, labeling, deflating and creating survey design object for PNAD COVID19 microdata.\cr \link[COVIDIBGE]{read_covid} for reading PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_labeller} for labeling categorical variables from PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_design} for creating PNAD COVID19 survey design object.\cr \link[COVIDIBGE]{covid_example} for getting the path of the PNAD COVID19 toy example files.
#' @examples
#' # Using data read from disk
#' data_path <- covid_example(path="exampledata.csv")
#' dictionary.path <- covid_example(path="dictionaryexample.xls")
#' deflator.path <- covid_example(path="deflatorexample.xls")
#' covid.df <- read_covid(microdata=data_path, vars=c("C001","C002"))
#' covid.df <- covid_labeller(data_covid=covid.df, dictionary.file=dictionary.path)
#' covid.df <- covid_deflator(data_covid=covid.df, deflator.file=deflator.path)
#' \donttest{
#' # Downloading data
#' covid.df2 <- get_covid(year=2020, month=5, vars=c("C001","C002"),
#' labels=TRUE, deflator=FALSE, design=FALSE,
#' reload=TRUE, curlopts=list(), savedir=tempdir())
#' deflator.path2 <- covid_example(path="deflatorexample.xls")
#' covid.df2 <- covid_deflator(data_covid=covid.df2, deflator.file=deflator.path2)}
#' @export
covid_deflator <- function(data_covid, deflator.file) {
if (sum(class(data_covid) == "tbl_df") > 0) {
if (!(FALSE %in% (c("Ano", "V1013", "UF") %in% names(data_covid)))) {
data_covid <- data_covid[, !names(data_covid) %in% c("Habitual", "Efetivo", "CO3"), drop=FALSE]
deflator <- suppressMessages(readxl::read_excel(deflator.file))
colnames(deflator)[c(1:3)] <- c("Ano", "V1013", "UF")
if (is.integer(data_covid$Ano)) {
deflator$Ano <- as.integer(deflator$Ano)
}
else {
deflator$Ano <- as.character(as.integer(deflator$Ano))
}
if (is.integer(data_covid$V1013)) {
deflator$V1013 <- as.integer(deflator$V1013)
}
else {
deflator$V1013 <- as.character(as.integer(deflator$V1013))
}
if (is.integer(data_covid$UF)) {
deflator$UF <- as.integer(deflator$UF)
}
else {
deflator$UF <- as.factor(deflator$UF)
if (identical(intersect(levels(deflator$UF), levels(as.factor(data_covid$UF))), character(0)) & length(levels(deflator$UF)) == length(levels(as.factor(data_covid$UF)))) {
levels(deflator$UF) <- levels(as.factor(data_covid$UF))
}
}
data_covid <- merge(x=data_covid, y=deflator, by.x=c("Ano", "V1013", "UF"), by.y=c("Ano", "V1013", "UF"), all.x=TRUE, all.y=FALSE)
if (!(FALSE %in% (c("ID_DOMICILIO") %in% names(data_covid)))) {
data_covid <- data_covid[order(data_covid$Estrato, data_covid$ID_DOMICILIO, data_covid$A001),]
}
else {
data_covid <- data_covid[order(data_covid$Estrato, data_covid$UPA, data_covid$V1008, data_covid$A001),]
}
data_covid <- tibble::as_tibble(data_covid)
}
else {
message("Merge variables required for adding deflator variables are missing.\n")
}
}
else {
message("The microdata object is not of the tibble class or sample design was already defined for microdata, so adding deflator variables is not possible.\n")
}
return(data_covid)
}
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/covid_deflator.R
|
#' Create PNAD COVID19 survey object with its sample design
#' @description This function creates PNAD COVID19 survey object with its sample design for analysis using \code{survey} package functions.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param data_covid A tibble of PNAD COVID19 microdata read with \code{read_covid} function.
#' @return An object of class \code{survey.design} or \code{svyrep.design} with the data from PNAD COVID19 and its sample design.
#' @note For more information, visit the survey official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{get_covid} for downloading, labeling, deflating and creating survey design object for PNAD COVID19 microdata.\cr \link[COVIDIBGE]{read_covid} for reading PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_labeller} for labeling categorical variables from PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_deflator} for adding deflator variables to PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_example} for getting the path of the PNAD COVID19 toy example files.
#' @examples
#' # Using data read from disk
#' data_path <- covid_example(path="exampledata.csv")
#' dictionary.path <- covid_example(path="dictionaryexample.xls")
#' deflator.path <- covid_example(path="deflatorexample.xls")
#' covid.df <- read_covid(microdata=data_path, vars=c("C001","C002"))
#' covid.df <- covid_labeller(data_covid=covid.df, dictionary.file=dictionary.path)
#' covid.df <- covid_deflator(data_covid=covid.df, deflator.file=deflator.path)
#' \donttest{
#' covid.svy <- covid_design(data_covid=covid.df)
#' # Calculating proportion of people temporarily away from work
#' if (!is.null(covid.svy)) survey::svymean(x=~C002, design=covid.svy, na.rm=TRUE)}
#' \donttest{
#' # Downloading data
#' covid.df2 <- get_covid(year=2020, month=5, vars=c("C001","C002"),
#' labels=TRUE, deflator=TRUE, design=FALSE,
#' reload=TRUE, curlopts=list(), savedir=tempdir())
#' covid.svy2 <- covid_design(data_covid=covid.df2)
#' # Calculating proportion of people temporarily away from work
#' if (!is.null(covid.svy2)) survey::svymean(x=~C002, design=covid.svy2, na.rm=TRUE)}
#' @export
covid_design <- function(data_covid) {
if (sum(class(data_covid) == "tbl_df") > 0) {
if (!(FALSE %in% (c("UPA", "ID_DOMICILIO", "Estrato", "V1030", "V1031", "V1032", "posest") %in% names(data_covid)))) {
options(survey.lonely.psu="adjust")
options(survey.adjust.domain.lonely=TRUE)
data_prior <- survey::svydesign(ids=~UPA, strata=~Estrato, data=data_covid, weights=~V1031, nest=TRUE)
popc.types <- data.frame(posest=as.character(unique(data_covid$posest)), Freq=as.numeric(unique(data_covid$V1030)))
popc.types <- popc.types[order(popc.types$posest),]
data_posterior <- survey::postStratify(design=data_prior, strata=~posest, population=popc.types)
}
else {
message("Weight variables required for sample design are missing.\n")
data_posterior <- data_covid
}
}
else {
message("The microdata object is not of the tibble class or sample design was already defined for microdata, so applying another design is not possible.\n")
data_posterior <- data_covid
}
return(data_posterior)
}
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/covid_design.R
|
#' Label categorical variables from PNAD COVID19 microdata
#' @description This function labels categorical variables from PNAD COVID19 microdata.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param data_covid A tibble of PNAD COVID19 microdata read with \code{read_covid} function.
#' @param dictionary.file The dictionary file for the selected survey, available on the official website (select a dictionary xls file): \url{https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/Documentacao/}.
#' @return A tibble with the data provided from PNAD COVID19 survey and its categorical variables as factors with related labels.
#' @note For more information, visit the survey official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{get_covid} for downloading, labeling, deflating and creating survey design object for PNAD COVID19 microdata.\cr \link[COVIDIBGE]{read_covid} for reading PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_deflator} for adding deflator variables to PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_design} for creating PNAD COVID19 survey design object.\cr \link[COVIDIBGE]{covid_example} for getting the path of the PNAD COVID19 toy example files.
#' @examples
#' # Using data read from disk
#' data_path <- covid_example(path="exampledata.csv")
#' dictionary.path <- covid_example(path="dictionaryexample.xls")
#' covid.df <- read_covid(microdata=data_path, vars=c("C001","C002"))
#' covid.df <- covid_labeller(data_covid=covid.df, dictionary.file=dictionary.path)
#' \donttest{
#' # Downloading data
#' covid.df2 <- get_covid(year=2020, month=5, vars=c("C001","C002"),
#' labels=FALSE, deflator=FALSE, design=FALSE,
#' reload=TRUE, curlopts=list(), savedir=tempdir())
#' dictionary.path2 <- covid_example(path="dictionaryexample.xls")
#' covid.df2 <- covid_labeller(data_covid=covid.df2, dictionary.file=dictionary.path2)}
#' @export
covid_labeller <- function(data_covid, dictionary.file) {
if (sum(class(data_covid) == "tbl_df") > 0) {
dictionary <- suppressMessages(readxl::read_excel(dictionary.file))
X__2 = X__5 = X__6 = NULL
colnames(dictionary) <- paste0("X__",1:dim(dictionary)[2])
dictionary %<>% subset(!is.na(X__5))
codcurrent <- dictionary$X__2
for (i in 1:dim(dictionary)[1]) {
if (is.na(dictionary$X__2[i])) {
dictionary$X__2[i] <- codcurrent
}
else {
codcurrent <- dictionary$X__2[i]
}
}
notlabel <- c("Ano", "UPA", "ID_DOMICILIO", "Estrato", "V1008", "V1012", "V1013", "V1016",
"V1030", "V1031", "V1032", "posest",
"A001", "A0011", "A001B1", "A001B2", "A001B3", "A002", "A006A1", "A006B1", "A006C1", "A007A1", "A007B1", "A007C1",
"B00371", "C0031", "C0051", "C0052", "C0053", "C007C1", "C007D1", "C007E1", "C007E2", "C008", "C009",
"C01011", "C01012", "C01021", "C01022", "C011A11", "C011A12", "C011A21", "C011A22", "C0161",
"D0012", "D0013", "D0022", "D0023", "D0032", "D0033", "D0042", "D0043", "D0052", "D0053", "D0062", "D0063", "D0072", "D0073", "D0074",
"E00241", "F0011", "F0021", "F0022", "F006",
"Habitual", "Efetivo", "CO3")
vars <- names(data_covid)
varsc <- vars[sapply(data_covid, class) == "character"]
varsf <- setdiff(varsc, notlabel)
for (i in 1:length(varsf)) {
if (i > 0 & varsf[i] %in% (dictionary$X__2)) {
data_covid[varsf[i]] <- factor(suppressWarnings(as.numeric(unlist(data_covid[varsf[i]]))),
levels=suppressWarnings(as.numeric(unlist(dictionary %>% subset(X__2 == varsf[i]) %>% select(X__5)))),
labels=unlist(dictionary %>% subset(X__2 == varsf[i]) %>% select(X__6)))
}
}
}
else {
message("The microdata object is not of the tibble class or sample design was already defined for microdata, so labeling categorical variables is not possible.\n")
}
return(data_covid)
}
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/covid_labeller.R
|
#' Get the path of the PNAD COVID19 toy example files
#' @description This function provides the path of the microdata from month 5 of year 2020 of the PNAD COVID19 toy example files, loaded with this package.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param path Name of file. If \code{NULL}, the PNAD COVID19 toy example files names will be listed.
#' @return A vector with names of all the available PNAD COVID19 toy example files or the path for specific requested PNAD COVID19 toy example file.
#' @note For more information, visit the survey official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{get_covid} for downloading, labeling, deflating and creating survey design object for PNAD COVID19 microdata.\cr \link[COVIDIBGE]{read_covid} for reading PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_labeller} for labeling categorical variables from PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_deflator} for adding deflator variables to PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_design} for creating PNAD COVID19 survey design object.
#' @examples
#' covid_example()
#' covid_example(path="exampledata.csv")
#' covid_example(path="dictionaryexample.xls")
#' covid_example(path="deflatorexample.xls")
#' @export
covid_example <- function(path = NULL) {
if (is.null(path)) {
dir(system.file("extdata", package="COVIDIBGE"))
}
else {
system.file("extdata", path, package="COVIDIBGE", mustWork=TRUE)
}
}
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/example.R
|
#' Download, label, deflate and create survey design object for PNAD COVID19 microdata
#' @description Core function of the package. With this function alone, the user can download the PNAD COVID19 microdata for a given month and get a sample design object ready to use with \code{survey} package functions.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param year The year of the data to be downloaded. Must be a number equal to 2020. Vector not accepted.
#' @param month The month of the year of the data to be downloaded. Must be number from 5 to 11. Vector not accepted.
#' @param vars Vector of variable names to be kept for analysis. Default is to keep all variables.
#' @param labels Logical value. If \code{TRUE}, categorical variables will presented as factors with labels corresponding to the survey's dictionary.
#' @param deflator Logical value. If \code{TRUE}, deflator variables will be available for use in the microdata.
#' @param design Logical value. If \code{TRUE}, will return an object of class \code{survey.design} or \code{svyrep.design}. It is strongly recommended to keep this parameter as \code{TRUE} for further analysis. If \code{FALSE}, only the microdata will be returned.
#' @param reload Logical value. If \code{TRUE}, will re-download the files even if they already exist in the save directory. If \code{FALSE}, will be checked if the files already exist in the save directory and the download will not be performed repeatedly, be careful with coinciding names of microdata files.
#' @param curlopts A named list object identifying the curl options for the handle when using functions from \code{RCurl} package.
#' @param savedir Directory to save the downloaded data. Default is to use a temporary directory.
#' @return An object of class \code{survey.design} or \code{svyrep.design} with the data from PNAD COVID19 and its sample design, or a tibble with selected variables of the microdata, including the necessary survey design ones.
#' @note For more information, visit the survey official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{read_covid} for reading PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_labeller} for labeling categorical variables from PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_deflator} for adding deflator variables to PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_design} for creating PNAD COVID19 survey design object.\cr \link[COVIDIBGE]{covid_example} for getting the path of the PNAD COVID19 toy example files.
#' @examples
#' \donttest{
#' covid.svy <- get_covid(year=2020, month=5, vars=c("C001","C002"),
#' labels=TRUE, deflator=TRUE, design=TRUE,
#' reload=TRUE, curlopts=list(), savedir=tempdir())
#' # Calculating proportion of people temporarily away from work
#' if (!is.null(covid.svy)) survey::svymean(x=~C002, design=covid.svy, na.rm=TRUE)}
#' @export
get_covid <- function(year, month, vars = NULL,
labels = TRUE, deflator = TRUE, design = TRUE, reload = TRUE, curlopts = list(), savedir = tempdir())
{
if (year != 2020) {
message("Year must be equal to 2020.\n")
return(NULL)
}
if (month < 5 | month > 11) {
message("Month number must be an integer from 5 to 11.\n")
return(NULL)
}
if (!(labels %in% c(TRUE, FALSE))) {
labels <- TRUE
message("Invalid value provided for labels argument, so default value TRUE was set to this argument.\n")
}
if (!(deflator %in% c(TRUE, FALSE))) {
deflator <- TRUE
message("Invalid value provided for deflator argument, so default value TRUE was set to this argument.\n")
}
if (!(design %in% c(TRUE, FALSE))) {
design <- TRUE
message("Invalid value provided for design argument, so default value TRUE was set to this argument.\n")
}
if (!(reload %in% c(TRUE, FALSE))) {
reload <- TRUE
message("Invalid value provided for reload argument, so default value TRUE was set to this argument.\n")
}
if (!is.list(curlopts)) {
curlopts <- list()
message("Invalid value provided for curlopts argument, as the value of this argument needs to be a list, so the value provided will be ignored.\n")
}
if (!dir.exists(savedir)) {
savedir <- tempdir()
message(paste0("The directory provided does not exist, so the directory was set to '", savedir), "'.\n")
}
if (savedir != tempdir()) {
printpath <- TRUE
}
else {
printpath <- FALSE
}
if (substr(savedir, nchar(savedir), nchar(savedir)) == "/" | substr(savedir, nchar(savedir), nchar(savedir)) == "\\") {
savedir <- substr(savedir, 1, nchar(savedir)-1)
}
ftpdir <- ("https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/")
if (!projmgr::check_internet()) {
message("The internet connection is unavailable.\n")
return(NULL)
}
if (httr::http_error(httr::GET(ftpdir, httr::timeout(60)))) {
message("The microdata server is unavailable.\n")
return(NULL)
}
restime <- getOption("timeout")
on.exit(options(timeout=restime))
options(timeout=max(600, restime))
ftpdata <- paste0(ftpdir, "Dados/")
datayear <- unlist(strsplit(unlist(strsplit(unlist(strsplit(gsub("\r\n", "\n", RCurl::getURL(ftpdata, dirlistonly=TRUE, .opts=curlopts)), "\n")), "<a href=[[:punct:]]")), ".zip"))
if (month < 10) {
dataname <- datayear[which(startsWith(datayear, paste0("PNAD_COVID_0", month, year)))]
}
else {
dataname <- datayear[which(startsWith(datayear, paste0("PNAD_COVID_", month, year)))]
}
if (length(dataname) == 0) {
message("Data unavailable for selected month and year.\n")
return(NULL)
}
else if (length(dataname) > 1) {
message("There is more than one file available for the requested microdata, please contact the package maintainer.\n")
return(NULL)
}
else {
dataname <- paste0(dataname, ".zip")
}
if (reload == FALSE & file.exists(paste0(savedir, "/", dataname))) {
message("The reload argument was defined as FALSE and the file of microdata was already downloaded, so the download process will not execute again.\n")
}
else {
utils::download.file(url=paste0(ftpdata, dataname), destfile=paste0(savedir, "/", dataname), mode="wb")
if (suppressWarnings(class(try(utils::unzip(zipfile=paste0(savedir, "/", dataname), exdir=savedir), silent=TRUE)) == "try-error")) {
message("The directory defined to save the downloaded data is denied permission to overwrite the existing files, please clear or change this directory.\n")
return(NULL)
}
if (reload == FALSE) {
message("The definition of FALSE for the reload argument will be ignored, since the file of microdata was not downloaded yet.\n")
}
}
utils::unzip(zipfile=paste0(savedir, "/", dataname), exdir=savedir)
if (month < 10) {
microdataname <- dir(savedir, pattern=paste0("^PNAD_COVID_0", month, year, ".*\\.csv$"), ignore.case=FALSE)
}
else {
microdataname <- dir(savedir, pattern=paste0("^PNAD_COVID_", month, year, ".*\\.csv$"), ignore.case=FALSE)
}
microdatafile <- paste0(savedir, "/", microdataname)
microdatafile <- rownames(file.info(microdatafile)[order(file.info(microdatafile)$mtime),])[length(microdatafile)]
data_covid <- COVIDIBGE::read_covid(microdata=microdatafile, vars=vars)
ftpdoc <- paste0(ftpdir, "Documentacao/")
if (labels == TRUE) {
if (exists("covid_labeller", where="package:COVIDIBGE", mode="function")) {
dicfiles <- unlist(strsplit(unlist(strsplit(unlist(strsplit(gsub("\r\n", "\n", RCurl::getURL(ftpdoc, dirlistonly=TRUE, .opts=curlopts)), "\n")), "<a href=[[:punct:]]")), ".xls"))
if (month < 10) {
dicname <- paste0(dicfiles[which(startsWith(dicfiles, paste0("Dicionario_PNAD_COVID_0", month, year)))], ".xls")
}
else {
dicname <- paste0(dicfiles[which(startsWith(dicfiles, paste0("Dicionario_PNAD_COVID_", month, year)))], ".xls")
}
if (reload == FALSE & file.exists(paste0(savedir, "/", dicname))) {
message("The reload argument was defined as FALSE and the file of dictionary was already downloaded, so the download process will not execute again.\n")
}
else {
utils::download.file(url=paste0(ftpdoc, dicname), destfile=paste0(savedir, "/", dicname), mode="wb")
if (reload == FALSE) {
message("The definition of FALSE for the reload argument will be ignored, since the file of dictionary was not downloaded yet.\n")
}
}
dicfile <- paste0(savedir, "/", dicname)
dicfile <- rownames(file.info(dicfile)[order(file.info(dicfile)$mtime),])[length(dicfile)]
data_covid <- COVIDIBGE::covid_labeller(data_covid=data_covid, dictionary.file=dicfile)
}
else {
message("Labeller function is unavailable in package COVIDIBGE.\n")
}
}
if (deflator == TRUE) {
if (exists("covid_deflator", where="package:COVIDIBGE", mode="function")) {
arcfiles <- unlist(strsplit(unlist(strsplit(unlist(strsplit(gsub("\r\n", "\n", RCurl::getURL(ftpdoc, dirlistonly=TRUE, .opts=curlopts)), "\n")), "<a href=[[:punct:]]")), ".zip"))
defzip <- paste0(arcfiles[which(startsWith(arcfiles, "Deflatores"))], ".zip")
if (reload == FALSE & file.exists(paste0(savedir, "/Deflatores.zip"))) {
message("The reload argument was defined as FALSE and the file of deflator was already downloaded, so the download process will not execute again.\n")
}
else {
utils::download.file(url=paste0(ftpdoc, defzip), destfile=paste0(savedir, "/Deflatores.zip"), mode="wb")
if (reload == FALSE) {
message("The definition of FALSE for the reload argument will be ignored, since the file of deflator was not downloaded yet.\n")
}
}
utils::unzip(zipfile=paste0(savedir, "/Deflatores.zip"), exdir=savedir)
defname <- dir(savedir, pattern=paste0("^Deflator_PNAD_COVID.*\\.xls$"), ignore.case=FALSE)
deffile <- paste0(savedir, "/", defname)
deffile <- rownames(file.info(deffile)[order(file.info(deffile)$mtime),])[length(deffile)]
data_covid <- COVIDIBGE::covid_deflator(data_covid=data_covid, deflator.file=deffile)
}
else {
message("Deflator function is unavailable in package COVIDIBGE.\n")
}
}
if (design == TRUE) {
if (exists("covid_design", where="package:COVIDIBGE", mode="function")) {
data_covid <- COVIDIBGE::covid_design(data_covid=data_covid)
}
else {
message("Sample design function is unavailable in package COVIDIBGE.\n")
}
}
if (printpath == TRUE) {
message("Paths of files downloaded in this function at the save directory provided are:")
message(paste0(list.files(path=savedir, pattern="COVID", full.names=TRUE), collapse="\n"), "\n")
}
return(data_covid)
}
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/get_covid.R
|
#' Read PNAD COVID19 microdata
#' @description This function reads PNAD COVID19 microdata.
#' @import dplyr httr magrittr projmgr RCurl readr readxl survey tibble timeDate utils
#' @param microdata A comma-separated values file containing microdata from the PNAD COVID19 survey, available on the official website (select a microdata file): \url{https://ftp.ibge.gov.br/Trabalho_e_Rendimento/Pesquisa_Nacional_por_Amostra_de_Domicilios_PNAD_COVID19/Microdados/Dados/}.
#' @param vars Vector of variable names to be kept for analysis. Default is to keep all variables.
#' @return A tibble with selected variables of the microdata, including the necessary survey design ones.
#' @note For more information, visit the survey's official website <\url{https://www.ibge.gov.br/estatisticas/investigacoes-experimentais/estatisticas-experimentais/27946-divulgacao-semanal-pnadcovid1?t=o-que-e}> and consult the other functions of this package, described below.
#' @seealso \link[COVIDIBGE]{get_covid} for downloading, labeling, deflating and creating survey design object for PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_labeller} for labeling categorical variables from PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_deflator} for adding deflator variables to PNAD COVID19 microdata.\cr \link[COVIDIBGE]{covid_design} for creating PNAD COVID19 survey design object.\cr \link[COVIDIBGE]{covid_example} for getting the path of the PNAD COVID19 toy example files.
#' @examples
#' data_path <- covid_example(path="exampledata.csv")
#' covid.df <- read_covid(microdata=data_path, vars=c("C001","C002"))
#' @export
read_covid <- function(microdata, vars = NULL) {
data_covid <- suppressWarnings(utils::read.csv(microdata, header=TRUE, sep=",", dec="."))
if (!is.null(vars)) {
if (any(!(vars %in% colnames(data_covid)))) {
missvar <- vars[!(vars %in% colnames(data_covid))]
message(paste("Variables", paste(missvar, collapse=", "), "not present in microdata.\n"))
}
keeps <- intersect(names(data_covid), c("Ano", "UF", "UPA", "ID_DOMICILIO", "Estrato", "V1008", "V1012", "V1013", "V1030", "V1031", "V1032", "posest", "A001", vars))
data_covid <- data_covid[,names(data_covid) %in% keeps]
}
label <- c("Ano", "UF", "CAPITAL", "RM_RIDE", "UPA", "ID_DOMICILIO", "Estrato", "V1008", "V1012", "V1013", "V1016", "V1022", "V1023", "posest",
"A001A", "A003", "A004", "A005", "A006", "A006A", "A006B", "A006C", "A007", "A007A", "A007B", "A007C", "A008", "A009",
"B0011", "B0012", "B0013", "B0014", "B0015", "B0016",
"B0017", "B0018", "B0019", "B00110", "B00111", "B00112", "B00113",
"B002", "B0031", "B0032", "B0033", "B0034", "B0035", "B0036", "B0037",
"B0041", "B0042", "B0043", "B0044", "B0045", "B0046", "B005", "B006", "B007",
"B008", "B009", "B009A", "B009B", "B009C", "B009D", "B009E", "B009F",
"B0101", "B0102", "B0103", "B0104", "B0105", "B0106", "B011",
"C001", "C002", "C003", "C004", "C005", "C006", "C007",
"C007A", "C007B", "C007C", "C007D", "C007E", "C007F", "C009A", "C010", "C0101", "C0102", "C0103",
"C0104", "C011A", "C011A1", "C011A2", "C012", "C013", "C014", "C015", "C016", "C017A",
"D0011", "D0021", "D0031", "D0041",
"D0051", "D0061", "D0071", "E001", "E0021", "E0022", "E0023", "E0024",
"F001", "F002A1", "F002A2", "F002A3", "F002A4", "F002A5", "F0061")
label <- intersect(names(data_covid), label)
data_covid[label] <- lapply(data_covid[label], as.character)
data_covid <- tibble::as_tibble(data_covid)
data_covid <- dplyr::mutate(data_covid, ID_DOMICILIO=paste0(data_covid$UPA, data_covid$V1008))
return(data_covid)
}
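# Illustrative sketch of the identifier construction above, with hypothetical
# values rather than real survey records: ID_DOMICILIO is the concatenation
# of the PSU code (UPA) and the household number (V1008).
toy <- tibble::tibble(UPA=c("110000016", "110000024"), V1008=c("01", "02"))
dplyr::mutate(toy, ID_DOMICILIO=paste0(toy$UPA, toy$V1008))
# yields "11000001601" and "11000002402"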
|
/scratch/gouwar.j/cran-all/cranData/COVIDIBGE/R/read_covid.R
|
CalcConPwrExp <- function(theta.0,
d1, o1, O1.star, lambda1.hat,
d2, o2, O2.star,
n.star,
alpha) {
## Calculates the conditional power
## for the exponential model, i. e.
## S(t) = exp(- lambda * t), lambda > 0, t >= 0.
##
## Args:
## theta.0: Originally postulated hazard ratio.
## d1, d2: Observed numbers of events in group 1 and group 2.
## o1, o2: Observed person months in group 1 and group 2.
## O1.star, O2.star: Estimated additional person months in group 1 and group 2.
## lambda1.hat: Maximum likelihood estimator of the hazard in group 1.
## n.star: Estimated number of patients at the end of the trial.
## alpha: Significance level.
##
## Returns:
## List of the theta grid, the conditional power curve gamma(theta)
## and the conditional power at theta.0.
# range of theta for conditional power function
min <- exp(min(log(theta.0), - log(theta.0)) - 1)
max <- exp(max(log(theta.0), - log(theta.0)) + 1)
# choice of suitable stepwidth for theta's
if ((max - min) / 0.01 <= 1000) {
theta <- seq(from = min,
to = max,
by = 0.01)
}
else {
theta <- seq(from = min,
to = max,
length.out = 1000)
}
# expected value under the null hypothesis
mu.theta.null <- (log((d2 + lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
# standard deviation under the null hypothesis
sigma.theta.null <- sqrt(2 / n.star)
# expected value under the alternative hypothesis
mu.theta.alter <- (log((d2 + theta * lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
# standard deviation under the alternative hypothesis
sigma.theta.alter <- sqrt(2 / n.star)
# (asymptotically) conditional power function
gamma.theta <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.alter) / sigma.theta.alter)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.alter) / sigma.theta.alter))
# values at theta.0
mu.theta.0 <- (log((d2 + theta.0 * lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
sigma.theta.0 <- sqrt(2 / n.star)
gamma.theta.0 <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.0) / sigma.theta.0)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.0) / sigma.theta.0))
return(list(theta, gamma.theta, gamma.theta.0))
}
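# Hypothetical usage sketch: CalcConPwrExp is an internal helper, normally
# called from ConPwrExp; the interim values below are invented for
# illustration and satisfy lambda1.hat = d1 / o1.
cp.exp <- CalcConPwrExp(theta.0 = 0.75,
d1 = 20, o1 = 400, O1.star = 150, lambda1.hat = 20 / 400,
d2 = 15, o2 = 380, O2.star = 160,
n.star = 80,
alpha = 0.05)
theta.grid <- cp.exp[[1]] # grid of hazard ratios
gamma.curve <- cp.exp[[2]] # conditional power along the grid
gamma.at.0 <- cp.exp[[3]] # conditional power at theta.0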
|
/scratch/gouwar.j/cran-all/cranData/CP/R/CalcConPwrExp.R
|
CalcConPwrExpAndersen <- function(theta.0,
d1, o1, O1.star, lambda1.hat,
d2, o2, O2.star,
alpha) {
## Calculates the conditional power
## for the exponential model, i. e.
## S(t) = exp(- lambda * t), lambda > 0, t >= 0,
## with the original formulae of the Andersen paper.
##
## Args:
## As in CalcConPwrExp, except that n.star is not needed here.
##
## Returns:
## List of the theta grid, the conditional power curve gamma(theta)
## and the conditional power at theta.0.
# range of theta for conditional power function
min <- exp(min(log(theta.0), - log(theta.0)) - 1)
max <- exp(max(log(theta.0), - log(theta.0)) + 1)
# choice of suitable stepwidth for theta's
if ((max - min) / 0.01 <= 1000) {
theta <- seq(from = min,
to = max,
by = 0.01)
}
else {
theta <- seq(from = min,
to = max,
length.out = 1000)
}
# expected value under the null hypothesis
mu.theta.null <- (log((d2 + lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
# standard deviation under the null hypothesis
sigma.theta.null <- (sqrt((1 / (o1 + O1.star) + 1 / (o2 + O2.star))
/ ((d1 + d2) / (o1 + o2))))
# expected value under the alternative hypothesis
mu.theta.alter <- (log((d2 + theta * lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
# standard deviation under the alternative hypothesis
sigma.theta.alter <- (sqrt(lambda1.hat * O1.star / (d1 + lambda1.hat * O1.star)^2
+ theta * lambda1.hat * O2.star / (d2 + theta * lambda1.hat * O2.star)^2))
# (asymptotically) conditional power function
gamma.theta <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null
- mu.theta.alter) / sigma.theta.alter)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null
- mu.theta.alter) / sigma.theta.alter))
# values at theta.0
mu.theta.0 <- (log((d2 + theta.0 * lambda1.hat * O2.star) / (o2 + O2.star))
- log((d1 + lambda1.hat * O1.star) / (o1 + O1.star)))
sigma.theta.0 <- (sqrt(lambda1.hat * O1.star / (d1 + lambda1.hat * O1.star)^2
+ theta.0 * lambda1.hat * O2.star / (d2 + theta.0 * lambda1.hat * O2.star)^2))
gamma.theta.0 <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null
- mu.theta.0) / sigma.theta.0)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null
- mu.theta.0) / sigma.theta.0))
return(list(theta, gamma.theta, gamma.theta.0))
}
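# Hypothetical usage sketch with the same invented interim data as in the
# sketch after CalcConPwrExp: the Andersen variant differs only in the
# variance approximations, so the two conditional power values will differ
# slightly.
cp.andersen <- CalcConPwrExpAndersen(theta.0 = 0.75,
d1 = 20, o1 = 400, O1.star = 150, lambda1.hat = 20 / 400,
d2 = 15, o2 = 380, O2.star = 160,
alpha = 0.05)
cp.andersen[[3]] # conditional power at theta.0 under Andersen's variance formulae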
|
/scratch/gouwar.j/cran-all/cranData/CP/R/CalcConPwrExpAndersen.R
|
CalcConPwrNonMix <- function(theta.0,
d1, o1.stroke, O1.stroke.star, c1.hat,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha) {
## Calculates the conditional power
## for the non-mixture model.
##
## Args:
## theta.0: Originally postulated hazard ratio.
## d1, d2: Observed numbers of events in group 1 and group 2.
## o1.stroke, o2.stroke: Functions of the person months in group 1 and group 2.
## O1.stroke.star, O2.stroke.star: Estimated functions of the additional person months.
## O2.stroke.star.null: As O2.stroke.star, but under the null hypothesis.
## c1.hat: Conditional maximum likelihood estimator of the cure parameter in group 1.
## n.star: Estimated number of patients at the end of the trial.
## alpha: Significance level.
##
## Returns:
## List of the theta grid, the conditional power curve gamma(theta)
## and the conditional power at theta.0.
# range of theta for conditional power function
min <- exp(min(log(theta.0), - log(theta.0)) - 1)
max <- exp(max(log(theta.0), - log(theta.0)) + 1)
# choice of suitable stepwidth for theta's
if ((max - min) / 0.01 <= 1000) {
theta <- seq(from = min,
to = max,
by = 0.01)
}
else {
theta <- seq(from = min,
to = max,
length.out = 1000)
}
# expected value under the null hypothesis
mu.theta.null <- (log((d2 - log(c1.hat) * O2.stroke.star.null) / (o2.stroke + O2.stroke.star.null))
- log((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star)))
# standard deviation under the null hypothesis
sigma.theta.null <- (sqrt(1 / (n.star
* (1 - exp(- (d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))
* (1 + ((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))^2)))
+ 1 / (n.star
* (1 - exp(- (d2 - log(c1.hat) * O2.stroke.star.null) / (o2.stroke + O2.stroke.star.null))
* (1 + ((d2 - log(c1.hat) * O2.stroke.star.null) / (o2.stroke + O2.stroke.star.null))^2)))))
# expected value under the alternative hypothesis
mu.theta.alter <- (log((d2 - theta * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))
- log((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star)))
# standard deviation under the alternative hypothesis
sigma.theta.alter <- (sqrt(1 / (n.star
* (1 - exp(- (d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))
* (1 + ((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))^2)))
+ 1 / (n.star
* (1 - exp(- (d2 - theta * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))
* (1 + ((d2 - theta * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))^2)))))
# (asymptotically) conditional power function
gamma.theta <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.alter) / sigma.theta.alter)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.alter) / sigma.theta.alter))
# values at theta.0
mu.theta.0 <- (log((d2 - theta.0 * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))
- log((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star)))
sigma.theta.0 <- (sqrt(1 / (n.star
* (1 - exp(- (d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))
* (1 + ((d1 - log(c1.hat) * O1.stroke.star) / (o1.stroke + O1.stroke.star))^2)))
+ 1 / (n.star
* (1 - exp(- (d2 - theta.0 * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))
* (1 + ((d2 - theta.0 * log(c1.hat) * O2.stroke.star) / (o2.stroke + O2.stroke.star))^2)))))
gamma.theta.0 <- (stats::pnorm((stats::qnorm(alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.0) / sigma.theta.0)
+ 1 - stats::pnorm((stats::qnorm(1 - alpha / 2) * sigma.theta.null + mu.theta.null
- mu.theta.0) / sigma.theta.0))
return(list(theta, gamma.theta, gamma.theta.0))
}
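# Hypothetical usage sketch: all arguments are invented for illustration,
# with 0 < c1.hat < 1 as required for the cure parameter of group 1.
cp.nonmix <- CalcConPwrNonMix(theta.0 = 0.75,
d1 = 20, o1.stroke = 320, O1.stroke.star = 120, c1.hat = 0.4,
d2 = 15, o2.stroke = 300, O2.stroke.star = 130, O2.stroke.star.null = 125,
n.star = 80,
alpha = 0.05)
cp.nonmix[[3]] # conditional power at theta.0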
|
/scratch/gouwar.j/cran-all/cranData/CP/R/CalcConPwrNonMix.R
|
CompSurvMod <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the exponential model and the non-mixture models with
## exponential, Weibull type and Gamma type survival
## with respect to two different treatments and no dropouts.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the mentioned models should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curves
## and optionally the Kaplan-Meier curves.
## Returns the estimates of the parameters, the hazard ratios
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, each for one group,
# and converting group expressions for internal calculations
# into values 1 and 2
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
########################
# EXPONENTIAL SURVIVAL #
########################
# maximum likelihood estimators for hazards of group 1 and group 2
d1 <- sum(data1[, 2])
o1 <- sum(data1[, 3])
d2 <- sum(data2[, 2])
o2 <- sum(data2[, 3])
lambda1.hat.exp <- d1 / o1
lambda2.hat.exp <- d2 / o2
# maximum of log-likelihood function without prefactor of censoring
# and AIC = - 2 * log-likelihood + 2 * (number of parameters)
# of group 1 and group 2
log.likelihood1.hat.exp <- sum(data1[, 2] * log(lambda1.hat.exp) - lambda1.hat.exp * data1[, 3])
log.likelihood2.hat.exp <- sum(data2[, 2] * log(lambda2.hat.exp) - lambda2.hat.exp * data2[, 3])
AIC1.exp <- - 2 * log.likelihood1.hat.exp + 2 * 1
AIC2.exp <- - 2 * log.likelihood2.hat.exp + 2 * 1
# estimator for hazard ratio theta = lambda2 / lambda1
theta.hat.exp <- lambda2.hat.exp / lambda1.hat.exp
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star.exp <- PersMonExp(d1, o1, n1.alive, new.pat[1], cont.time)
O2.star.exp <- PersMonExp(d2, o2, n2.alive, new.pat[2], cont.time)
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / length(x = data[, 1])
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
calc.conpwr.exp <- CalcConPwrExp(theta.0,
d1, o1, O1.star.exp, lambda1.hat.exp,
d2, o2, O2.star.exp,
n.star,
alpha)
theta <- calc.conpwr.exp[[1]]
gamma.theta.exp <- calc.conpwr.exp[[2]]
gamma.theta.0.exp <- calc.conpwr.exp[[3]]
# summary vector
summary.exp <- c(log.likelihood1.hat.exp, log.likelihood2.hat.exp,
AIC1.exp, AIC2.exp,
lambda1.hat.exp, lambda2.hat.exp,
O1.star.exp, O2.star.exp,
theta.hat.exp)
#####################################
# NON-MIXTURE: EXPONENTIAL SURVIVAL #
#####################################
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and if applicable projection into feasible region
init.val.data1.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
lambda1.0 <- init.val.data1.likelihood.nonmix.exp[1]
c1.0 <- init.val.data1.likelihood.nonmix.exp[2]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and if applicable projection into feasible region
init.val.data2.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
lambda2.0 <- init.val.data2.likelihood.nonmix.exp[1]
c2.0 <- init.val.data2.likelihood.nonmix.exp[2]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and if applicable projection into feasible region
init.val.data.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data)
# initial values for maximum likelihood estimation
# of parameters for all data
lambda.0 <- init.val.data.likelihood.nonmix.exp[1]
c.0 <- init.val.data.likelihood.nonmix.exp[2]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.exp <- LikelihoodNonMixExp(data1, data2, data,
lambda1.0, c1.0,
lambda2.0, c2.0,
lambda.0, c.0)
# maximum likelihood estimators of parameters,
# maximum of log-likelihood function without prefactor of censoring
# and AIC = - 2 * log-likelihood + 2 * (number of parameters)
# of group 1 and group 2
lambda1.hat.nonmix.exp <- likelihood.nonmix.exp[1]
c1.hat.nonmix.exp <- likelihood.nonmix.exp[2]
lambda2.hat.nonmix.exp <- likelihood.nonmix.exp[3]
c2.hat.nonmix.exp <- likelihood.nonmix.exp[4]
lambda.hat.nonmix.exp <- likelihood.nonmix.exp[5]
c1.cond.hat.nonmix.exp <- likelihood.nonmix.exp[6]
c2.cond.hat.nonmix.exp <- likelihood.nonmix.exp[7]
log.likelihood1.hat.nonmix.exp <- likelihood.nonmix.exp[8]
AIC1.nonmix.exp <- - 2 * log.likelihood1.hat.nonmix.exp + 2 * 2
log.likelihood2.hat.nonmix.exp <- likelihood.nonmix.exp[9]
AIC2.nonmix.exp <- - 2 * log.likelihood2.hat.nonmix.exp + 2 * 2
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption lambda1 = lambda2
theta.hat.nonmix.exp <- log(c2.cond.hat.nonmix.exp) / log(c1.cond.hat.nonmix.exp)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star.nonmix.exp <- PersMonNonMixExp(lambda1.hat.nonmix.exp, c1.hat.nonmix.exp,
n1.alive, new.pat[1], cont.time)
O2.star.nonmix.exp <- PersMonNonMixExp(lambda2.hat.nonmix.exp, c2.hat.nonmix.exp,
n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixExp(data1, lambda1.hat.nonmix.exp, group1.name)
o2.stroke <- FctPersMonNonMixExp(data2, lambda2.hat.nonmix.exp, group2.name)
o2.stroke.null <- FctPersMonNonMixExp(data2, lambda1.hat.nonmix.exp, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat.nonmix.exp
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.exp
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.exp
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat.nonmix.exp,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta.nonmix.exp <- calc.conpwr.nonmix[[2]]
gamma.theta.0.nonmix.exp <- calc.conpwr.nonmix[[3]]
# summary vector
summary.nonmix.exp <- c(log.likelihood1.hat.nonmix.exp, log.likelihood2.hat.nonmix.exp,
AIC1.nonmix.exp, AIC2.nonmix.exp,
lambda.hat.nonmix.exp, c1.cond.hat.nonmix.exp,
lambda.hat.nonmix.exp, c2.cond.hat.nonmix.exp,
O1.star.nonmix.exp, O2.star.nonmix.exp,
theta.hat.nonmix.exp)
######################################
# NON-MIXTURE: WEIBULL TYPE SURVIVAL #
######################################
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and if applicable projection into feasible region
init.val.data1.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
lambda1.0 <- init.val.data1.likelihood.nonmix.wei[1]
k1.0 <- init.val.data1.likelihood.nonmix.wei[2]
c1.0 <- init.val.data1.likelihood.nonmix.wei[3]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and if applicable projection into feasible region
init.val.data2.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
lambda2.0 <- init.val.data2.likelihood.nonmix.wei[1]
k2.0 <- init.val.data2.likelihood.nonmix.wei[2]
c2.0 <- init.val.data2.likelihood.nonmix.wei[3]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and if applicable projection into feasible region
init.val.data.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data)
# initial values for maximum likelihood estimation
# of parameters for all data
lambda.0 <- init.val.data.likelihood.nonmix.wei[1]
k.0 <- init.val.data.likelihood.nonmix.wei[2]
c.0 <- init.val.data.likelihood.nonmix.wei[3]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.wei <- LikelihoodNonMixWei(data1, data2, data,
lambda1.0, k1.0, c1.0,
lambda2.0, k2.0, c2.0,
lambda.0, k.0, c.0)
# maximum likelihood estimators of parameters,
# maximum of log-likelihood function without prefactor of censoring
# and AIC = - 2 * log-likelihood + 2 * (number of parameters)
# of group 1 and group 2
lambda1.hat.nonmix.wei <- likelihood.nonmix.wei[1]
k1.hat.nonmix.wei <- likelihood.nonmix.wei[2]
c1.hat.nonmix.wei <- likelihood.nonmix.wei[3]
lambda2.hat.nonmix.wei <- likelihood.nonmix.wei[4]
k2.hat.nonmix.wei <- likelihood.nonmix.wei[5]
c2.hat.nonmix.wei <- likelihood.nonmix.wei[6]
lambda.hat.nonmix.wei <- likelihood.nonmix.wei[7]
k.hat.nonmix.wei <- likelihood.nonmix.wei[8]
c1.cond.hat.nonmix.wei <- likelihood.nonmix.wei[9]
c2.cond.hat.nonmix.wei <- likelihood.nonmix.wei[10]
log.likelihood1.hat.nonmix.wei <- likelihood.nonmix.wei[11]
AIC1.nonmix.wei <- - 2 * log.likelihood1.hat.nonmix.wei + 2 * 3
log.likelihood2.hat.nonmix.wei <- likelihood.nonmix.wei[12]
AIC2.nonmix.wei <- - 2 * log.likelihood2.hat.nonmix.wei + 2 * 3
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption lambda1 = lambda2 and k1 = k2
theta.hat.nonmix.wei <- log(c2.cond.hat.nonmix.wei) / log(c1.cond.hat.nonmix.wei)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star.nonmix.wei <- PersMonNonMixWei(lambda1.hat.nonmix.wei, k1.hat.nonmix.wei, c1.hat.nonmix.wei,
n1.alive, new.pat[1], cont.time)
O2.star.nonmix.wei <- PersMonNonMixWei(lambda2.hat.nonmix.wei, k2.hat.nonmix.wei, c2.hat.nonmix.wei,
n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixWei(data1, lambda1.hat.nonmix.wei, k1.hat.nonmix.wei, group1.name)
o2.stroke <- FctPersMonNonMixWei(data2, lambda2.hat.nonmix.wei, k2.hat.nonmix.wei, group2.name)
o2.stroke.null <- FctPersMonNonMixWei(data2, lambda1.hat.nonmix.wei, k1.hat.nonmix.wei, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat.nonmix.wei
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.wei
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.wei
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat.nonmix.wei,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta.nonmix.wei <- calc.conpwr.nonmix[[2]]
gamma.theta.0.nonmix.wei <- calc.conpwr.nonmix[[3]]
# summary vector
summary.nonmix.wei <- c(log.likelihood1.hat.nonmix.wei, log.likelihood2.hat.nonmix.wei,
AIC1.nonmix.wei, AIC2.nonmix.wei,
lambda.hat.nonmix.wei, k.hat.nonmix.wei, c1.cond.hat.nonmix.wei,
lambda.hat.nonmix.wei, k.hat.nonmix.wei, c2.cond.hat.nonmix.wei,
O1.star.nonmix.wei, O2.star.nonmix.wei,
theta.hat.nonmix.wei)
####################################
# NON-MIXTURE: GAMMA TYPE SURVIVAL #
####################################
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and if applicable projection into feasible region
init.val.data1.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
a1.0 <- init.val.data1.likelihood.nonmix.gamma[1]
b1.0 <- init.val.data1.likelihood.nonmix.gamma[2]
c1.0 <- init.val.data1.likelihood.nonmix.gamma[3]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and if applicable projection into feasible region
init.val.data2.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
a2.0 <- init.val.data2.likelihood.nonmix.gamma[1]
b2.0 <- init.val.data2.likelihood.nonmix.gamma[2]
c2.0 <- init.val.data2.likelihood.nonmix.gamma[3]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and if applicable projection into feasible region
init.val.data.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data)
# initial values for maximum likelihood estimation
# of parameters for all data
a.0 <- init.val.data.likelihood.nonmix.gamma[1]
b.0 <- init.val.data.likelihood.nonmix.gamma[2]
c.0 <- init.val.data.likelihood.nonmix.gamma[3]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.gamma <- LikelihoodNonMixGamma(data1, data2, data,
a1.0, b1.0, c1.0,
a2.0, b2.0, c2.0,
a.0, b.0, c.0)
# maximum likelihood estimators of parameters,
# maximum of log-likelihood function without prefactor of censoring
# and AIC = - 2 * log-likelihood + 2 * (number of parameters)
# of group 1 and group 2
a1.hat.nonmix.gamma <- likelihood.nonmix.gamma[1]
b1.hat.nonmix.gamma <- likelihood.nonmix.gamma[2]
c1.hat.nonmix.gamma <- likelihood.nonmix.gamma[3]
a2.hat.nonmix.gamma <- likelihood.nonmix.gamma[4]
b2.hat.nonmix.gamma <- likelihood.nonmix.gamma[5]
c2.hat.nonmix.gamma <- likelihood.nonmix.gamma[6]
a.hat.nonmix.gamma <- likelihood.nonmix.gamma[7]
b.hat.nonmix.gamma <- likelihood.nonmix.gamma[8]
c1.cond.hat.nonmix.gamma <- likelihood.nonmix.gamma[9]
c2.cond.hat.nonmix.gamma <- likelihood.nonmix.gamma[10]
log.likelihood1.hat.nonmix.gamma <- likelihood.nonmix.gamma[11]
AIC1.nonmix.gamma <- - 2 * log.likelihood1.hat.nonmix.gamma + 2 * 3
log.likelihood2.hat.nonmix.gamma <- likelihood.nonmix.gamma[12]
AIC2.nonmix.gamma <- - 2 * log.likelihood2.hat.nonmix.gamma + 2 * 3
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption a1 = a2 and b1 = b2
theta.hat.nonmix.gamma <- log(c2.cond.hat.nonmix.gamma) / log(c1.cond.hat.nonmix.gamma)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star.nonmix.gamma <- PersMonNonMixGamma(a1.hat.nonmix.gamma, b1.hat.nonmix.gamma, c1.hat.nonmix.gamma,
n1.alive, new.pat[1], cont.time)
O2.star.nonmix.gamma <- PersMonNonMixGamma(a2.hat.nonmix.gamma, b2.hat.nonmix.gamma, c2.hat.nonmix.gamma,
n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixGamma(data1, a1.hat.nonmix.gamma, b1.hat.nonmix.gamma, group1.name)
o2.stroke <- FctPersMonNonMixGamma(data2, a2.hat.nonmix.gamma, b2.hat.nonmix.gamma, group2.name)
o2.stroke.null <- FctPersMonNonMixGamma(data2, a1.hat.nonmix.gamma, b1.hat.nonmix.gamma, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat.nonmix.gamma
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.gamma
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat.nonmix.gamma
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat.nonmix.gamma,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta.nonmix.gamma <- calc.conpwr.nonmix[[2]]
gamma.theta.0.nonmix.gamma <- calc.conpwr.nonmix[[3]]
# summary vector
summary.nonmix.gamma <- c(log.likelihood1.hat.nonmix.gamma, log.likelihood2.hat.nonmix.gamma,
AIC1.nonmix.gamma, AIC2.nonmix.gamma,
a.hat.nonmix.gamma, b.hat.nonmix.gamma, c1.cond.hat.nonmix.gamma,
a.hat.nonmix.gamma, b.hat.nonmix.gamma, c2.cond.hat.nonmix.gamma,
O1.star.nonmix.gamma, O2.star.nonmix.gamma,
theta.hat.nonmix.gamma)
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group 1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
DispDataAll(group1.name, group2.name, # 2 x 1 elements
interim.data1, interim.data2, # 2 x 4 elements
summary.exp, # 9 elements
summary.nonmix.exp, # 11 elements
summary.nonmix.wei, # 13 elements
summary.nonmix.gamma, # 13 elements
theta.0) # 1 element
}
# conditional power
DispConPwrAll(gamma.theta.0.exp,
gamma.theta.0.nonmix.exp,
gamma.theta.0.nonmix.wei,
gamma.theta.0.nonmix.gamma,
group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plots of Kaplan-Meier curves
# and estimated survival curves
# according to the four mentioned models (optional)
if (plot.km == TRUE) {
graphics::layout(mat = matrix(data = c(1, 2, 3, 4, 5, 5),
nrow = 3,
ncol = 2,
byrow = TRUE))
# exponential model
PlotKM(data, "Exponential Model")
PlotEstExp(data1, data2,
lambda1.hat.exp, lambda2.hat.exp,
group1.name, group2.name)
# non-mixture model with exponential survival
PlotKM(data, "Non-Mixture Model with Exponential Survival")
PlotEstNonMixExp(data1, data2,
lambda1.hat.nonmix.exp, c1.cond.hat.nonmix.exp,
lambda2.hat.nonmix.exp, c2.cond.hat.nonmix.exp,
group1.name, group2.name)
# non-mixture model with Weibull type survival
PlotKM(data, "Non-Mixture Model with Weibull type Survival")
PlotEstNonMixWei(data1, data2,
lambda1.hat.nonmix.wei, k1.hat.nonmix.wei, c1.cond.hat.nonmix.wei,
lambda2.hat.nonmix.wei, k2.hat.nonmix.wei, c2.cond.hat.nonmix.wei,
group1.name, group2.name)
# non-mixture model with Gamma type survival
PlotKM(data, "Non-Mixture Model with Gamma type Survival")
PlotEstNonMixGamma(data1, data2,
a1.hat.nonmix.gamma, b1.hat.nonmix.gamma, c1.cond.hat.nonmix.gamma,
a2.hat.nonmix.gamma, b2.hat.nonmix.gamma, c2.cond.hat.nonmix.gamma,
group1.name, group2.name)
}
# plot of conditional power curves
PlotConPwrAll(theta,
gamma.theta.exp,
gamma.theta.nonmix.exp,
gamma.theta.nonmix.wei,
gamma.theta.nonmix.gamma,
theta.0,
gamma.theta.0.exp,
gamma.theta.0.nonmix.exp,
gamma.theta.0.nonmix.wei,
gamma.theta.0.nonmix.gamma,
group1.name, group2.name)
# return values
return(value = invisible(x = list(lambda1.hat.exp = lambda1.hat.exp,
lambda2.hat.exp = lambda2.hat.exp,
theta.hat.exp = theta.hat.exp,
gamma.theta.0.exp = gamma.theta.0.exp,
lambda.hat.nm.exp = lambda.hat.nonmix.exp,
c1.hat.nm.exp = c1.cond.hat.nonmix.exp,
c2.hat.nm.exp = c2.cond.hat.nonmix.exp,
theta.hat.nm.exp = theta.hat.nonmix.exp,
gamma.theta.0.nm.exp = gamma.theta.0.nonmix.exp,
lambda.hat.nm.wei = lambda.hat.nonmix.wei,
k.hat.nm.wei = k.hat.nonmix.wei,
c1.hat.nm.wei = c1.cond.hat.nonmix.wei,
c2.hat.nm.wei = c2.cond.hat.nonmix.wei,
theta.hat.nm.wei = theta.hat.nonmix.wei,
gamma.theta.0.nm.wei = gamma.theta.0.nonmix.wei,
a.hat.nm.gamma = a.hat.nonmix.gamma,
b.hat.nm.gamma = b.hat.nonmix.gamma,
c1.hat.nm.gamma = c1.cond.hat.nonmix.gamma,
c2.hat.nm.gamma = c2.cond.hat.nonmix.gamma,
theta.hat.nm.gamma = theta.hat.nonmix.gamma,
gamma.theta.0.nm.gamma = gamma.theta.0.nonmix.gamma)))
}
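# Hypothetical usage sketch with a randomly generated toy data set (group,
# status, time). On such data the non-mixture maximum likelihood fits may be
# unstable or fail to converge, so this is illustrative only and not a
# validation of the models.
set.seed(1)
toy <- data.frame(group = rep(c("A", "B"), each = 25),
status = rbinom(n = 50, size = 1, prob = 0.6),
time = rexp(n = 50, rate = 0.1))
res <- CompSurvMod(data = toy, cont.time = 12, new.pat = c(2, 2),
theta.0 = 0.75, alpha = 0.05, disp.data = TRUE, plot.km = TRUE)
res$gamma.theta.0.exp # conditional power under the plain exponential model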
|
/scratch/gouwar.j/cran-all/cranData/CP/R/CompSurvMod.R
|
ConPwrExp <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the exponential model with constant hazards by Per Kragh Andersen, i. e.
## S(t) = exp(- lambda * t), lambda > 0, t >= 0,
## with respect to two different treatments and no dropouts.
## (Andersen, P. K. (1987). Conditional power calculations as an aid
## in the decision whether to continue a clinical trial.
## Controlled Clinical Trials 8, 67-74.)
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the exponential model should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curve
## and optionally the Kaplan-Meier curves
## plus the estimated survival curves.
## Returns the estimates of the hazards, the hazard ratio
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, each for one group,
# and converting group expressions for internal calculations
# into values 1 and 2
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
# maximum likelihood estimators for hazards of group 1 and group 2
d1 <- sum(data1[, 2])
o1 <- sum(data1[, 3])
d2 <- sum(data2[, 2])
o2 <- sum(data2[, 3])
lambda1.hat <- d1 / o1
lambda2.hat <- d2 / o2
# estimator for hazard ratio theta = lambda2 / lambda1
theta.hat <- lambda2.hat / lambda1.hat
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star <- PersMonExp(d1, o1, n1.alive, new.pat[1], cont.time)
O2.star <- PersMonExp(d2, o2, n2.alive, new.pat[2], cont.time)
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / length(x = data[, 1])
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
calc.conpwr.exp <- CalcConPwrExp(theta.0,
d1, o1, O1.star, lambda1.hat,
d2, o2, O2.star,
n.star,
alpha)
theta <- calc.conpwr.exp[[1]]
gamma.theta <- calc.conpwr.exp[[2]]
gamma.theta.0 <- calc.conpwr.exp[[3]]
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group 1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
d1 <- interim.data1[1]
o1 <- interim.data1[2]
n1 <- interim.data1[3]
n1.alive <- interim.data1[4]
d2 <- interim.data2[1]
o2 <- interim.data2[2]
n2 <- interim.data2[3]
n2.alive <- interim.data2[4]
DispDataExp(group1.name, n1, d1, n1.alive, o1, lambda1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda2.hat, O2.star,
theta.0, theta.hat)
}
# conditional power
DispConPwr(gamma.theta.0, group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plot of Kaplan-Meier curves and estimated survival (optional)
if (plot.km == TRUE) {
graphics::par(mfrow = c(1, 2))
PlotKM(data, "Exponential Model")
PlotEstExp(data1, data2,
lambda1.hat, lambda2.hat,
group1.name, group2.name)
}
# plot of conditional power curve
PlotConPwr(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
"Exponential Model")
# return values
return(value = invisible(x = list(lambda1.hat = lambda1.hat,
lambda2.hat = lambda2.hat,
theta.hat = theta.hat,
gamma.theta.0 = gamma.theta.0)))
}
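# Minimal deterministic sketch (hypothetical data, assuming the package's
# validity checks accept such a small sample): four patients per group, so
# the hazard estimates d / o can be verified by hand:
# lambda1.hat = 2 / 20 = 0.10, lambda2.hat = 1 / 24, theta.hat ~ 0.42.
toy <- data.frame(group = rep(c("A", "B"), each = 4),
status = c(1, 1, 0, 0, 1, 0, 0, 0),
time = c(2, 4, 6, 8, 3, 5, 7, 9))
res <- ConPwrExp(data = toy, cont.time = 6, new.pat = c(1, 1),
theta.0 = 0.5, alpha = 0.05)
res$theta.hat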
|
/scratch/gouwar.j/cran-all/cranData/CP/R/ConPwrExp.R
|
ConPwrExpAndersen <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the exponential model with constant hazards by Per Kragh Andersen, i. e.
## S(t) = exp(- lambda * t), lambda > 0, t >= 0,
## with respect to two different treatments and no dropouts.
## The original formulae of the Andersen paper are used.
## (Andersen, P. K. (1987). Conditional power calculations as an aid
## in the decision whether to continue a clinical trial.
## Controlled Clinical Trials 8, 67-74.)
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the exponential model should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curve
## and optionally the Kaplan-Meier curves
## plus the estimated survival curves.
## Returns the estimates of the hazards, the hazard ratio
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, each for one group,
# and converting group expressions for internal calculations
# into values 1 and 2
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
# maximum likelihood estimators for hazards of group 1 and group 2
d1 <- sum(data1[, 2])
o1 <- sum(data1[, 3])
d2 <- sum(data2[, 2])
o2 <- sum(data2[, 3])
lambda1.hat <- d1 / o1
lambda2.hat <- d2 / o2
# estimator for hazard ratio theta = lambda2 / lambda1
theta.hat <- lambda2.hat / lambda1.hat
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star <- PersMonExp(d1, o1, n1.alive, new.pat[1], cont.time)
O2.star <- PersMonExp(d2, o2, n2.alive, new.pat[2], cont.time)
# conditional power calculations
# with the original formulae of the Andersen paper
calc.conpwr.exp.andersen <- CalcConPwrExpAndersen(theta.0,
d1, o1, O1.star, lambda1.hat,
d2, o2, O2.star,
alpha)
theta <- calc.conpwr.exp.andersen[[1]]
gamma.theta <- calc.conpwr.exp.andersen[[2]]
gamma.theta.0 <- calc.conpwr.exp.andersen[[3]]
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group 1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
d1 <- interim.data1[1]
o1 <- interim.data1[2]
n1 <- interim.data1[3]
n1.alive <- interim.data1[4]
d2 <- interim.data2[1]
o2 <- interim.data2[2]
n2 <- interim.data2[3]
n2.alive <- interim.data2[4]
DispDataExp(group1.name, n1, d1, n1.alive, o1, lambda1.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda2.hat, O2.star,
theta.0, theta.hat)
}
# conditional power
DispConPwr(gamma.theta.0, group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plot of Kaplan-Meier curves and estimated survival (optional)
if (plot.km == TRUE) {
graphics::par(mfrow = c(1, 2))
PlotKM(data, "Exponential Model (Andersen)")
PlotEstExp(data1, data2,
lambda1.hat, lambda2.hat,
group1.name, group2.name)
}
# plot of conditional power curve
PlotConPwr(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
"Exponential Model (Andersen)")
# return values
return(value = invisible(x = list(lambda1.hat = lambda1.hat,
lambda2.hat = lambda2.hat,
theta.hat = theta.hat,
gamma.theta.0 = gamma.theta.0)))
}
|
/scratch/gouwar.j/cran-all/cranData/CP/R/ConPwrExpAndersen.R
|
ConPwrNonMixExp <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the non-mixture model with exponential survival, i. e.
## S(t) = c^[1 - exp(- lambda * t)], lambda > 0, 0 < c < 1, t >= 0,
## with respect to two different treatments and no dropouts.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the non-mixture model with exponential survival should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curve
## and optionally the Kaplan-Meier curves
## plus the estimated survival curves.
## Returns the estimates of the parameters, the hazard ratio
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, each for one group,
# and converting group expressions for internal calculations
# into values 1 and 2
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and if applicable projection into feasible region
init.val.data1.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
lambda1.0 <- init.val.data1.likelihood.nonmix.exp[1]
c1.0 <- init.val.data1.likelihood.nonmix.exp[2]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and if applicable projection into feasible region
init.val.data2.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
lambda2.0 <- init.val.data2.likelihood.nonmix.exp[1]
c2.0 <- init.val.data2.likelihood.nonmix.exp[2]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and if applicable projection into feasible region
init.val.data.likelihood.nonmix.exp <- InitValLikelihoodNonMixExp(data)
# initial values for maximum likelihood estimation
# of parameters for all data
lambda.0 <- init.val.data.likelihood.nonmix.exp[1]
c.0 <- init.val.data.likelihood.nonmix.exp[2]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.exp <- LikelihoodNonMixExp(data1, data2, data,
lambda1.0, c1.0,
lambda2.0, c2.0,
lambda.0, c.0)
# maximum likelihood estimators of parameters in group 1, group 2
lambda1.hat <- likelihood.nonmix.exp[1]
c1.hat <- likelihood.nonmix.exp[2]
lambda2.hat <- likelihood.nonmix.exp[3]
c2.hat <- likelihood.nonmix.exp[4]
lambda.hat <- likelihood.nonmix.exp[5]
c1.cond.hat <- likelihood.nonmix.exp[6]
c2.cond.hat <- likelihood.nonmix.exp[7]
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption lambda1 = lambda2
theta.hat <- log(c2.cond.hat) / log(c1.cond.hat)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star <- PersMonNonMixExp(lambda1.hat, c1.hat, n1.alive, new.pat[1], cont.time)
O2.star <- PersMonNonMixExp(lambda2.hat, c2.hat, n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixExp(data1, lambda1.hat, group1.name)
o2.stroke <- FctPersMonNonMixExp(data2, lambda2.hat, group2.name)
o2.stroke.null <- FctPersMonNonMixExp(data2, lambda1.hat, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta <- calc.conpwr.nonmix[[2]]
gamma.theta.0 <- calc.conpwr.nonmix[[3]]
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group 1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
d1 <- interim.data1[1]
o1 <- interim.data1[2]
n1 <- interim.data1[3]
n1.alive <- interim.data1[4]
d2 <- interim.data2[1]
o2 <- interim.data2[2]
n2 <- interim.data2[3]
n2.alive <- interim.data2[4]
DispDataNonMixExp(group1.name, n1, d1, n1.alive, o1, lambda.hat, c1.cond.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda.hat, c2.cond.hat, O2.star,
theta.0, theta.hat)
}
# conditional power
DispConPwr(gamma.theta.0, group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plots of Kaplan-Meier curves (optional)
if (plot.km == TRUE) {
graphics::par(mfrow = c(1, 2))
PlotKM(data, "Non-Mixture Model with Exponential Survival")
PlotEstNonMixExp(data1, data2,
lambda1.hat, c1.cond.hat,
lambda2.hat, c2.cond.hat,
group1.name, group2.name)
}
# plot of conditional power curve
PlotConPwr(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
"Non-Mixture Model with Exponential Survival")
# return values
return(value = invisible(x = list(lambda.hat = lambda.hat,
c1.hat = c1.cond.hat, c2.hat = c2.cond.hat,
theta.hat = theta.hat,
gamma.theta.0 = gamma.theta.0)))
}
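# Worked one-liner for the hazard ratio in this model: with hypothetical
# cure fractions c1 = 0.30 and c2 = 0.45,
# theta = log(c2) / log(c1) = log(0.45) / log(0.30) ~ 0.66,
# i.e. the hazard in group 2 is about two thirds of that in group 1.
log(0.45) / log(0.30)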
|
/scratch/gouwar.j/cran-all/cranData/CP/R/ConPwrNonMixExp.R
|
ConPwrNonMixGamma <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the non-mixture model with Gamma type survival, i. e.
## S(t) = c^Gamma^(0)(a, b * t), a > 0, b > 0, 0 < c < 1, t >= 0,
## with Gamma^(0)(a, b * t) being the regularized (lower) incomplete Gamma
## function with upper integration limit b * t,
## with respect to two different treatments and no dropouts.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the non-mixture model with Gamma type survival should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curve
## and optionally the Kaplan-Meier curves
## plus the estimated survival curves.
## Returns the estimates of the parameters, the hazard ratio
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, each for one group,
# and converting group expressions for internal calculations
# into values 1 and 2
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and if applicable projection into feasible region
init.val.data1.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
a1.0 <- init.val.data1.likelihood.nonmix.gamma[1]
b1.0 <- init.val.data1.likelihood.nonmix.gamma[2]
c1.0 <- init.val.data1.likelihood.nonmix.gamma[3]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and if applicable projection into feasible region
init.val.data2.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
a2.0 <- init.val.data2.likelihood.nonmix.gamma[1]
b2.0 <- init.val.data2.likelihood.nonmix.gamma[2]
c2.0 <- init.val.data2.likelihood.nonmix.gamma[3]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and if applicable projection into feasible region
init.val.data.likelihood.nonmix.gamma <- InitValLikelihoodNonMixGamma(data)
# initial values for maximum likelihood estimation
# of parameters for all data
a.0 <- init.val.data.likelihood.nonmix.gamma[1]
b.0 <- init.val.data.likelihood.nonmix.gamma[2]
c.0 <- init.val.data.likelihood.nonmix.gamma[3]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.gamma <- LikelihoodNonMixGamma(data1, data2, data,
a1.0, b1.0, c1.0,
a2.0, b2.0, c2.0,
a.0, b.0, c.0)
# maximum likelihood estimators of parameters in group 1, group 2
a1.hat <- likelihood.nonmix.gamma[1]
b1.hat <- likelihood.nonmix.gamma[2]
c1.hat <- likelihood.nonmix.gamma[3]
a2.hat <- likelihood.nonmix.gamma[4]
b2.hat <- likelihood.nonmix.gamma[5]
c2.hat <- likelihood.nonmix.gamma[6]
a.hat <- likelihood.nonmix.gamma[7]
b.hat <- likelihood.nonmix.gamma[8]
c1.cond.hat <- likelihood.nonmix.gamma[9]
c2.cond.hat <- likelihood.nonmix.gamma[10]
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption a1 = a2 and b1 = b2
theta.hat <- log(c2.cond.hat) / log(c1.cond.hat)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star <- PersMonNonMixGamma(a1.hat, b1.hat, c1.hat, n1.alive, new.pat[1], cont.time)
O2.star <- PersMonNonMixGamma(a2.hat, b2.hat, c2.hat, n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixGamma(data1, a1.hat, b1.hat, group1.name)
o2.stroke <- FctPersMonNonMixGamma(data2, a2.hat, b2.hat, group2.name)
o2.stroke.null <- FctPersMonNonMixGamma(data2, a1.hat, b1.hat, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
# number of patients
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta <- calc.conpwr.nonmix[[2]]
gamma.theta.0 <- calc.conpwr.nonmix[[3]]
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group 1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
d1 <- interim.data1[1]
o1 <- interim.data1[2]
n1 <- interim.data1[3]
n1.alive <- interim.data1[4]
d2 <- interim.data2[1]
o2 <- interim.data2[2]
n2 <- interim.data2[3]
n2.alive <- interim.data2[4]
DispDataNonMixGamma(group1.name, n1, d1, n1.alive, o1, a.hat, b.hat, c1.cond.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, a.hat, b.hat, c2.cond.hat, O2.star,
theta.0, theta.hat)
}
# conditional power
DispConPwr(gamma.theta.0, group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plots of Kaplan-Meier curves (optional)
if (plot.km == TRUE) {
graphics::par(mfrow = c(1, 2))
PlotKM(data, "Non-Mixture Model with Gamma type Survival")
PlotEstNonMixGamma(data1, data2,
a1.hat, b1.hat, c1.cond.hat,
a2.hat, b2.hat, c2.cond.hat,
group1.name, group2.name)
}
# plot of conditional power curve
PlotConPwr(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
"Non-Mixture Model with Gamma type Survival")
# return values
return(value = invisible(x = list(a.hat = a.hat, b.hat = b.hat,
c1.hat = c1.cond.hat, c2.hat = c2.cond.hat,
theta.hat = theta.hat,
gamma.theta.0 = gamma.theta.0)))
}
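# Sketch of the Gamma type survival function used above, with hypothetical
# parameters: S(t) = c^P(a, b * t), where P is the regularized lower
# incomplete Gamma function, i.e. stats::pgamma(b * t, shape = a), so that
# S(0) = 1 and S(t) tends to the cure fraction c as t grows.
surv.gamma <- function(t, a, b, c) c^stats::pgamma(b * t, shape = a)
surv.gamma(c(0, 5, 50), a = 1.2, b = 0.3, c = 0.35)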
|
/scratch/gouwar.j/cran-all/cranData/CP/R/ConPwrNonMixGamma.R
|
ConPwrNonMixWei <- function(data, cont.time,
new.pat = c(0, 0), theta.0 = 1, alpha = 0.05,
disp.data = FALSE, plot.km = FALSE) {
## Calculates the conditional power and plots the conditional power curve
## for the non-mixture model with Weibull type survival, i. e.
## S(t) = c^[1 - exp(- lambda * t^k)], lambda > 0, k > 0, 0 < c < 1, t >= 0,
## with respect to two different treatments and no dropouts.
##
## Args:
## data: Data frame which consists of at least three columns with the group
## (two different expressions) in the first,
## status (1 = event, 0 = censored) in the second
## and event time in the third column.
## cont.time: Period of time of continuing the trial.
## new.pat: 2-dimensional vector which consists of numbers of new patients
## who will be recruited each time unit
## (first component = group 1, second component = group 2)
## with default at (0, 0).
## theta.0: Originally postulated clinically relevant difference
## (hazard ratio = hazard of group 2 / hazard of group 1)
## with default at 1.
## alpha: Significance level for conditional power calculations
## with default at 0.05.
## disp.data: Logical value indicating if all calculated data should be displayed
## with default at FALSE.
## plot.km: Logical value indicating if Kaplan-Meier curves
## and estimated survival curves according to
## the non-mixture model with Weibull type survival should be plotted
## with default at FALSE.
##
## Returns:
## Displays the calculated conditional power
## and optionally an overview of the other calculated values,
## and plots the conditional power curve
## and optionally the Kaplan-Meier curves
## plus the estimated survival curves.
## Returns the estimates of the parameters, the hazard ratio
## and the conditional power.
# check of passed parameters
IsValid(data, cont.time, new.pat, theta.0, alpha, disp.data, plot.km)
# split data frame into two data frames, one for each group,
# and convert the group labels into values 1 and 2
# for internal calculations
split.data <- SplitData(data)
data1 <- split.data[[1]]
group1.name <- split.data[[2]]
data2 <- split.data[[3]]
group2.name <- split.data[[4]]
# calculate initial values for maximum likelihood estimation
# of parameters in group 1
# and, if applicable, project them into the feasible region
init.val.data1.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data1)
# initial values for maximum likelihood estimation
# of parameters in group 1
lambda1.0 <- init.val.data1.likelihood.nonmix.wei[1]
k1.0 <- init.val.data1.likelihood.nonmix.wei[2]
c1.0 <- init.val.data1.likelihood.nonmix.wei[3]
# calculate initial values for maximum likelihood estimation
# of parameters in group 2
# and, if applicable, project them into the feasible region
init.val.data2.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data2)
# initial values for maximum likelihood estimation
# of parameters in group 2
lambda2.0 <- init.val.data2.likelihood.nonmix.wei[1]
k2.0 <- init.val.data2.likelihood.nonmix.wei[2]
c2.0 <- init.val.data2.likelihood.nonmix.wei[3]
# calculate initial values for maximum likelihood estimation
# of parameters for all data
# and, if applicable, project them into the feasible region
init.val.data.likelihood.nonmix.wei <- InitValLikelihoodNonMixWei(data)
# initial values for maximum likelihood estimation
# of parameters for all data
lambda.0 <- init.val.data.likelihood.nonmix.wei[1]
k.0 <- init.val.data.likelihood.nonmix.wei[2]
c.0 <- init.val.data.likelihood.nonmix.wei[3]
# maximum likelihood estimation of parameters in group 1, group 2
# and for all data
likelihood.nonmix.wei <- LikelihoodNonMixWei(data1, data2, data,
lambda1.0, k1.0, c1.0,
lambda2.0, k2.0, c2.0,
lambda.0, k.0, c.0)
# maximum likelihood estimators of parameters in group 1, group 2
lambda1.hat <- likelihood.nonmix.wei[1]
k1.hat <- likelihood.nonmix.wei[2]
c1.hat <- likelihood.nonmix.wei[3]
lambda2.hat <- likelihood.nonmix.wei[4]
k2.hat <- likelihood.nonmix.wei[5]
c2.hat <- likelihood.nonmix.wei[6]
lambda.hat <- likelihood.nonmix.wei[7]
k.hat <- likelihood.nonmix.wei[8]
c1.cond.hat <- likelihood.nonmix.wei[9]
c2.cond.hat <- likelihood.nonmix.wei[10]
# estimator for hazard ratio theta = log(c2) / log(c1)
# under the assumption lambda1 = lambda2 and k1 = k2
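# (for S(t) = c^[1 - exp(- lambda * t^k)] the hazard is
# h(t) = - log(c) * lambda * k * t^(k - 1) * exp(- lambda * t^k),
# so with common lambda and k the ratio h2(t) / h1(t)
# reduces to log(c2) / log(c1))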
theta.hat <- log(c2.cond.hat) / log(c1.cond.hat)
# estimation of person months in group 1 and group 2
n1.alive <- sum(1 - data1[, 2])
n2.alive <- sum(1 - data2[, 2])
O1.star <- PersMonNonMixWei(lambda1.hat, k1.hat, c1.hat, n1.alive, new.pat[1], cont.time)
O2.star <- PersMonNonMixWei(lambda2.hat, k2.hat, c2.hat, n2.alive, new.pat[2], cont.time)
# functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
o1.stroke <- FctPersMonNonMixWei(data1, lambda1.hat, k1.hat, group1.name)
o2.stroke <- FctPersMonNonMixWei(data2, lambda2.hat, k2.hat, group2.name)
o2.stroke.null <- FctPersMonNonMixWei(data2, lambda1.hat, k1.hat, group2.name)
# further functions of person months in group 1, group 2
# and in group 2 under the null hypothesis
n1 <- length(x = data1[, 1])
n2 <- length(x = data2[, 1])
O1.stroke.star <- o1.stroke / n1 * (n1.alive + new.pat[1] * cont.time) * c1.cond.hat
O2.stroke.star <- o2.stroke / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
O2.stroke.star.null <- o2.stroke.null / n2 * (n2.alive + new.pat[2] * cont.time) * c2.cond.hat
# projected number of patients still alive at the end of the continued
# trial: those currently alive plus new recruits, weighted by the
# overall fraction rel of patients still alive
n.alive <- n1.alive + n2.alive
rel <- n.alive / (n1 + n2)
n.star <- floor(x = (n.alive + ((new.pat[1] + new.pat[2]) * cont.time * rel)))
# conditional power calculations
d1 <- sum(data1[, 2])
d2 <- sum(data2[, 2])
calc.conpwr.nonmix <- CalcConPwrNonMix(theta.0,
d1, o1.stroke, O1.stroke.star, c1.cond.hat,
d2, o2.stroke, O2.stroke.star, O2.stroke.star.null,
n.star,
alpha)
theta <- calc.conpwr.nonmix[[1]]
gamma.theta <- calc.conpwr.nonmix[[2]]
gamma.theta.0 <- calc.conpwr.nonmix[[3]]
# results
# additional data (optional)
if (disp.data == TRUE) {
# calculate number of death events, person months, number of patients
# and number of patients still alive of group1 and group 2
interim.data1 <- InterimData(data1, group1.name)
interim.data2 <- InterimData(data2, group2.name)
d1 <- interim.data1[1]
o1 <- interim.data1[2]
n1 <- interim.data1[3]
n1.alive <- interim.data1[4]
d2 <- interim.data2[1]
o2 <- interim.data2[2]
n2 <- interim.data2[3]
n2.alive <- interim.data2[4]
DispDataNonMixWei(group1.name, n1, d1, n1.alive, o1, lambda.hat, k.hat, c1.cond.hat, O1.star,
group2.name, n2, d2, n2.alive, o2, lambda.hat, k.hat, c2.cond.hat, O2.star,
theta.0, theta.hat)
}
# conditional power
DispConPwr(gamma.theta.0, group1.name, group2.name)
# standardization of plot window
graphics::par(las = 1,
mfrow = c(1, 1))
# plots of Kaplan-Meier curves (optional)
if (plot.km == TRUE) {
graphics::par(mfrow = c(1, 2))
PlotKM(data, "Non-Mixture Model with Weibull type Survival")
PlotEstNonMixWei(data1, data2,
lambda1.hat, k1.hat, c1.cond.hat,
lambda2.hat, k2.hat, c2.cond.hat,
group1.name, group2.name)
}
# plot of conditional power curve
PlotConPwr(theta, gamma.theta,
theta.0, gamma.theta.0,
group1.name, group2.name,
"Non-Mixture Model with Weibull type Survival")
# return values
return(value = invisible(x = list(lambda.hat = lambda.hat, k.hat = k.hat,
c1.hat = c1.cond.hat, c2.hat = c2.cond.hat,
theta.hat = theta.hat,
gamma.theta.0 = gamma.theta.0)))
}
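## Usage sketch (illustrative only, not part of the original source):
## the toy data frame follows the Args description above -- group in
## column 1, status (1 = event, 0 = censored) in column 2 and event
## time in column 3; all numbers are made up.
if (FALSE) {
  set.seed(42)
  n <- 50
  toy <- data.frame(group  = rep(c("A", "B"), each = n),
                    status = rbinom(2 * n, size = 1, prob = 0.7),
                    time   = c(rexp(n, rate = 0.10), rexp(n, rate = 0.08)))
  # continue the trial for 12 time units, recruiting 2 new patients
  # per time unit in each group
  res <- ConPwrNonMixWei(toy, cont.time = 12, new.pat = c(2, 2),
                         theta.0 = 0.75, disp.data = TRUE, plot.km = TRUE)
  res$theta.hat      # estimated hazard ratio log(c2) / log(c1)
  res$gamma.theta.0  # conditional power at the postulated theta.0
}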
/scratch/gouwar.j/cran-all/cranData/CP/R/ConPwrNonMixWei.R