`pzoeppritz` <-
function( chtype= "Amplitude" , alpha1, alpha2, beta1, beta2, rho1 ,rho2, chincw="P", choutkind="ALL")
{
	## plot the output of the Zoeppritz calculation, e.g.:
	##   pzoeppritz("Amplitude", alpha1, alpha2, beta1, beta2, rho1, rho2, "SV", "S")
## chtype = type of output: "Amplitude", "Potential", "Energy"
if(missing(chtype)) { chtype= "Amplitude"}
if(missing(chincw)) { chincw="P" }
if(missing(choutkind)) { choutkind="ALL" }
########### protect against upper/lower case and use only first letter
lowtype = tolower(chtype)
type1 = substr(lowtype,1,1)
if(type1=="a" )
{
type = 1;
chtype= "Amplitude"
} else
if( type1=="p")
{
type = 2;
chtype= "Potential"
}else
if(type1=="e" )
{
type = 3;
chtype="Energy"
}
chincw = toupper(chincw)
if(chincw == "P")
{
incw = 1;
}else
if(chincw == "S")
{
incw = 2;
}else
if(chincw == "SV")
{
incw = 2;
}
choutkind = toupper(choutkind)
if(choutkind=="SV")
{
outkind = 2;
}else
if(choutkind=="S")
{
outkind = 2;
}else
if(choutkind=="P")
{
outkind = 1;
}
if(choutkind=="ALL")
{
outkind = 5;
}
if(choutkind=="NONE")
{
outkind = 0;
}
A = zoeppritz( type , alpha1, alpha2, beta1, beta2, rho1 ,rho2, incw);
A$chincw = chincw
A$chtype = chtype
A$alphacrit = NULL
if(chincw=="P")
{
if(alpha1<alpha2)
{
A$alphacrit = 180*asin(alpha1/ alpha2)/pi
}
else
{
A$alphacrit = NULL
}
}
if(chincw=="S")
{
if(beta1<beta2)
{
A$alphacrit = 180*asin(beta1/ beta2)/pi
}
else
{
A$alphacrit = NULL
}
}
########## plotting decisions:
if(outkind>4)
{
plotzoeppritz(A)
u = par("usr")
LL = list(x=c(u[1]+0.76*(u[2]-u[1]) , u[1]+0.98*(u[2]-u[1]) ) , y = c(u[3]+0.5*(u[4]-u[3]) , u[3]+0.7*(u[4]-u[3]) ) )
piczoeppritz(LL, chincw)
}
else
{
if(outkind>0)
{
plot(A$angle, A$rmat[,outkind], col=4, type="l", xlab="Angle",
ylab=chtype, ylim=c(min(c(-.1, min(A$rmat[, outkind]))) ,1.2 ) ) ;
title(paste(sep="", chincw, " Incident/", choutkind, " Out" ) );
text(0 , 1.15, paste('Layer 1: Vp=',alpha1,' Vs=',beta1, ' Rho=', rho1), pos=4)
text(0, 1.05, paste('Layer 2: Vp=',alpha2,' Vs=',beta2, ' Rho=', rho2), pos=4)
}
}
invisible(A)
}
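
## ---- Illustrative sketch (added; not part of the original source) ----------
## The critical angle assigned to A$alphacrit above is Snell's law with the
## transmitted ray at 90 degrees: theta_c = asin(v1/v2), reported in degrees
## and defined only when v1 < v2.  A hypothetical standalone check:
# crit.deg <- function(v1, v2) if (v1 < v2) 180*asin(v1/v2)/pi else NULL
# crit.deg(4.98, 8.0)   # ~38.5 degrees for the velocity model in the demo below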
| /scratch/gouwar.j/cran-all/cranData/zoeppritz/R/pzoeppritz.R |
`zoeppritz` <-
function(icoef, vp1, vp2, vs1, vs2, rho1, rho2, incw)
{
  ## calculate the Zoeppritz equations, following Young and Braile
  ## example model and call (also used in the package demo):
  ##   alpha1 = 3.0; beta1 = 1.7321; rho1 = 2.69
  ##   alpha2 = 6.0; beta2 = 3.46;   rho2 = 2.91
  ##   icoef = 1; incw = 2
  ##   zoeppritz(icoef, alpha1, alpha2, beta1, beta2, rho1, rho2, incw)
  ##   pzoeppritz("Amplitude", alpha1, alpha2, beta1, beta2, rho1, rho2, "SV", "S")
input.model = list(vp1=vp1, vp2=vp2, vs1=vs1, vs2=vs2, rho1= rho1, rho2=rho2)
if(incw==1)
{
vel = vp1;
}
if(incw==2)
{
vel = vs1;
}
angle = seq(from=0.1, to=89.7, length=90)
alpha = angle*pi/180;
fp = matrix(rep(0,4), ncol=1,nrow=4);
K = 4* length(alpha)
r = matrix( rep(0,K), ncol=4, nrow=length(alpha));
rmat = matrix( rep(0,K), ncol=4, nrow=length(alpha));
fe = matrix( rep(0,K), ncol=4, nrow=length(alpha));
rra = matrix( rep(0,K), ncol=4, nrow=length(alpha));
ria = matrix( rep(0,K), ncol=4, nrow=length(alpha));
ang = matrix( rep(0,K), ncol=4, nrow=length(alpha));
theta = sin(alpha)/vel;
fac1 = rep(1,length(alpha));
fac2 =rep(1,length(alpha));
fac3 = rep(1,length(alpha));
fac4 = rep(1,length(alpha));
thetasq = theta*theta;
qa = 2.0 *(rho2*vs2*vs2 - rho1*vs1*vs1);
t1 = vp1*vp1*thetasq;
t2 = vs1*vs1*thetasq;
t3 = vp2*vp2*thetasq;
t4 = vs2*vs2*thetasq;
# %%%%%%%%%%%%%%%%%%%% test for critical P refl
b1 = rep(0, length(alpha));
a1 =rep(0, length(alpha));
flag = theta>1.0/vp1 ;
b1[flag] = -sqrt(t1[flag]-1.0);
a1[flag] = 0.0;
fac1[flag] = 0.0;
a1[!flag]=sqrt(1.0-t1[!flag]);
b1[!flag] = 0.0;
# %%%%%%%%%%%%%%%%%%%% test for critical S refl
b2 = rep(0, length(alpha));
a2 = rep(0, length(alpha));
flag = theta>1.0/vs1 ;
b2[flag] = -sqrt(t2[flag]-1.0);
a2[flag] = 0.0;
fac2[flag] = 0.0;
a2[!flag]=sqrt(1.0-t2[!flag]);
b2[!flag] = 0.0;
# %%%%%%%%%%%%%%%%%%%% test for critical p refraction
b3 = rep(0, length(alpha));
a3 = rep(0, length(alpha));
flag = theta>1.0/vp2 ;
b3[flag] = -sqrt(t3[flag]-1.0);
a3[flag] = 0.0;
fac3[flag] = 0.0;
a3[!flag]=sqrt(1.0-t3[!flag]);
b3[!flag] = 0.0;
# %%%%%%%%%%%%%%%%%%%% test for critical s refraction
b4 = rep(0, length(alpha));
a4 = rep(0, length(alpha));
flag = theta>1.0/vs2 ;
b4[flag] = -sqrt(t4[flag]-1.0);
a4[flag] = 0.0;
fac4[flag] = 0.0;
a4[!flag]=sqrt(1.0-t4[!flag]);
b4[!flag] = 0.0;
# %%%%%%%%%%%%%%%%%%%%
x = rho2-(qa*thetasq);
y = rho1+(qa*thetasq);
z = rho2-rho1-(qa*thetasq) ;
p1 = complex(real=a1, imaginary=b1);
p2 = complex(real=a2, imaginary=b2);
p3 = complex(real=a3, imaginary=b3);
p4 = complex(real=a4, imaginary=b4);
d = vp1*vp2*vs1*vs2*thetasq*z*z+ vp2*vs2*p1*p2*x*x+ vp1*vs1*p3*p4*y*y+
rho1*rho2*(vs1*vp2*p1*p4+vp1*vs2*p2*p3)+ qa*qa*thetasq*p1*p2*p3*p4 ;
fp = matrix(rep(1,4), ncol=2, nrow=2);
if(incw==1)
{
#case 1
r[,1 ] = -1.0 +2.0*p1*(vp2*vs2*p2*x*x+vs1*vp2*rho1*rho2*p4+qa*qa*thetasq*p2*p3*p4)/d;
r[,2 ] = -2.0*vp1*theta*p1*(qa*p3*p4*y+vp2*vs2*x*z)*fac2/d;
r[,3 ] = 2.0*vp1*rho1*p1*(vs2*p2*x+vs1*p4*y)*fac3/d;
r[,4] = -2.0*vp1*rho1*theta*p1*(qa*p2*p3-vs1*vp2*z)*fac4/d;
    ## fp: factors converting the displacement amplitudes to potential coefficients
fp[1] = 1.0;
fp[2] = vs1/vp1;
fp[3] = vp2/vp1;
fp[4] = vs2/vp1;
    ## fe: factors converting to energy coefficients
fe[, 1 ] = 1.0;
fe[, 2 ] = p2*vp1/(p1*vs1);
fe[, 3 ] = rho2*p3*vp1/(p1*vp2*rho1);
fe[, 4] = rho2*p4*vp1/(p1*vs2*rho1) ;
} else if(incw==2){
# case 2
r[, 1 ] = -2.0*vs1*theta*p2*(qa*p3*p4*y+vp2*vs2*x*z)*fac1/d;
r[, 2 ] = 1.0-2.0*p2*(vp2*vs2*p1*x*x+vp1*vs2*rho1*rho2*p3+qa*qa*thetasq*p1*p3*p4)/d;
r[, 3 ] = 2.0*vs1*rho1*theta*p2*(qa*p1*p4-vp1*vs2*z)*fac3/d;
r[, 4] = 2.0*vs1*rho1*p2*(vp1*p3*y + vp2*p1*x)*fac4/d;
    ## fp: factors converting the displacement amplitudes to potential coefficients
fp[1] = vp1/vs1;
fp[2] = 1.0;
fp[3] = vp2/vs1 ;
fp[4] = vs2/vs1;
    ## fe: factors converting to energy coefficients
fe[, 1 ] = vs1*p1/(vp1*p2);
fe[, 2 ] = 1.0;
fe[, 3 ] = rho2*vs1*p3/(rho1*vp2*p2);
fe[, 4 ] = rho2*vs1*p4/(rho1*vs2*p2);
}
# %%%%%%%%%%%%%%%%%%%%%%%%%
for(j in 1:4)
{
if(icoef==1)
{
# case 1
rmat[, j ] = Mod(r[, j ]);
} else if(icoef==2){
# case 2
rmat[, j] = Mod(r[, j ]*fp[j]);
} else if(icoef==3){
# case 3
rmat[, j] = Mod((r[, j ]*fp[j]) * (r[, j ]*fp[j]) *fe[, j ]);
}
rra[, j ] = Re(r[, j ]);
ria[, j ] = Im(r[, j ]);
aflag = Mod(rra[, j ])>0.0000001;
ang[aflag, j ]= atan2(ria[aflag, j ],rra[aflag, j ]);
ang[!aflag , j ]= 0.0;
}
return(list(angle=angle, rmat=rmat, rra=rra, ria=ria, ang=ang, incw=incw, icoef=icoef, input.model=input.model ))
}
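
## ---- Illustrative sketch (added; not part of the original source) ----------
## Direct call with the example model from the header comments; the returned
## list holds the incidence angles, the coefficient moduli (rmat), the real
## and imaginary parts (rra, ria), and the phase angles (ang):
# A <- zoeppritz(icoef = 1, vp1 = 3.0, vp2 = 6.0, vs1 = 1.7321, vs2 = 3.46,
#                rho1 = 2.69, rho2 = 2.91, incw = 1)
# dim(A$rmat)   # 90 x 4: reflected P, reflected S, transmitted P, transmitted S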
| /scratch/gouwar.j/cran-all/cranData/zoeppritz/R/zoeppritz.R |
readline("Hit Return\n")
############## set up a velocity model
### layer 1:
alpha1 = 4.98
beta1 = 2.9
rho1 = 2.667
########## layer 2
alpha2 = 8.0
beta2 = 4.6
rho2 = 3.38
########## visualize the scattering coefficients
App = pzoeppritz( "Amplitude" , alpha1, alpha2, beta1, beta2, rho1 ,rho2, "P", "ALL");
readline("Hit Return\n")
########### change incoming wave to S-wave:
App = pzoeppritz( "Amplitude" , alpha1, alpha2, beta1, beta2, rho1 ,rho2, "S", "ALL");
#####################################################
############# Incident wave in high velocity layer
alpha1 = 8.0
beta1 = 4.6
rho1 = 3.38
alpha2 = 4.98
beta2 = 2.9
rho2 = 2.667
readline("Hit Return\n")
App = pzoeppritz( "Amplitude" , alpha1, alpha2, beta1, beta2, rho1 ,rho2, "P", "ALL");
readline("Hit Return\n")
########### change incoming wave to S-wave:
App = pzoeppritz( "Amplitude" , alpha1, alpha2, beta1, beta2, rho1 ,rho2, "S", "ALL");
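########### the returned object can be inspected directly; an illustrative
########### sketch (added; not part of the original demo):
## App$alphacrit   # critical angle in degrees (NULL when no critical angle exists)
## App$rmat[1:3,]  # coefficient moduli near vertical incidence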
| /scratch/gouwar.j/cran-all/cranData/zoeppritz/demo/ZOEP.R |
check.psrf <-
function(post1=NULL, post2=NULL, post3=NULL, post4=NULL, post5=NULL)
{
if(is.list(post1)){
MCMC.list <- post1
}
else{
tmp <- list(post1,post2,post3,post4,post5)
count <- 5
if(is.null(post5)) count <- count-1
if(is.null(post4)) count <- count-1
if(is.null(post3)) count <- count-1
if(is.null(post2))
stop("at last two Markov Chains are needed to compute psrf")
draw <- vector("list", count)
for(i in 1:length(tmp))
{
if(!is.null(tmp[[i]])) draw[[i]] <- mcmc(tmp[[i]])
else break
}
MCMC.list <- mcmc.list(draw)
}
x <- MCMC.list
Niter <- niter(x)
Nchain <- nchain(x)
Nvar <- nvar(x)
xnames <- varnames(x)
x <- lapply(x, as.matrix)
S2 <- array(sapply(x, var, simplify = TRUE),
dim = c(Nvar, Nvar, Nchain))
W <- apply(S2, c(1, 2), mean)
PD <- is.positive.definite(W)
if(!PD)
{
psrf.s <- gelman.diag(MCMC.list, multivariate=FALSE)$psrf
psrf.m <- NULL
print("the covariance matrix of the posterior samples is not")
print("positive definite, and the multivarite psrf cannot be")
print("computed")
}
else{
gelman.plot(MCMC.list)
psrf.s <- gelman.diag(MCMC.list)[[1]]
psrf.m <- gelman.diag(MCMC.list)[[2]]
}
par(mfrow=c(1,2),mar=c(2,2,1,1))
boxplot(psrf.s[,1]); mtext("psrf",1,cex=1.2)
boxplot(psrf.s[,2]); mtext("upper bound of 95% CI",1,cex=1.2)
print(psrf.s);
print(psrf.m)
return(list(psrf.s=psrf.s, psrf.m=psrf.m,
psrf.s.summ = apply(psrf.s,2,summary)))
}
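
## ---- Minimal usage sketch (added; not part of the original source) ---------
## check.psrf() accepts an mcmc.list or 2-5 chains of posterior draws; the
## coda package supplies mcmc(), gelman.diag(), etc., and the
## is.positive.definite() helper is defined elsewhere in this package.
## With two hypothetical chains stored as matrices:
# library(coda)
# post1 <- matrix(rnorm(2000), ncol = 2)   # hypothetical chain 1
# post2 <- matrix(rnorm(2000), ncol = 2)   # hypothetical chain 2
# out <- check.psrf(post1, post2)
# out$psrf.s                               # univariate psrf per parameter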
| /scratch/gouwar.j/cran-all/cranData/zoib/R/check.psrf.R |
fixed <-
function(y, n, xmu.1,p.xmu,xsum.1,p.xsum, prior1, prec.int,prec.DN,
lambda.L1, lambda.L2, lambda.ARD, link,n.chain, inits, seed)
{
dataIn <- vector("list",10)
dataIn.name <- c("y","xmu.1","p.xmu","xsum.1","p.xsum",
"n","zero","prior1","hyper","link")
names(dataIn)<- dataIn.name
dataIn[[1]] <- y
dataIn[[2]] <- as.matrix(xmu.1)
dataIn[[3]] <- p.xmu
dataIn[[4]] <- as.matrix(xsum.1)
dataIn[[5]] <- p.xsum
dataIn[[6]] <- n
dataIn[[7]] <- rep(0,n)
dataIn[[8]] <- prior1
dataIn[[9]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[10]] <- link
if(is.null(seed)){
init <- function(rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,1),
"sigmad.L1" = runif((p.xsum-1),0,1),
"taub.ARD" = runif((p.xmu-1),0,1),
"taud.ARD" = runif((p.xsum-1),0,1),
"taub.L2" = runif(1,0,1),
"taud.L2" = runif(1,0,1))}
# 1b, 2d
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,1),
"sigmad.L1" = runif((p.xsum-1),0,1),
"taub.ARD" = runif((p.xmu-1),0,1),
"taud.ARD" = runif((p.xsum-1),0,1),
"taub.L2" = runif(1,0,1),
"taud.L2" = runif(1,0,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
# 1b, 2d
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[3]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
}}
op<- system.file("bugs","fixed.bug",package="zoib")
model <- jags.model(op,data=dataIn, n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
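
## ---- Illustrative sketch (added; not part of the original source) ----------
## Shape of the optional user-supplied `inits` consumed above: one list per
## chain, where `b` (mean model) and `d` (dispersion model) each hold the
## intercept followed by the slopes.  With hypothetical design sizes
## (both chains share the same values in this sketch):
# p.xmu <- 3; p.xsum <- 2; n.chain <- 2
# my.inits <- rep(list(list(b = rnorm(p.xmu, 0, 0.1),
#                           d = rnorm(p.xsum, 0, 0.1))), n.chain)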
| /scratch/gouwar.j/cran-all/cranData/zoib/R/fixed.R |
fixed0 <-
function(y, n, xmu.1, p.xmu, xsum.1,p.xsum, x0.1,p.x0, prior1, prec.int,
prec.DN, lambda.L1, lambda.L2, lambda.ARD, link,n.chain, inits, seed)
{
dataIn <- vector("list",12)
dataIn.name <- c("y","xmu.1","p.xmu","xsum.1","p.xsum", "x0.1","p.x0",
"n","zero","prior1","hyper","link")
names(dataIn)<- dataIn.name
dataIn[[1]] <- y
dataIn[[2]] <- as.matrix(xmu.1)
dataIn[[3]] <- p.xmu
dataIn[[4]] <- as.matrix(xsum.1)
dataIn[[5]] <- p.xsum
dataIn[[6]] <- as.matrix(x0.1)
dataIn[[7]] <- p.x0
dataIn[[8]] <- n
dataIn[[9]] <- rep(0,n)
dataIn[[10]]<- prior1
dataIn[[11]]<- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[12]]<- link
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b0[2:p.x0],4),
ncol=4, byrow=FALSE)}
}}
op<- system.file("bugs", "fixed0.bug",package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal,n.chains=n.chain)
return(model)
}
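
## ---- Illustrative sketch (added; not part of the original source) ----------
## As for fixed(), but the optional user-supplied `inits` may also carry `b0`,
## the coefficients of the zero-inflation component.  Hypothetical sizes:
# p.xmu <- 3; p.xsum <- 2; p.x0 <- 2
# my.inits <- list(list(b  = rnorm(p.xmu, 0, 0.1),
#                       d  = rnorm(p.xsum, 0, 0.1),
#                       b0 = rnorm(p.x0, 0, 0.1)))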
| /scratch/gouwar.j/cran-all/cranData/zoib/R/fixed0.R |
fixed01 <-
function(y, n, xmu.1,p.xmu,xsum.1,p.xsum, x0.1,p.x0, x1.1,p.x1, prior1,
prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,link,n.chain,
inits, seed)
{
dataIn <- vector("list",14)
dataIn.name <- c("y","xmu.1","p.xmu","xsum.1","p.xsum", "x0.1","p.x0",
"x1.1","p.x1","n","zero","prior1","hyper","link")
names(dataIn)<- dataIn.name
dataIn[[1]] <- y
dataIn[[2]] <- as.matrix(xmu.1)
dataIn[[3]] <- p.xmu
dataIn[[4]] <- as.matrix(xsum.1)
dataIn[[5]] <- p.xsum
dataIn[[6]] <- as.matrix(x0.1)
dataIn[[7]] <- p.x0
dataIn[[8]] <- as.matrix(x1.1)
dataIn[[9]] <- p.x1
dataIn[[10]]<- n
dataIn[[11]]<- rep(0,n)
dataIn[[12]]<- prior1
dataIn[[13]]<- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[14]]<- link
if(is.null(seed)){
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[7]] <- matrix(rep(inits[[i]]$b0[2:p.x0],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[4]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[8]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
}
}
op<- system.file("bugs", "fixed01.bug",package="zoib")
model<- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal,n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/fixed01.R |
fixed1 <-
function(y, n, xmu.1,p.xmu, xsum.1,p.xsum, x1.1,p.x1,prior1, prec.int,
prec.DN, lambda.L1, lambda.L2, lambda.ARD, link, n.chain, inits, seed)
{
dataIn <- vector("list",12)
dataIn.name <- c("y","xmu.1","p.xmu","xsum.1","p.xsum","x1.1",
"p.x1","n","zero","prior1","hyper","link")
names(dataIn)<- dataIn.name
dataIn[[1]] <- y
  dataIn[[2]] <- as.matrix(xmu.1)
  dataIn[[3]] <- p.xmu
  dataIn[[4]] <- as.matrix(xsum.1)
  dataIn[[5]] <- p.xsum
  dataIn[[6]] <- as.matrix(x1.1)
dataIn[[7]] <- p.x1
dataIn[[8]] <- n
dataIn[[9]] <- rep(0,n)
dataIn[[10]] <- prior1
dataIn[[11]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[12]] <- link
if(is.null(seed)){
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp"= matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1"= runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD"= runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp"= matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1"= runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD"= runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[3]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
} }
op<- system.file("bugs", "fixed1.bug", package="zoib")
model <- jags.model(op, data=dataIn, n.adapt=0, inits=inits.internal,n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/fixed1.R |
is.square.matrix <- function( x )
{
###
### determines if the given matrix is a square matrix
###
### arguments
### x = a matrix object
###
if ( !is.matrix( x ) )
stop( "argument x is not a matrix" )
return( nrow(x) == ncol(x) )
}
is.symmetric.matrix <- function( x )
{
###
### this function determines if the matrix is symmetric
###
### argument
### x = a numeric matrix object
###
if ( !is.matrix( x ) ) {
stop( "argument x is not a matrix" )
}
if ( !is.numeric( x ) ) {
stop( "argument x is not a numeric matrix" )
}
if ( !is.square.matrix( x ) )
stop( "argument x is not a square numeric matrix" )
return( sum( x == t(x) ) == ( nrow(x) ^ 2 ) )
}
is.positive.definite <- function( x, tol=1e-8 )
{
###
### this function determines if the given real symmetric matrix is positive definite
###
### parameters
### x = a square numeric matrix object
### tol = tolerance level for zero
###
if ( !is.square.matrix( x ) )
stop( "argument x is not a square matrix" )
if ( !is.symmetric.matrix( x ) )
stop( "argument x is not a symmetric matrix" )
if ( !is.numeric( x ) )
stop( "argument x is not a numeric matrix" )
eigenvalues <- eigen(x, only.values = TRUE)$values
n <- nrow( x )
for ( i in 1: n ) {
if ( abs( eigenvalues[i] ) < tol ) {
eigenvalues[i] <- 0
}
}
if ( any( eigenvalues <= 0 ) ) {
return( FALSE )
}
return( TRUE )
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/is.positive.definite.R |
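
## ---- Minimal usage sketch for is.positive.definite() above (added) ---------
# is.positive.definite(matrix(c(2, 1, 1, 2), 2))   # TRUE  (eigenvalues 3, 1)
# is.positive.definite(matrix(c(1, 2, 2, 1), 2))   # FALSE (eigenvalues 3, -1)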
joint.1z <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",16)
names(dataIn)<- c("n","y","q","xmu.1","p.xmu",
"xsum.1","p.xsum", "zero","link", "hyper",
"prior1","prior2","rid","EUID", "nEU",
"hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- as.matrix(y)
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1, nrow=n)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1, nrow=n)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- matrix(0,n,q)
dataIn[[9]] <- link
dataIn[[10]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[11]] <- prior1
dataIn[[12]] <- prior2
dataIn[[13]] <- rid
dataIn[[14]] <- EUID
dataIn[[15]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[16]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[16]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
# if joint, b is matrix of p.xmu*q
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[3]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[4]] <- array(rep(inits[[i]]$d[2:p.xsum,],4),c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[11]]<- inits[[i]]$sigma
inits.internal[[i]][[12]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "joint_1z.bug", package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
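
## ---- Illustrative sketch (added; not part of the original source) ----------
## For the joint (multivariate) builders the optional user-supplied inits
## carry one column per response variable, plus a scalar `sigma` for the
## random-effect scale.  With q = 2 responses and hypothetical sizes:
# q <- 2; p.xmu <- 3; p.xsum <- 2
# my.inits <- list(list(b     = matrix(rnorm(p.xmu*q,  0, 0.1), p.xmu,  q),
#                       d     = matrix(rnorm(p.xsum*q, 0, 0.1), p.xsum, q),
#                       sigma = 0.5))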
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.1z.R |
joint.1z0 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, inflate0,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",19)
dataIn.name <- c("n","y","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x0.1","p.x0","inflate0",
"zero","link","hyper","prior1","prior2",
"rid", "EUID","nEU","hyper2")
names(dataIn)<- dataIn.name
dataIn[[1]] <- n
dataIn[[2]] <- as.matrix(y)
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x0.1)
dataIn[[9]] <- p.x0
dataIn[[10]]<- inflate0
dataIn[[11]]<- matrix(0,n,q)
dataIn[[12]]<- link
dataIn[[13]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[14]] <- prior1
dataIn[[15]] <- prior2
dataIn[[16]] <- rid
dataIn[[17]] <- EUID
dataIn[[18]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[19]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[19]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp"= array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0 ,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp"= array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0 ,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
  # inits.internal slots: tmp1/tmp2/tmp3 = intercepts for b, d, b0;
  # sigma1/scale2 = scale of the random effect
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[4]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1,]
if(p.x0>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$b0[2:p.x0,],4), c((p.x0-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "joint_1z0.bug", package="zoib")
model <- jags.model(op,n.adapt=0, data=dataIn, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.1z0.R |
joint.1z01 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1, x0.1, p.x0,
inflate0, inflate1, rid, EUID, nEU, prior1, prior2, prior.beta,
prior.Sigma, prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",22)
dataIn.name <- c("n","y","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x0.1","p.x0","x1.1",
"p.x1","inflate0","inflate1","link","hyper",
"prior1","prior2","rid","EUID", "nEU",
"zero","hyper2")
names(dataIn)<- dataIn.name
dataIn[[1]] <- n
dataIn[[2]] <- as.matrix(y)
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x0.1)
dataIn[[9]] <- p.x0
dataIn[[10]]<- as.matrix(x1.1)
dataIn[[11]]<- p.x1
dataIn[[12]]<- inflate0
dataIn[[13]]<- inflate1
dataIn[[14]]<- link
dataIn[[15]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[16]] <- prior1
dataIn[[17]] <- prior2
dataIn[[18]] <- rid
dataIn[[19]] <- EUID
dataIn[[20]] <- nEU
dataIn[[21]] <- matrix(0,n,q)
if(grepl("unif", prior.Sigma)) dataIn[[22]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[22]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"tmp4" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp" = array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"b1.tmp" = array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"tmp4" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp" = array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"b1.tmp" = array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
    # inits.internal slots: tmp1-tmp4 = intercepts for b, d, b0, b1;
    # sigma1/scale2 = scale of the random effect
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1,]
if(p.x0>=2) inits.internal[[i]][[7]] <- array(rep(inits[[i]]$b0[2:p.x0,],4), c((p.x0-1),q,4))}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[4]] <- inits[[i]]$b1[1,]
if(p.x1>=2) inits.internal[[i]][[8]] <- array(rep(inits[[i]]$b1[2:p.x1,],4), c((p.x1-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[21]]<- inits[[i]]$sigma
inits.internal[[i]][[22]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "joint_1z01.bug", package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.1z01.R |
joint.1z1 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,inflate1,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",19)
dataIn.name <- c("n","y","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x1.1","p.x1","inflate1",
"zero","link","hyper","prior1","prior2",
"rid","EUID","nEU", "hyper2")
names(dataIn)<- dataIn.name
dataIn[[1]] <- n
dataIn[[2]] <- as.matrix(y)
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x1.1)
dataIn[[9]] <- p.x1
dataIn[[10]]<- inflate1
dataIn[[11]]<- matrix(0,n,q)
dataIn[[12]]<- link
dataIn[[13]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[14]] <- prior1
dataIn[[15]] <- prior2
dataIn[[16]] <- rid
dataIn[[17]] <- EUID
dataIn[[18]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[19]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[19]] <- scale.halft
if(is.null(seed)){
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"b1.tmp"= array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function( rngname, rngseed){
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"b1.tmp"= array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[4]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b1)){
inits.internal[[i]][[3]] <- inits[[i]]$b1[1,]
if(p.x1>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$b1[2:p.x1,],4), c((p.x1-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "joint_1z1.bug", package="zoib")
model <- jags.model(op,n.adapt=0, data=dataIn,inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.1z1.R |
joint.2z <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum,
zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",21)
names(dataIn) <- c("y","n","q","xmu.1","p.xmu","xsum.1","p.xsum",
"z","nz0","qz","m","cumm","zero","link","hyper",
"prior1","prior2","rid","EUID","nEU","hyper2")
dataIn[[1]] <- as.matrix(y)
dataIn[[2]] <- n
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- zdummy
dataIn[[9]] <- nz0
dataIn[[10]]<- qz
dataIn[[11]]<- m
dataIn[[12]]<- c(0,cumsum(m[-nz0]))
dataIn[[13]]<- matrix(0,n,q)
dataIn[[14]]<- link
dataIn[[15]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[16]] <- prior1
dataIn[[17]] <- prior2
dataIn[[18]] <- rid
dataIn[[19]] <- EUID
dataIn[[20]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[21]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[21]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
# 1b, 2d,
# 3 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 4 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[3]] <-
array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[4]] <-
          array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[11]]<- inits[[i]]$sigma
inits.internal[[i]][[12]]<- inits[[i]]$sigma
inits.internal[[i]][[13]]<- runif(qz,0.25,2)
inits.internal[[i]][[14]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
warning('Internal initial values are used')
break}
else{
if(size==2) {
inits.internal[[i]][[15]] <-inits[[i]]$R[2]}
if(size==3){
inits.internal[[i]][[15]] <-inits[[i]]$R[2];
inits.internal[[i]][[16]] <-inits[[i]]$R[4];
inits.internal[[i]][[17]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[15]]*inits.internal[[i]][[16]]-
sqrt((1-inits.internal[[i]][[15]]^2)*(1-inits.internal[[i]][[16]]^2))
upper <- inits.internal[[i]][[15]]*inits.internal[[i]][[16]]+
sqrt((1-inits.internal[[i]][[15]]^2)*(1-inits.internal[[i]][[16]]^2))
if(inits.internal[[i]][[17]]<lower | inits.internal[[i]][[17]]>upper)
inits.internal[[i]][[17]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "joint_2z.bug", package="zoib")
model <- jags.model(op,data=dataIn, n.adapt=0, inits=inits.internal,n.chains=n.chain)
return(model)
}
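
## ---- Illustrative sketch (added; not part of the original source) ----------
## The bounds drawn for rho3 above are exactly the positive-definiteness
## condition for a 3x3 correlation matrix:
##   det(R) = 1 - rho1^2 - rho2^2 - rho3^2 + 2*rho1*rho2*rho3 > 0
## solves to |rho3 - rho1*rho2| < sqrt((1-rho1^2)*(1-rho2^2)).  A quick check:
# rho1 <- 0.3; rho2 <- -0.4
# rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
#                  rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
# R <- matrix(c(1, rho1, rho2,  rho1, 1, rho3,  rho2, rho3, 1), 3, 3)
# all(eigen(R, only.values = TRUE)$values > 0)   # TRUE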
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.2z.R |
joint.2z0 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0,
inflate0, zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",24)
dataIn.name <- c("y","n","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x0.1","p.x0","inflate0",
"z","nz0","qz","m","zero",
"cumm","link","hyper","prior1","prior2",
"rid","EUID","nEU","hyper2")
names(dataIn)<- dataIn.name
dataIn[[1]] <- as.matrix(y)
dataIn[[2]] <- n
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x0.1)
dataIn[[9]] <- p.x0
dataIn[[10]]<- inflate0
dataIn[[11]]<- zdummy
dataIn[[12]]<- nz0
dataIn[[13]]<- qz
dataIn[[14]]<- m
dataIn[[15]]<- matrix(0,n,q)
dataIn[[16]]<- c(0,cumsum(m[-nz0]))
dataIn[[17]]<- link
dataIn[[18]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[19]] <- prior1
dataIn[[20]] <- prior2
dataIn[[21]] <- rid
dataIn[[22]] <- EUID
dataIn[[23]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[24]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[24]] <- scale.halft
if(is.null(seed)){
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp"= array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0 ,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp"= array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0 ,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
    # inits.internal slots: tmp1-tmp3 = intercepts for b, d, b0;
    # sigma.VC1/t = VC-structure scales, scale1/scale2 = UN-structure scales;
    # rho1-rho3 = correlations among the random effects
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[4]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1,]
if(p.x0>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$b0[2:p.x0,],4), c((p.x0-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
inits.internal[[i]][[18]]<- runif(qz,0.25,2)
inits.internal[[i]][[19]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[20]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[20]] <-inits[[i]]$R[2];
inits.internal[[i]][[21]] <-inits[[i]]$R[4];
inits.internal[[i]][[22]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]-
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
upper <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]+
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
if(inits.internal[[i]][[22]]<lower | inits.internal[[i]][[22]]>upper)
inits.internal[[i]][[22]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "joint_2z0.bug", package="zoib")
model <- jags.model(op, data = dataIn, n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.2z0.R |
joint.2z01 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1, x0.1, p.x0,
inflate0, inflate1, zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",27)
names(dataIn) <- c("y","n","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x0.1","p.x0","x1.1",
"p.x1","inflate0","inflate1","z","nz0",
"qz","m","cumm", "zero","link","hyper",
"prior1","prior2", "rid","EUID","nEU","hyper2")
dataIn[[1]] <- y
dataIn[[2]] <- n
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x0.1)
dataIn[[9]] <- p.x0
dataIn[[10]]<- as.matrix(x1.1)
dataIn[[11]]<- p.x1
dataIn[[12]]<- inflate0
dataIn[[13]]<- inflate1
dataIn[[14]]<- zdummy
dataIn[[15]]<- nz0
dataIn[[16]]<- qz
dataIn[[17]]<- m
dataIn[[18]]<- c(0,cumsum(m[-nz0]))
dataIn[[19]]<- matrix(0,n,q)
dataIn[[20]]<- link
dataIn[[21]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[22]] <- prior1
dataIn[[23]] <- prior2
dataIn[[24]] <- rid
dataIn[[25]] <- EUID
dataIn[[26]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[27]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[27]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
# to ensure R is Positive definite
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"tmp4" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp" = array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"b1.tmp" = array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3)}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
# to ensure R is Positive definite
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"tmp4" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1),c((p.xsum-1),q,4)),
"b0.tmp" = array(rnorm((p.x0-1)*4*q,0,0.1), c((p.x0-1),q,4)),
"b1.tmp" = array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"sigmab0.L1" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub0.ARD" = matrix(runif((p.x0-1)*q,0,2),(p.x0-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub0.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed )}
    # inits.internal slots: tmp1-tmp4 = intercepts for b, d, b0, b1;
    # sigma.VC1/t = VC-structure scales, scale1/scale2 = UN-structure scales;
    # rho1-rho3 = correlations among the random effects
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
if(p.xsum>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1,]
if(p.x0>=2) inits.internal[[i]][[7]] <- array(rep(inits[[i]]$b0[2:p.x0,],4), c((p.x0-1),q,4))}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[4]] <- inits[[i]]$b1[1,]
if(p.x1>=2) inits.internal[[i]][[8]] <- array(rep(inits[[i]]$b1[2:p.x1,],4), c((p.x1-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[21]]<- inits[[i]]$sigma
inits.internal[[i]][[22]]<- inits[[i]]$sigma
inits.internal[[i]][[23]]<- runif(qz,0.25,2)
inits.internal[[i]][[24]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[25]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[25]] <-inits[[i]]$R[2];
inits.internal[[i]][[26]] <-inits[[i]]$R[4];
inits.internal[[i]][[27]] <-inits[[i]]$R[5]}
      }
      # constrain rho3 so the user-supplied correlation matrix stays PD
      lower <- inits.internal[[i]][[25]]*inits.internal[[i]][[26]]-
        sqrt((1-inits.internal[[i]][[25]]^2)*(1-inits.internal[[i]][[26]]^2))
      upper <- inits.internal[[i]][[25]]*inits.internal[[i]][[26]]+
        sqrt((1-inits.internal[[i]][[25]]^2)*(1-inits.internal[[i]][[26]]^2))
      if(inits.internal[[i]][[27]]<lower | inits.internal[[i]][[27]]>upper)
        inits.internal[[i]][[27]] <- runif(1, lower, upper)
    }
  }}
op<- system.file("bugs", "joint_2z01.bug", package="zoib")
model <- jags.model(op,data=dataIn, n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.2z01.R |
joint.2z1 <-
function(y, n, q, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,
inflate1, zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",24)
dataIn.name <- c("y","n","q","xmu.1","p.xmu",
"xsum.1","p.xsum","x1.1","p.x1","inflate1",
"z","nz0","qz","m","zero",
"cumm","link","hyper","prior1","prior2",
"rid","EUID","nEU","hyper2")
names(dataIn)<- dataIn.name
dataIn[[1]] <- as.matrix(y)
dataIn[[2]] <- n
dataIn[[3]] <- q
dataIn[[4]] <- as.matrix(xmu.1)
dataIn[[5]] <- p.xmu
dataIn[[6]] <- as.matrix(xsum.1)
dataIn[[7]] <- p.xsum
dataIn[[8]] <- as.matrix(x1.1)
dataIn[[9]] <- p.x1
dataIn[[10]]<- inflate1
dataIn[[11]]<- zdummy
dataIn[[12]]<- nz0
dataIn[[13]]<- qz
dataIn[[14]]<- m
dataIn[[15]]<- matrix(0,n,q)
dataIn[[16]]<- c(0,cumsum(m[-nz0]))
dataIn[[17]]<- link
dataIn[[18]]<- abind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD,along=3)
dataIn[[19]] <- prior1
dataIn[[20]] <- prior2
dataIn[[21]] <- rid
dataIn[[22]] <- EUID
dataIn[[23]] <- nEU
if(grepl("unif", prior.Sigma)) dataIn[[24]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[24]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"b1.tmp"= array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2),(p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2),(p.x1-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(q,0,0.1),
"tmp2" = rnorm(q,0,0.1),
"tmp3" = rnorm(q,0,0.1),
"b.tmp" = array(rnorm((p.xmu-1)*4*q,0,0.1), c((p.xmu-1),q,4)),
"d.tmp" = array(rnorm((p.xsum-1)*4*q,0,0.1), c((p.xsum-1),q,4)),
"b1.tmp"= array(rnorm((p.x1-1)*4*q,0,0.1), c((p.x1-1),q,4)),
"sigmab.L1" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"sigmad.L1" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"sigmab1.L1" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.ARD" = matrix(runif((p.xmu-1)*q,0,2), (p.xmu-1),q),
"taud.ARD" = matrix(runif((p.xsum-1)*q,0,2),(p.xsum-1),q),
"taub1.ARD" = matrix(runif((p.x1-1)*q,0,2), (p.x1-1),q),
"taub.L2" = runif(q,0,2),
"taud.L2" = runif(q,0,2),
"taub1.L2" = runif(q,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
  # 1b, 2d, 3b1,
# 5 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 6 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1,]
if(p.xmu>=2) inits.internal[[i]][[4]] <- array(rep(inits[[i]]$b[2:p.xmu,],4), c((p.xmu-1),q,4))}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1,]
        if(p.xsum>=2) inits.internal[[i]][[5]] <- array(rep(inits[[i]]$d[2:p.xsum,],4), c((p.xsum-1),q,4))}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[3]] <- inits[[i]]$b1[1,]
        if(p.x1>=2) inits.internal[[i]][[6]] <- array(rep(inits[[i]]$b1[2:p.x1,],4), c((p.x1-1),q,4))}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
inits.internal[[i]][[18]]<- runif(qz,0.25,2)
inits.internal[[i]][[19]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[20]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[20]] <-inits[[i]]$R[2];
inits.internal[[i]][[21]] <-inits[[i]]$R[4];
inits.internal[[i]][[22]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]-
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
upper <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]+
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
if(inits.internal[[i]][[22]]<lower | inits.internal[[i]][[22]]>upper)
inits.internal[[i]][[22]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "joint_2z1.bug", package="zoib")
model <- jags.model(op, data=dataIn, n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/joint.2z1.R |
paraplot <- function(para1, para2=NULL, para3=NULL, para4=NULL,
tickx=NULL, jitter=NULL, pch = 1:4, col=1:4,
legpos=NULL, legtext, annotate=FALSE)
{
K <- 4
if(is.null(para4)) K <- K-1
if(is.null(para3)) K <- K-1
if(is.null(para2)) K <- K-1
par(mfrow=c(1,1),mar=c(3,2,1,1))
if(K==1){
p <- nrow(para1)
index <- 1:p
l <- min(para1[,2])
u <- max(para1[,3])
plot(para1[,1], index, xlim=c(l-abs(l)*0.1,u), ylim=c(1,p+1), axes=F,
pch=pch[1], col=col[1], xlab="", ylab="")
segments(para1[,2],index, para1[,3], index, col=col[1])
axis(2, at=NULL, labels=FALSE, tick = FALSE)
if(is.null(tickx)) axis(1,round(seq(l,u,length.out=8),2))
else axis(1,tickx)
if(annotate){
if(is.null(rownames(para1))) stop("Assign row names to the parameter object")
else text(rep(l-abs(l)*0.15,p), 1:p, rownames(para1), cex=1.2)
}
    if(!is.null(legpos))
      legend(legpos[1], legpos[2], legtext, pch=pch[1], col=col[1], lty=1, cex=1.2)
}
else if(K>=2){
mesg <- "Assign row names to the parameter object. \n Use the same row name if two parameter \n objects share the same parameter."
if(K==2) {
if(any(c(is.null(rownames(para1)),is.null(rownames(para2))))) stop(mesg)
l <- min(c(min(para1),min(para2)))
u <- max(c(max(para1),max(para2)))
para1 <- data.frame(para1);
para1 <- cbind(rownames(para1), para1);
colnames(para1) <- c("id","e1","l1","u1")
para2 <- data.frame(para2);
para2 <- cbind(rownames(para2), para2);
colnames(para2) <- c("id","e2","l2","u2")
para<- merge(para1, para2, by="id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
}
if(K==3) {
if(any(c(is.null(rownames(para1)),is.null(rownames(para2)),
is.null(rownames(para3))))) stop(mesg)
l <- min(c(min(para1),min(para2),min(para3)))
u <- max(c(max(para1),max(para2),max(para3)))
para1<- data.frame(para1); id<- rownames(para1);
para1<- cbind(id, para1); colnames(para1) <- c("id","e1","l1","u1")
para2<- data.frame(para2); id<- rownames(para2);
para2<- cbind(id, para2); colnames(para2) <- c("id","e2","l2","u2")
para3<- data.frame(para3); id<- rownames(para3);
para3<- cbind(id, para3); colnames(para3) <- c("id","e3","l3","u3")
para<- merge(para1, para2, by = "id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
para<- merge(para, para3, by = "id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
}
if(K==4) {
if(any(c(is.null(rownames(para1)),is.null(rownames(para2)),
is.null(rownames(para3)),is.null(rownames(para4))))) stop(mesg)
l <- min(c(min(para1),min(para2),min(para3),min(para4)))
u <- max(c(max(para1),max(para2),max(para3),max(para4)))
para1<- data.frame(para1); id<- rownames(para1);
para1<- cbind(id, para1); colnames(para1) <- c("id","e1","l1","u1")
para2<- data.frame(para2); id<- rownames(para2);
para2<- cbind(id, para2); colnames(para2) <- c("id","e2","l2","u2")
para3<- data.frame(para3); id<- rownames(para3);
para3<- cbind(id, para3); colnames(para3) <- c("id","e3","l3","u3")
para4<- data.frame(para4); id<- rownames(para4);
para4<- cbind(id, para4); colnames(para4) <- c("id","e4","l4","u4")
para<- merge(para1, para2, by = "id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
para<- merge(para, para3, by = "id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
para<- merge(para, para4, by = "id", all.x=TRUE,
all.y=TRUE, suffixes = c(" "," "))
}
p<- nrow(para)
index <- 1:p
for(i in 1:p){
for(j in 1:ncol(para)){
if(is.na(para[i,j])) para[i,j]<- -99999
}}
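    # NA entries (a parameter absent from one of the models) were mapped to
    # -99999 above so that they fall outside the plotted range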
if(is.null(jitter)) jitter <- p*0.01
plot(para[,2], index, xlim=c(l-abs(l)*0.3,u),
ylim=c(1,p+max(1,jitter*K)),
col=col[1], pch=pch[1], axes=F, xlab="", ylab="")
segments(para[,3], index, para[,4], index, col=col[1])
for(i in 2:K){
points(para[,1+(i-1)*3+1], index+jitter*i, col=col[i], pch=pch[i])
segments(para[,1+(i-1)*3+2], index+jitter*i,
para[,1+(i-1)*3+3], index+jitter*i, col=col[i])
}
axis(2, at=NULL, labels=FALSE, tick=FALSE)
if(is.null(tickx)) axis(1,round(seq(l,u,length.out=8),2))
else axis(1,tickx)
if(annotate) text(rep(l-abs(l)*0.15,p), 1:p, para$id, cex=1.2)
    if(!is.null(legpos))
      legend(legpos[1],legpos[2],legtext,
             pch=pch[1:K],col=col[1:K],lty=rep(1,K), cex=1.2)
}
}
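## A minimal usage sketch (values and names are illustrative, not from the
## package documentation): each parameter object is a matrix whose columns
## hold the point estimate and the lower and upper interval bounds, with
## row names identifying the parameters.
## est <- cbind(c(0.2, -0.5), c(0.0, -0.9), c(0.4, -0.1))
## rownames(est) <- c("x1", "x2")
## paraplot(est, legpos = c(0.2, 2.5), legtext = "posterior mean (95% CI)",
##          annotate = TRUE)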
| /scratch/gouwar.j/cran-all/cranData/zoib/R/paraplot.R |
pred.zoib<- function(object, xnew, summary=TRUE)
{
model<- object$model
formula <- as.Formula(model)
nterm <- length(formula)
data <- xnew
n<- nrow(xnew)
nchain<- length(object$coeff)
zero.inflation <- !is.null(object$Xb0)
one.inflation <- !is.null(object$Xb1)
xmu <- as.matrix(model.matrix(formula,data=xnew,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=xnew,rhs=2))
if(zero.inflation & one.inflation){
x0 <- as.matrix(model.matrix(formula,data=xnew,rhs=3))
x1 <- as.matrix(model.matrix(formula,data=xnew,rhs=4))
} else if(zero.inflation & !one.inflation){
x0 <- as.matrix(model.matrix(formula,data=xnew,rhs=3))
} else if(!zero.inflation & one.inflation){
x1 <- as.matrix(model.matrix(formula,data=xnew,rhs=4))
}
############## original data design matrix #################
ypred <- NULL
xmu.1 <- object$Xb; p.xmu <- ncol(xmu.1)
xsum.1 <- object$Xd; p.xsum<- ncol(xsum.1)
x0.1 <- object$Xb0
x1.1 <- object$Xb1
nsample <- n
for(k in 1:nchain){
b <- t(object$coeff[[k]][,1:p.xmu])
ypred[[k]]<- matrix(NA,n, ncol(b))
if(is.null(x1.1)& is.null(x0.1)){
for(i in 1:n)
ypred[[k]][i,]<- exp(xmu[i,]%*%b)/(1+exp(xmu[i,]%*%b))
}
if(!is.null(x0.1) & is.null(x1.1)){
p.x0 <- ncol(x0.1)
b0 <- t(object$coeff[[k]][,1:p.x0+p.xmu])
for(i in 1:n)
ypred[[k]][i,]<- exp(xmu[i,]%*%b)/(1+exp(xmu[i,]%*%b))/(1+exp(x0[i,]%*%b0))
}
if(!is.null(x1.1)& is.null(x0.1)){
p.x1 <-ncol(x1.1)
      b1 <- t(object$coeff[[k]][,1:p.x1+p.xmu])
for(i in 1:n)
ypred[[k]][i,]<- (exp(xmu[i,]%*%b)/(1+exp(xmu[i,]%*%b))+exp(x1[i,]%*%b1))/(1+exp(x1[i,]%*%b1))
}
if(!is.null(x0.1) & !is.null(x1.1)){
p.x0 <- ncol(x0.1)
b0 <- t(object$coeff[[k]][,1:p.x0+p.xmu])
p.x1 <-ncol(x1.1)
b1 <- t(object$coeff[[k]][,1:p.x1+p.xmu+p.x0])
for(i in 1:n)
ypred[[k]][i,]<- (exp(xmu[i,]%*%b)/(1+exp(xmu[i,]%*%b))+ exp(x1[i,]%*%b1) )/((1+exp(x0[i,]%*%b0))*(1+exp(x1[i,]%*%b1)))
}
}
if(summary){
summ<- matrix(0,n,8)
colnames(summ)<- c("n",'mean','SD','min','max','med','2.5%','97.5%')
for(i in 1:n){
ypredi <- ypred[[1]][i,]
      if(nchain>1) for(k in 2:nchain) ypredi<- c(ypredi,ypred[[k]][i,])
summ[i,]<- c(length(ypredi),mean(ypredi),sd(ypredi),min(ypredi),max(ypredi),
median(ypredi),quantile(ypredi,0.025),quantile(ypredi,0.975))
}
}
  if(summary) return(list(pred=ypred, summary=summ))
  return(list(pred=ypred))
}
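## A hedged usage sketch (assumes `fit` is an object returned by zoib() and
## `xnew` is a data frame of new covariate values with the same column names
## as the fitting data; both object names are illustrative):
## pred <- pred.zoib(fit, xnew)
## pred$summary   # per-row n, mean, SD, min, max, median, 2.5% and 97.5% quantiles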
| /scratch/gouwar.j/cran-all/cranData/zoib/R/pred.zoib.R |
sep.1z <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",15)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1", "p.xsum",
"zero","link","hyper","prior1","prior2","rid",
"EUID","nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- rep(0,n)
dataIn[[8]] <- link
dataIn[[9]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
if(grepl("unif",prior.Sigma)) dataIn[[15]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[15]] <- scale.halft
dataIn[[10]] <- prior1
dataIn[[11]] <- prior2
dataIn[[12]] <- rid
dataIn[[13]] <- EUID
dataIn[[14]] <- nEU
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[3]] <-
matrix(rep(inits[[i]]$b[2:p.xmu],4),ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[4]] <-
matrix(rep(inits[[i]]$d[2:p.xsum],4), ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[11]]<- inits[[i]]$sigma
inits.internal[[i]][[12]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "sep_1z.bug",package="zoib")
model <- jags.model(op, data=dataIn,n.adapt=0,inits=inits.internal,n.chains=n.chain)
return(model)
}
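## The sep.1z* builders (and the sep.2z* builders further below) follow the
## same template: they differ only in which inflation components (zero and/or
## one) enter the JAGS model and, for the 2z variants, in allowing multiple
## random-effect covariates with correlated effects (rho1, rho2, rho3).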
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.1z.R |
sep.1z0 <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",17)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1","p.xsum","x0.1","p.x0",
"zero","link","hyper","prior1","prior2","rid","EUID",
"nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x0.1)
dataIn[[8]] <- p.x0
dataIn[[9]]<- rep(0,n)
dataIn[[10]]<- link
dataIn[[11]]<- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
if(grepl("unif",prior.Sigma)) dataIn[[17]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[17]] <- scale.halft
dataIn[[12]] <- prior1
dataIn[[13]] <- prior2
dataIn[[14]] <- rid
dataIn[[15]] <- EUID
dataIn[[16]] <- nEU
if(is.null(seed)){
init <- function( rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function( rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b0[2:p.x0],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
}
}}
op<- system.file("bugs", "sep_1z0.bug",package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal,n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.1z0.R |
sep.1z01 <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1, x0.1, p.x0,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",19)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1","p.xsum","x0.1","p.x0",
"x1.1","p.x1","zero","link","hyper","prior1","prior2","rid",
"EUID", "nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x0.1)
dataIn[[8]] <- p.x0
dataIn[[9]] <- as.matrix(x1.1)
dataIn[[10]]<- p.x1
dataIn[[11]]<- rep(0,n)
dataIn[[12]]<- link
dataIn[[13]]<- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
if(grepl("unif",prior.Sigma)) dataIn[[19]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[19]] <- scale.halft
dataIn[[14]] <- prior1
dataIn[[15]] <- prior2
dataIn[[16]] <- rid
dataIn[[17]] <- EUID
dataIn[[18]] <- nEU
if(is.null(seed)){
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
    # 1b, 2d, 3b0, 4b1,
# 5 sigma1 scale2
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[7]] <- matrix(rep(inits[[i]]$b0[2:p.x0],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[4]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[8]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[21]]<- inits[[i]]$sigma
inits.internal[[i]][[22]]<- inits[[i]]$sigma}
}}
op<- system.file("bugs", "sep_1z01.bug",package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0, inits=inits.internal,n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.1z01.R |
sep.1z1 <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,
rid, EUID, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",17)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1", "p.xsum",
"x1.1","p.x1", "zero","link","hyper","prior1",
"prior2","rid","EUID", "nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x1.1)
dataIn[[8]] <- p.x1
dataIn[[9]]<- rep(0,n)
dataIn[[10]]<- link
dataIn[[11]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
if(grepl("unif",prior.Sigma)) dataIn[[17]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[17]] <- scale.halft
dataIn[[12]] <- prior1
dataIn[[13]] <- prior2
dataIn[[14]] <- rid
dataIn[[15]] <- EUID
dataIn[[16]] <- nEU
if(is.null(seed)){
init <- function(rngname, rngseed ){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function( rngname, rngseed){
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma1" = runif(1,0.25,1),
"scale2" = runif(1,0.25,1),
.RNG.name = rngname,
.RNG.seed = rngseed)}
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[3]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
}
}}
op<- system.file("bugs", "sep_1z1.bug",package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal,n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.1z1.R |
sep.2z <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum,
zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",20)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1","p.xsum","z",
"nz0","qz","m","cumm", "zero","link","prior1",
"prior2","hyper","rid","EUID","nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- zdummy
dataIn[[8]] <- nz0
dataIn[[9]] <- qz
dataIn[[10]]<- m
dataIn[[11]]<- c(0,cumsum(m[-nz0]))
dataIn[[12]]<- rep(0,n)
dataIn[[13]]<- link
dataIn[[14]] <- prior1
dataIn[[15]] <- prior2
dataIn[[16]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[17]] <- rid
dataIn[[18]] <- EUID
dataIn[[19]] <- nEU
if(grepl("unif",prior.Sigma)) dataIn[[20]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[20]] <- scale.halft
if(is.null(seed)){
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
# 1b, 2d,
# 3 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 4 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[3]] <- matrix(rep(inits[[i]]$b[2:p.xmu,1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$d[2:p.xsum,1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[11]]<- inits[[i]]$sigma
inits.internal[[i]][[12]]<- inits[[i]]$sigma
inits.internal[[i]][[13]]<- runif(qz,0.25,2)
inits.internal[[i]][[14]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[15]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[15]] <-inits[[i]]$R[2];
inits.internal[[i]][[16]] <-inits[[i]]$R[4];
inits.internal[[i]][[17]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[15]]*inits.internal[[i]][[16]]-
sqrt((1-inits.internal[[i]][[15]]^2)*(1-inits.internal[[i]][[16]]^2))
upper <- inits.internal[[i]][[15]]*inits.internal[[i]][[16]]+
sqrt((1-inits.internal[[i]][[15]]^2)*(1-inits.internal[[i]][[16]]^2))
if(inits.internal[[i]][[17]]<lower | inits.internal[[i]][[17]]>upper)
inits.internal[[i]][[17]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "sep_2z.bug", package="zoib")
model <- jags.model(op, data=dataIn,n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.2z.R |
sep.2z0 <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0,
zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",22)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1",
"p.xsum","x0.1", "p.x0","z","nz0",
"qz", "m","cumm", "zero","link",
"hyper","prior1","prior2","rid","EUID","nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x0.1)
dataIn[[8]] <- p.x0
dataIn[[9]] <- zdummy
dataIn[[10]]<- nz0
dataIn[[11]]<- qz
dataIn[[12]]<- m
dataIn[[13]]<- c(0,cumsum(m[-nz0]))
dataIn[[14]]<- rep(0,n)
dataIn[[15]]<- link
dataIn[[16]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[17]] <- prior1
dataIn[[18]] <- prior2
dataIn[[19]] <- rid
dataIn[[20]] <- EUID
dataIn[[21]] <- nEU
if(grepl("unif",prior.Sigma)) dataIn[[22]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[22]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1), ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1), ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
    # 1b, 2d, 3b0,
# 5 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 6 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu,1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum,1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b0[2:p.x0,1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
inits.internal[[i]][[18]]<- runif(qz,0.25,2)
inits.internal[[i]][[19]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[20]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[20]] <-inits[[i]]$R[2];
inits.internal[[i]][[21]] <-inits[[i]]$R[4];
inits.internal[[i]][[22]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]-
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
upper <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]+
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
if(inits.internal[[i]][[22]]<lower | inits.internal[[i]][[22]]>upper)
inits.internal[[i]][[22]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "sep_2z0.bug",package="zoib")
model<- jags.model(op,data=dataIn,n.adapt=0,inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.2z0.R |
sep.2z01 <-
function(y, n, xmu.1,p.xmu, xsum.1,p.xsum, x0.1,p.x0, x1.1,p.x1,
zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",24)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1","p.xsum","x0.1","p.x0",
"x1.1", "p.x1","z","nz0","qz","m","cumm","zero","link",
"hyper","prior1","prior2","rid","EUID","nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x0.1)
dataIn[[8]] <- p.x0
dataIn[[9]] <- as.matrix(x1.1)
dataIn[[10]]<- p.x1
dataIn[[11]]<- zdummy
dataIn[[12]]<- nz0
dataIn[[13]]<- qz
dataIn[[14]]<- m
dataIn[[15]]<- c(0,cumsum(m[-nz0]))
dataIn[[16]]<- rep(0,n)
dataIn[[17]]<- link
dataIn[[18]] <- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[19]] <- prior1
dataIn[[20]] <- prior2
dataIn[[21]] <- rid
dataIn[[22]] <- EUID
dataIn[[23]] <- nEU
if(grepl("unif",prior.Sigma)) dataIn[[24]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[24]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
# to ensure R is Positive definite
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
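      # (for a 3x3 correlation matrix with off-diagonals rho1, rho2, rho3 the
      # determinant 1 - rho1^2 - rho2^2 - rho3^2 + 2*rho1*rho2*rho3 is positive
      # exactly when rho3 lies within rho1*rho2 +/- sqrt((1-rho1^2)*(1-rho2^2)))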
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
# to ensure R is Positive definite
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"tmp4" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b0.tmp" = matrix(rnorm((p.x0-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1),ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab0.L1" = runif((p.x0-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub0.ARD" = runif((p.x0-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub0.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
    # 1b, 2d, 3b0, 4b1,
# 5 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 6 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b0)) {
inits.internal[[i]][[3]] <- inits[[i]]$b0[1]
if(p.x0>=2) inits.internal[[i]][[7]] <- matrix(rep(inits[[i]]$b0[2:p.x0],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[4]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[8]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[21]]<- inits[[i]]$sigma
inits.internal[[i]][[22]]<- inits[[i]]$sigma
inits.internal[[i]][[23]]<- runif(qz,0.25,2)
inits.internal[[i]][[24]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[25]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[25]] <-inits[[i]]$R[2];
inits.internal[[i]][[26]] <-inits[[i]]$R[4];
inits.internal[[i]][[27]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[25]]*inits.internal[[i]][[26]]-
sqrt((1-inits.internal[[i]][[25]]^2)*(1-inits.internal[[i]][[26]]^2))
upper <- inits.internal[[i]][[25]]*inits.internal[[i]][[26]]+
sqrt((1-inits.internal[[i]][[25]]^2)*(1-inits.internal[[i]][[26]]^2))
if(inits.internal[[i]][[27]]<lower | inits.internal[[i]][[27]]>upper)
inits.internal[[i]][[27]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "sep_2z01.bug",package="zoib")
model <- jags.model(op,data=dataIn,n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.2z01.R |
sep.2z1 <-
function(y, n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,
zdummy, qz,nz0, m, rid, EUID, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2,lambda.ARD,
scale.unif, scale.halft, link, n.chain, inits, seed)
{
dataIn <- vector("list",22)
names(dataIn) <- c("n","y","xmu.1","p.xmu","xsum.1","p.xsum","x1.1",
"p.x1","z","nz0","qz","m","cumm","zero","link",
"hyper","prior1","prior2","rid","EUID","nEU","hyper2")
dataIn[[1]] <- n
dataIn[[2]] <- y
dataIn[[3]] <- as.matrix(xmu.1)
dataIn[[4]] <- p.xmu
dataIn[[5]] <- as.matrix(xsum.1)
dataIn[[6]] <- p.xsum
dataIn[[7]] <- as.matrix(x1.1)
dataIn[[8]] <- p.x1
dataIn[[9]] <- zdummy
dataIn[[10]]<- nz0
dataIn[[11]]<- qz
dataIn[[12]]<- m
dataIn[[13]]<- c(0,cumsum(m[-nz0]))
dataIn[[14]]<- rep(0,n)
dataIn[[15]]<- link
dataIn[[16]]<- as.matrix(cbind(prec.int,prec.DN,lambda.L1,lambda.L2,lambda.ARD))
dataIn[[17]] <- prior1
dataIn[[18]] <- prior2
dataIn[[19]] <- rid
dataIn[[20]] <- EUID
dataIn[[21]] <- nEU
if(grepl("unif",prior.Sigma)) dataIn[[22]] <- scale.unif
if(grepl("halfcauchy",prior.Sigma)) dataIn[[22]] <- scale.halft
if(is.null(seed)){
init <- function( rngname, rngseed){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1), ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3))}
inits.internal <- list(init( ));
if(n.chain >= 2) {
for(j in 2:n.chain) inits.internal <- c(inits.internal,list(init()))}
} else{
init <- function(rngname, rngseed ){
rho1 <- runif(1,-0.5,0.5)
rho2 <- runif(1,-0.5,0.5)
rho3 <- runif(1, rho1*rho2 - sqrt((1-rho1^2)*(1-rho2^2)),
rho1*rho2 + sqrt((1-rho1^2)*(1-rho2^2)))
return(
list("tmp1" = rnorm(1,0,0.1),
"tmp2" = rnorm(1,0,0.1),
"tmp3" = rnorm(1,0,0.1),
"b.tmp" = matrix(rnorm((p.xmu-1)*4,0,0.1),ncol=4),
"d.tmp" = matrix(rnorm((p.xsum-1)*4,0,0.1),ncol=4),
"b1.tmp" = matrix(rnorm((p.x1-1)*4,0,0.1), ncol=4),
"sigmab.L1" = runif((p.xmu-1),0,2),
"sigmad.L1" = runif((p.xsum-1),0,2),
"sigmab1.L1" = runif((p.x1-1),0,2),
"taub.ARD" = runif((p.xmu-1),0,2),
"taud.ARD" = runif((p.xsum-1),0,2),
"taub1.ARD" = runif((p.x1-1),0,2),
"taub.L2" = runif(1,0,2),
"taud.L2" = runif(1,0,2),
"taub1.L2" = runif(1,0,2),
"sigma.VC1" = runif(nz0,0.25,2),
"t" = runif(nz0,0.25,1),
"scale1" = runif(qz,0.25,2),
"scale2" = runif(qz,0.25,2),
"rho1" = rho1,
"rho2" = rho2,
"rho3" = rho3,
.RNG.name = rngname,
.RNG.seed = rngseed))}
    # 1b, 2d, 3b1,
# 5 SigmaVC (sigma.VC1 or t),SigmaUN (scale1 or scale2),
# 6 rho1,2,3
set.seed(seed[1]); inits.internal <- list(init("base::Super-Duper", seed[1]));
if(n.chain >= 2) {
for(j in 2:n.chain){
set.seed(seed[j]);
inits.internal <- c(inits.internal,list(init("base::Wichmann-Hill",seed[j])))}}
}
if(!is.null(inits)){
for(i in 1:n.chain){
if(!is.null(inits[[i]]$b)) {
inits.internal[[i]][[1]] <- inits[[i]]$b[1]
if(p.xmu>=2) inits.internal[[i]][[4]] <- matrix(rep(inits[[i]]$b[2:p.xmu],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$d)) {
inits.internal[[i]][[2]] <- inits[[i]]$d[1]
if(p.xsum>=2) inits.internal[[i]][[5]] <- matrix(rep(inits[[i]]$d[2:p.xsum],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$b1)) {
inits.internal[[i]][[3]] <- inits[[i]]$b1[1]
if(p.x1>=2) inits.internal[[i]][[6]] <- matrix(rep(inits[[i]]$b1[2:p.x1],4),
ncol=4, byrow=FALSE)}
if(!is.null(inits[[i]]$sigma)) {
inits.internal[[i]][[16]]<- inits[[i]]$sigma
inits.internal[[i]][[17]]<- inits[[i]]$sigma
inits.internal[[i]][[18]]<- runif(qz,0.25,2)
inits.internal[[i]][[19]]<- runif(qz,0.25,2)
}
# check PD of the initial R matrix
if(!is.null(inits[[i]]$R)) {
notuse <-FALSE
Rele <- inits[[i]]$R
size <- (sqrt(1+8*length(Rele))-1)/2 # (# of random effects)
R <- diag(size)
R[upper.tri(R, diag=TRUE)] <- Rele
R <- R + t(R) - diag(diag(R))
pd <- all(eigen(R)$values>0)
if(!pd) {
notuse <- TRUE
warning('the specified initial correlation matrix is not positive definite')
          warning('Internal initial values are used')
break}
else{
if(size==2) inits.internal[[i]][[20]] <-inits[[i]]$R[2]
if(size==3){
inits.internal[[i]][[20]] <-inits[[i]]$R[2];
inits.internal[[i]][[21]] <-inits[[i]]$R[4];
inits.internal[[i]][[22]] <-inits[[i]]$R[5]}
}
lower <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]-
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
upper <- inits.internal[[i]][[20]]*inits.internal[[i]][[21]]+
sqrt((1-inits.internal[[i]][[20]]^2)*(1-inits.internal[[i]][[21]]^2))
if(inits.internal[[i]][[22]]<lower | inits.internal[[i]][[22]]>upper)
inits.internal[[i]][[22]] <- runif(1, lower, upper)
}
}}
op<- system.file("bugs", "sep_2z1.bug",package="zoib")
model<- jags.model(op,data=dataIn,n.adapt=0, inits=inits.internal, n.chains=n.chain)
return(model)
}
| /scratch/gouwar.j/cran-all/cranData/zoib/R/sep.2z1.R |
zoib <-
function(
model,
data,
n.response = 1,
joint = TRUE,
zero.inflation = TRUE,
one.inflation = TRUE,
random = 0,
EUID,
link.mu="logit",
link.x0="logit",
link.x1="logit",
prec.int = matrix(1e-3, n.response, 4),
prior.beta = matrix("DN", n.response, 4),
prec.DN = matrix(1e-3, n.response, 4),
lambda.L2 = matrix(1e-3, n.response, 4),
lambda.L1 = matrix(1e-3, n.response, 4),
lambda.ARD = matrix(1e-3, n.response, 4),
prior.Sigma = "VC.unif",
scale.unif = 20,
scale.halfcauchy = 20,
n.chain = 2,
n.iter = 5000,
n.burn = 200,
n.thin = 2,
inits = NULL,
seeds = NULL
)
{
  if(!is.matrix(prec.int))
    stop("prec.int should be in the format of a matrix, even if it has only one row")
  if(!is.matrix(prior.beta))
    stop("prior.beta should be in the format of a matrix, even if it has only one row")
  if(!is.matrix(prec.DN))
    stop("prec.DN should be in the format of a matrix, even if it has only one row")
  if(!is.matrix(lambda.L2))
    stop("lambda.L2 should be in the format of a matrix, even if it has only one row")
  if(!is.matrix(lambda.L1))
    stop("lambda.L1 should be in the format of a matrix, even if it has only one row")
  if(!is.matrix(lambda.ARD))
    stop("lambda.ARD should be in the format of a matrix, even if it has only one row")
  # keep the model equation for the returned output; otherwise only the
  # jags model would be available by default
model.format <- model
cl <- match.call()
if(missing(data)) data <- environment(model)
mf <- match.call(expand.dots = FALSE)
m <- match(c("model", "data"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
formula <- as.Formula(model)
nterm <- length(formula)
mf$formula <- formula
mod <- model.frame(formula,data=data)
y <- NULL
for(i in 1:nterm[1L]){
y <- cbind(y, as.matrix(model.part(formula,data=mod,lhs=i)) )
}
x1 <- NULL; x0 <- NULL;
if(nterm[2L]==5L){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
x0 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
x1 <- as.matrix(model.matrix(formula,data=mod,rhs=4))
z <- as.matrix(model.matrix(formula,data=mod,rhs=5))
zname <- c("int",colnames(z))
Fc <- as.character(formula)
chai <- strsplit(Fc[3], " ")
bar.pos <- which(chai[[1]]=="|")
rand.part <- chai[[1]][(bar.pos[4]+1):length(chai[[1]])]
zname <- rand.part[which(rand.part!="+")]
if(all(zname=='1')) zname='int'
else zname <- c("int",zname)
}
else if(nterm[2L]==4L){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
if(random==0){
x0 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
x1 <- as.matrix(model.matrix(formula,data=mod,rhs=4))
}
else{
if(any(one.inflation) & all(!zero.inflation))
x1 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
      else if(any(zero.inflation) & all(!one.inflation))
x0 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
z <- as.matrix(model.matrix(formula,data=mod,rhs=4))
zname <- c("int",colnames(z))
Fc <- as.character(formula)
chai <- strsplit(Fc[3], " ")
bar.pos <- which(chai[[1]]=="|")
rand.part <- chai[[1]][(bar.pos[3]+1):length(chai[[1]])]
zname <- rand.part[which(rand.part!="+")]
if(all(zname=='1')) zname='int'
else zname <- c("int",zname)
}
}
else if(nterm[2L]==3L){
if(random!=0){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
Fc <- as.character(formula)
chai <- strsplit(Fc[3], " ")
bar.pos <- which(chai[[1]]=="|")
rand.part <- chai[[1]][(bar.pos[2]+1):length(chai[[1]])]
zname <- rand.part[which(rand.part!="+")]
if(all(zname=='1')) zname='int'
else zname <- c("int",zname)
}
else{
if(any(one.inflation) & all(!zero.inflation)){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
x1 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
}
else if(any(zero.inflation) & all(!one.inflation)){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
x0 <- as.matrix(model.matrix(formula,data=mod,rhs=3))
}
}
}
else if(nterm[2L]==2L){
xmu <- as.matrix(model.matrix(formula,data=mod,rhs=1))
xsum <- as.matrix(model.matrix(formula,data=mod,rhs=2))
}
  else if(nterm[2L]<2L){
    stop(paste("The right side of the model should have at least two parts,",
               "i.e., two link functions: one for the mean of the beta",
               "distribution alpha/(alpha+beta) and one for the sum",
               "(alpha+beta); please re-specify the model"))
  }
if(is.null(ncol(y))) y<-as.matrix(y,ncol=1)
n <- nrow(y)
q <- ncol(y)
# link choice
link <- matrix(0,3,3)
if(link.mu=="logit") link[1,1]<- 1
else if(link.mu=="cloglog") link[1,2]<- 1
else if(link.mu=="probit") link[1,3]<- 1
if(link.x0=="logit") link[2,1]<- 1
else if(link.x0=="cloglog") link[2,2]<- 1
else if(link.x0=="probit") link[2,3]<- 1
if(link.x1=="logit") link[3,1]<- 1
else if(link.x1=="cloglog") link[3,2]<- 1
else if(link.x1=="probit") link[3,3]<- 1
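  # link is a 3x3 indicator matrix: rows correspond to the linear predictors
  # for the mean of the beta distribution, Pr(y=0), and Pr(y=1); columns mark
  # which of logit, cloglog, or probit is used for each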
  # prior.beta choice: rows index the 4 linear predictors, columns the 4
  # prior options (DN, L1, L2, ARD)
prior1 <- array(0,c(4,4,q))
for(j in 1:q){
for(i in 1:4){
if(prior.beta[j,i]=="DN" | prior.beta[j,i]=="D") prior1[i,1,j]<-1
else if(prior.beta[j,i]=="L1") prior1[i,2,j]<-1
else if(prior.beta[j,i]=="L2") prior1[i,3,j]<-1
else if(prior.beta[j,i]=="ARD"|prior.beta[j,i]=="A") prior1[i,4,j]<-1
}
}
  # prior.Sigma choice
  prior2 <- matrix(0,2,2)
  if(prior.Sigma=="VC.unif") prior2[1,1]<-1
  else if(prior.Sigma=="VC.halfcauchy") prior2[1,2]<-1
  else if(prior.Sigma=="UN.unif") prior2[2,1]<-1
  else if(prior.Sigma=="UN.halfcauchy") prior2[2,2]<-1
# random effect choice
  rid <- rep(0,4)
if(any(random==c(1,12,13,14,123,124,134,1234))) rid[1]<-1
if(any(random==c(2,12,23,24,123,124,234,1234))) rid[2]<-1
if(any(random==c(3,13,23,34,123,134,234,1234))) rid[3]<-1
if(any(random==c(4,14,24,34,124,134,234,1234))) rid[4]<-1
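  # e.g., random = 13 requests random effects in the 1st and 3rd linear
  # predictors (the mean and Pr(y=0), matching b and b0 in the banner below),
  # giving rid = c(1,0,1,0); random = 1234 turns all four on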
# x-mu
xmu.1 <- xmu; p.xmu <- ncol(xmu.1)
if(p.xmu >1){
mean.mu <- apply(as.data.frame(xmu.1[,2:p.xmu]),2,mean)
sd.mu <- apply(as.data.frame(xmu.1[,2:p.xmu]),2,sd)
for(i in 2:p.xmu) xmu.1[,i] <- (xmu.1[,i]-mean.mu[i-1])/sd.mu[i-1]
}
# x-sum
xsum.1 <- xsum; p.xsum <- ncol(xsum)
if(p.xsum>1) {
mean.sum <- apply(as.data.frame(xsum.1[,2:p.xsum]),2,mean)
sd.sum <- apply(as.data.frame(xsum.1[,2:p.xsum]),2,sd)
for(i in 2:p.xsum) xsum.1[,i] <- (xsum.1[,i]-mean.sum[i-1])/sd.sum[i-1]
}
# x0
if(!is.null(x0)){
x0.1 <- x0; p.x0 <- ncol(x0.1)
if(p.x0>1) {
mean0<- apply(as.data.frame(x0.1[,2:p.x0]),2,mean)
sd0 <- apply(as.data.frame(x0.1[,2:p.x0]),2,sd)
for(i in 2:p.x0) x0.1[,i] <- (x0.1[,i]-mean0[i-1])/sd0[i-1]
}
}
# x1
if(!is.null(x1)){
x1.1 <- x1; p.x1 <-ncol(x1.1)
if(p.x1>1) {
mean1<- apply(as.data.frame(x1.1[,2:p.x1]),2,mean)
sd1 <- apply(as.data.frame(x1.1[,2:p.x1]),2,sd)
for(i in 2:p.x1) x1.1[,i] <- (x1.1[,i]-mean1[i-1])/sd1[i-1]
}
}
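  # Non-intercept covariates are centered and scaled above to help MCMC
  # mixing; the posterior draws are mapped back to the raw covariate scale
  # after sampling (see the back-transformation loops further below)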
print("***************************************************************************")
print("* List of parameter for which the posterior samples are generated *")
print("* b: regression coeff in the linear predictor for the mean of beta dist'n *")
print("* d: regression coeff in the linear predictor for the sum of the two *")
print("* shape parameters in the beta distribution *")
print("* b0: regression coeff in the linear predictor for Prob(y=0) *")
print("* b1: regression coeff in the linear predictor for Prob(y=1) *")
print("***************************************************************************")
################################################################
# 1- 4: fixed effects model
################################################################
if(random==0)
{
model <- vector("list", q)
    post.samples <- vector("list", q)
    post.samples.raw <- vector("list", q)
for(i in 1:q)
{
if(one.inflation[i] & zero.inflation[i] ){
model[[i]]<- fixed01(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, x1.1, p.x1,
prior1[,,i], prec.int[i,], prec.DN[i,],lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],link,n.chain, inits, seeds)
para.list <- c("b","d","b0","b1")}
else if(zero.inflation[i] & !one.inflation[i] ){
model[[i]]<- fixed0(y[,i],n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, prior1[,,i],
prec.int[i,], prec.DN[i,], lambda.L1[i,], lambda.L2[i,],
lambda.ARD[i,],link, n.chain, inits, seeds)
para.list <- c("b","d","b0")}
else if(one.inflation[i] & !zero.inflation[i] ){
model[[i]]<- fixed1(y[,i],n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,prior1[,,i],
prec.int[i,], prec.DN[i,],lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],link, n.chain, inits, seeds)
para.list <- c("b","d","b1")}
else if(!one.inflation[i] & !zero.inflation[i] ){
model[[i]]<- fixed(y[,i],n, xmu.1, p.xmu, xsum.1, p.xsum, prior1[,,i],
prec.int[i,], prec.DN[i,],lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,], link, n.chain, inits, seeds)
para.list <- c("b","d")}
para.list <- c(para.list,"ypred") #"phi"
#print(para.list)
post.samples[[i]]<- coda.samples(model[[i]], para.list, n.iter=n.iter, thin=n.thin)
dim.para <- dim(post.samples[[i]][[1]])
name.para <- colnames(post.samples[[i]][[1]])
#print(name.para) b first, followed by b0, b1, and d.
      post.samples.raw[[i]] <- post.samples[[i]]  # copy chain set i, then back-transform in place
for(k in 1:dim.para[2]){
if(grepl("b0",name.para[k])){
if(p.x0 > 1){
MEAN <- matrix(rep(mean0,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd0,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x0-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.x0-1)] <- tmp[,(k+1):(k+p.x0-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b1",name.para[k])){
if(p.x1 > 1){
MEAN <- matrix(rep(mean1,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd1,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x1-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.x1-1)] <- tmp[,(k+1):(k+p.x1-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b",name.para[k])){
if(p.xmu > 1){
MEAN <- matrix(rep(mean.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.xmu-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.xmu-1)] <- tmp[,(k+1):(k+p.xmu-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("d",name.para[k])){
if(p.xsum > 1){
MEAN <- matrix(rep(mean.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.xsum-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.xsum-1)] <- tmp[,(k+1):(k+p.xsum-1)]/SD}}
break}}
}
if(q==1) {
model<- model[[1]];
post.samples<- post.samples[[1]];
post.samples.raw<- post.samples.raw[[1]]}
}
########## random effect models ############################
# nz0: # of raw random variables
# m: a vector/list with nz0 element that contains the number
# of levels for each random effect. sum(m) would give the
# dimension of the random effects
# qz: the number of columns of zdummy
#############################################################
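# Worked example (illustrative): if zname = c("int", "site") and site is a
# factor with 3 levels, then nz0 = 2, m = c(1, 3), qz = 4, and zdummy holds
# an all-ones intercept column followed by three 0/1 indicators for site.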
else
{
# -----------------------------------------------
EUID.m <- unique(EUID)
nEU <- length(EUID.m)
EUID1 <- rep(0,n)
for(i in 1:n){
for(j in 1:nEU){
if(EUID[i]==EUID.m[j]) {EUID1[i]<- j; break}
}
}
# ------------------------------------------------
if(all(zname=='int')) nz0<- 1
else
{
nz0 <- length(zname)
nms<- colnames(data)
z<- rep(1,n)
for(k1 in 1:nz0){
for(k2 in 1:length(nms)) {
if(zname[k1]== nms[k2]) {
dk2 <- data[,k2]
names(dk2) <-nms[k2]
z <- data.frame(z, dk2); break }
}
}
zuniq <- vector("list",nz0)
m <- rep(1,nz0)
for(i in 1:nz0){
if(is.factor(z[,i])|is.character(z[,i])){
zuniq[[i]] <- unique(z[,i]);
m[i] <- length(zuniq[[i]])
}
else{
zuniq[[i]] <- z[,i]
}
}
qz <- sum(m)
zdummy <- matrix(0,n,qz)
id <- 0
zdummy[,1]<- 1
for(j in 2:nz0)
{
id <- id+m[j-1]
if(is.factor(z[,j])|is.character(z[,j])){
for(i in 1:nrow(z)){
for(k in 1:m[j]){
if(z[i,j]==zuniq[[j]][k]) {zdummy[i,id+k]<- 1; break}
}}}
else{ zdummy[,id+1] <- z[,j]}}
}
########################################################
# 5-12: random, separate modeling
########################################################
# q: # of y variables
# z: random variables, before dummy coding
# EUID: ID of the experimental units of all rows in the data sets
# nEU: # of independent experimental units.
# nz0: # of raw random variables
# qz: # of zdummy variable
# rid: a vector of length 4; if random effects enter the linear predictor
#      for mu, the 1st element of rid = 1; if they enter the predictor for
#      the sum of the shape parameters, the 2nd element = 1; and so on.
if(!joint)
{
model <- vector("list", q)
post.samples <- vector("list", q)
for(i in 1:q)
{
if(nz0>1){
if(one.inflation[i] & zero.inflation[i]){
model[[i]]<- sep.2z01(y[,i],n, xmu.1, p.xmu, xsum.1,p.xsum, x0.1,p.x0, x1.1,p.x1,
zdummy,qz,nz0,m, rid, EUID1, nEU, prior1[,,i], prior2, prior.beta,
prior.Sigma, prec.int[i,], prec.DN[i,], lambda.L1[i,],
lambda.L2[i,],lambda.ARD[i,], scale.unif, scale.halfcauchy,link,
n.chain, inits, seeds)
para.list <- c("b","d","b0","b1","Sigma")}
else if(!one.inflation[i] & zero.inflation[i]){
model[[i]]<- sep.2z0(y[,i],n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, zdummy,
qz,nz0, m, rid, EUID1, nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],
lambda.L2[i,],lambda.ARD[i,],scale.unif, scale.halfcauchy,link,
n.chain, inits, seeds)
para.list <- c("b","d", "b0","Sigma")}
else if(one.inflation[i] & !zero.inflation[i]){
model[[i]]<- sep.2z1(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1, zdummy,
qz,nz0, m, rid, EUID1, nEU, prior1[,,i], prior2, prior.beta,
prior.Sigma, prec.int[i,], prec.DN[i,], lambda.L1[i,],
lambda.L2[i,],lambda.ARD[i,],scale.unif, scale.halfcauchy,link,
n.chain, inits, seeds)
para.list <- c("b","d", "b1","Sigma")}
else if(!one.inflation[i] & !zero.inflation[i]){
model[[i]]<- sep.2z(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, zdummy, qz,nz0, m,
rid, EUID1, nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "Sigma")}
}
else
{
if(one.inflation[i] & zero.inflation[i]){
model[[i]]<- sep.1z01(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, x1.1, p.x1,
rid, EUID1, nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "b0","b1","sigma") }
else if(one.inflation[i] & !zero.inflation[i]){
model[[i]]<- sep.1z1(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,
rid, EUID1, nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "b1","sigma") }
else if(!one.inflation[i] & zero.inflation[i]){
model[[i]]<- sep.1z0(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0,
rid, EUID1, nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "b0","sigma")}
else if(!one.inflation[i] & !zero.inflation[i]) {
model[[i]]<- sep.1z(y[,i], n, xmu.1, p.xmu, xsum.1, p.xsum, rid,EUID1,
nEU, prior1[,,i], prior2, prior.beta, prior.Sigma,
prec.int[i,], prec.DN[i,], lambda.L1[i,],lambda.L2[i,],
lambda.ARD[i,],scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "sigma")}
}
para.list <- c(para.list, "ypred") # "phi"
#print(para.list)
post.samples[[i]]<- coda.samples(model[[i]], para.list, thin=n.thin, n.iter=n.iter)
# print(post.samples)
dim.para <- dim(post.samples[[i]][[1]])
#print(dim.para)
name.para <- colnames(post.samples[[i]][[1]])
post.samples.raw <- post.samples
for(k in 1:dim.para[2]){
if(grepl("b0",name.para[k])){
if(p.x0 > 1){
MEAN <- matrix(rep(mean0,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd0,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x0-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.x0-1)] <- tmp[,(k+1):(k+p.x0-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b1",name.para[k])){
if(p.x1 > 1){
MEAN <- matrix(rep(mean1,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd1,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x1-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.x1-1)] <- tmp[,(k+1):(k+p.x1-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b",name.para[k])){
if(p.xmu > 1){
MEAN <- matrix(rep(mean.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.xmu-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.xmu-1)] <- tmp[,(k+1):(k+p.xmu-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("d",name.para[k])){
if(p.xsum > 1){
MEAN <- matrix(rep(mean.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[i]][[j]]
post.samples.raw[[i]][[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.xsum-1)]*MEAN/SD,1,sum)
post.samples.raw[[i]][[j]][,(k+1):(k+p.xsum-1)] <- tmp[,(k+1):(k+p.xsum-1)]/SD}}
break}}
}
if(q==1) {
model<- model[[1]];
post.samples<- post.samples[[1]];
post.samples.raw<- post.samples.raw[[1]]}
}
########################################################
# 13-20: random, joint modeling
########################################################
else
{
if(nz0>1)
{
if(any(one.inflation) & any(zero.inflation)){
inflate1 <- rep(0,q)
inflate0 <- rep(0,q)
for(j in 1:q){
if(one.inflation[j]) inflate1[j]<- 1
if(zero.inflation[j]) inflate0[j]<- 1}
model<- joint.2z01(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, x1.1, p.x1,
inflate0, inflate1, zdummy, qz,nz0,m, rid, EUID1, nEU,
prior1, prior2, prior.beta, prior.Sigma, prec.int, prec.DN,
lambda.L1, lambda.L2, lambda.ARD, scale.unif, scale.halfcauchy,
link, n.chain, inits, seeds)
para.list <- c("b","d", "b0","b1","Sigma")}
else if(all(!one.inflation) & any(zero.inflation)){
inflate0 <- rep(0,q)
for(j in 1:q){
if(zero.inflation[j]) inflate0[j]<- 1}
model<- joint.2z0(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0,inflate0,
zdummy, qz,nz0,m,rid, EUID1, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d", "b0","Sigma")}
else if(any(one.inflation) & all(!zero.inflation)) {
inflate1 <- rep(0,q)
for(j in 1:q){
if(one.inflation[j]) inflate1[j]<- 1}
model<- joint.2z1(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum, x1.1, p.x1,inflate1,
zdummy,qz,nz0,m,rid, EUID1, nEU,
prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d","b1","Sigma")}
else if(all(!one.inflation) & all(!zero.inflation)){
model<- joint.2z(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum,zdummy,qz,nz0,m,
rid, EUID1, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d","Sigma")}
}
else
{
if(any(one.inflation) & any(zero.inflation)) {
inflate1 <- rep(0,q)
inflate0 <- rep(0,q)
for(j in 1:q){
if(one.inflation[j]) inflate1[j]<- 1
if(zero.inflation[j]) inflate0[j]<- 1}
model<- joint.1z01(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, x1.1, p.x1,
inflate0, inflate1, rid, EUID1, nEU, prior1, prior2,
prior.beta, prior.Sigma,prec.int, prec.DN, lambda.L1,lambda.L2,
lambda.ARD, scale.unif, scale.halfcauchy,link, n.chain,
inits, seeds)
para.list <- c("b","d", "b0","b1","sigma")}
else if(all(!one.inflation) & any(zero.inflation)) {
inflate0 <- rep(0,q)
for(j in 1:q){
if(zero.inflation[j]) inflate0[j]<- 1}
model<- joint.1z0(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum, x0.1, p.x0, inflate0,
rid, EUID1, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int,prec.DN, lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d", "b0","sigma")}
else if(any(one.inflation) & all(!zero.inflation)){
inflate1 <- rep(0,q)
for(j in 1:q){
if(one.inflation[j]) inflate1[j]<- 1}
model<- joint.1z1(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum,x1.1, p.x1,inflate1,
rid, EUID1, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN,lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d","b1","sigma")}
else if(all(!one.inflation) & all(!zero.inflation)) {
model<- joint.1z(y,n,q, xmu.1, p.xmu, xsum.1, p.xsum,
rid, EUID1, nEU, prior1, prior2, prior.beta, prior.Sigma,
prec.int, prec.DN,lambda.L1, lambda.L2, lambda.ARD,
scale.unif, scale.halfcauchy,link, n.chain, inits, seeds)
para.list <- c("b","d", "sigma")}
}
para.list <- c(para.list, "ypred") # "phi"
#print(para.list)
post.samples <- coda.samples(model, para.list, thin=n.thin, n.iter=n.iter)
#print(post.samples)
dim.para <- dim(post.samples[[1]])
#print(dim.para)
name.para <- colnames(post.samples[[1]])
post.samples.raw <- post.samples
for(k in 1:dim.para[2]){
if(grepl("b0",name.para[k])){
if(p.x0 > 1){
MEAN <- matrix(rep(mean0,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd0,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[j]]
post.samples.raw[[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x0-1)]*MEAN/SD,1,sum)
post.samples.raw[[j]][,(k+1):(k+p.x0-1)] <- tmp[,(k+1):(k+p.x0-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b1",name.para[k])){
if(p.x1 > 1){
MEAN <- matrix(rep(mean1,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd1,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[j]]
post.samples.raw[[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.x1-1)]*MEAN/SD,1,sum)
post.samples.raw[[j]][,(k+1):(k+p.x1-1)] <- tmp[,(k+1):(k+p.x1-1)]/SD}}
break}}
for(k in 1:dim.para[2]){
if(grepl("b",name.para[k])){
if(p.xmu > 1){
MEAN <- matrix(rep(mean.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.mu,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[j]]
for(m in 1:q){
s <- k+p.xmu*(m-1)
post.samples.raw[[j]][,s] <- tmp[,s]-apply(tmp[,(s+1):(s+p.xmu-1)]*MEAN/SD,1,sum)
post.samples.raw[[j]][,(s+1):(s+p.xmu-1)] <- tmp[,(s+1):(s+p.xmu-1)]/SD}}}
break}}
for(k in 1:dim.para[2]){
if(grepl("d",name.para[k])){
if(p.xsum > 1){
MEAN <- matrix(rep(mean.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
SD <- matrix(rep(sd.sum,dim.para[1]), nrow=dim.para[1], byrow=T)
for(j in 1:n.chain) {
tmp <- post.samples[[j]]
post.samples.raw[[j]][,k] <- tmp[,k]-apply(tmp[,(k+1):(k+p.xsum-1)]*MEAN/SD,1,sum)
post.samples.raw[[j]][,(k+1):(k+p.xsum-1)] <- tmp[,(k+1):(k+p.xsum-1)]/SD}}
break}}
}
}
# construct Design Matrix X
Xbeta.mean <- xmu
Xbeta.sum <- xsum
if(0)
{
X0 <- NULL
X1 <- NULL
if(nterm[2L]==5L){
X0 <- x0
X1 <- x1
}
else if(nterm[2L]==4L){
if(random==0){
X0 <- x0
X1 <- x1
}
else{
if(any(one.inflation) & all(!zero.inflation)) X0 <- x0
else if(any(zero.inflation) & !all(one.inflation)) X1 <-x1
}
}
else if(nterm[2L]==3L){
if(random == 0){
if(any(zero.inflation) & !all(one.inflation)) X1 <- x1
}}
}
#print(post.samples.raw)
#print(post.samples.raw[[1]][[1]])
yobs=c(y)
howmany <- length(yobs)
res.names <- paste(rep("r",howmany),as.character(1:howmany),sep="")
nburn <- n.burn/n.thin
if((!joint) & q>1){
coeff.tmp = NULL
ypred.tmp = NULL
res.tmp = NULL
for(l in 1:q){
ypredcol <- which(substr(colnames(post.samples.raw[[l]][[1]]),1,5)=="ypred")
coeff.tmp <- cbind(coeff.tmp, post.samples.raw[[l]][[1]][-(1:nburn),1:(ypredcol[1]-1)])
ypred.tmp <- cbind(ypred.tmp, post.samples.raw[[l]][[1]][-(1:nburn),ypredcol])
}
res.tmp<- yobs-ypred.tmp
colnames(res.tmp)<- res.names
coeff<- list(mcmc(coeff.tmp))
ypred<- list(mcmc(ypred.tmp))
resid<- list(mcmc(res.tmp))
}
else {
ypredcol <- which(substr(colnames(post.samples.raw[[1]]),1,5)=="ypred")
coeff<- list(mcmc(post.samples.raw[[1]][-(1:nburn),1:(ypredcol[1]-1)]))
ypred<- list(mcmc(post.samples.raw[[1]][-(1:nburn),ypredcol]))
resid <- yobs-ypred[[1]];
colnames(resid)<- res.names
resid<- list(mcmc(resid))
}
#phicol <- which(substr(colnames(post.samples.raw[[1]]),1,3)=="phi")
#coeff <- mcmc.list(mcmc(post.samples.raw[[1]][-(1:nburn),1:(phicol[1]-1)]),
# mcmc(post.samples.raw[[2]][-(1:nburn),1:(phicol[1]-1)]))
# standardized residuals
rstd.names <- paste(rep("rstd",howmany),as.character(1:howmany),sep="")
resid.std <- resid[[1]]/apply(resid[[1]],2,sd);
colnames(resid.std)<- rstd.names
resid.std <- list(resid.std)
for(k in seq_len(n.chain)[-1]) { # safe when n.chain = 1 (2:n.chain would count down)
coeff<- c(coeff,k)
ypred<- c(ypred,k)
resid<- c(resid,k)
if(!joint & q>1){
coeff.tmp = NULL
ypred.tmp = NULL
for(l in 1:q){
ypredcol <- which(substr(colnames(post.samples.raw[[l]][[k]]),1,5)=="ypred")
coeff.tmp <- cbind(coeff.tmp, post.samples.raw[[l]][[k]][-(1:nburn),1:(ypredcol[1]-1)])
ypred.tmp <- cbind(ypred.tmp, post.samples.raw[[l]][[k]][-(1:nburn),ypredcol])
}
resid.tmp<- yobs- ypred.tmp
colnames(resid.tmp)<- res.names
coeff[[k]]<- mcmc(coeff.tmp)
ypred[[k]]<- mcmc(ypred.tmp)
resid[[k]]<- mcmc(resid.tmp)
}
else{
coeff[[k]]<- mcmc(post.samples.raw[[k]][-(1:nburn),1:(ypredcol[1]-1)])
ypred[[k]]<- mcmc(post.samples.raw[[k]][-(1:nburn),ypredcol])
tmp<- yobs- post.samples.raw[[k]][-(1:nburn),ypredcol]
colnames(tmp)<- res.names
resid[[k]] <- mcmc(tmp)
}
resid.std<- c(resid.std,k)
tmp <- resid[[k]]/apply(resid[[k]],2,sd);
colnames(tmp)<- rstd.names
resid.std[[k]] <- mcmc(tmp)
}
coeff<- mcmc.list(coeff)
ypred<- mcmc.list(ypred)
resid<- mcmc.list(resid)
resid.std<- mcmc.list(resid.std)
# print(coeff)
if(0){
coeff <- mcmc.list(mcmc(post.samples.raw[[1]][-(1:nburn),1:(ypredcol[1]-1)]),
mcmc(post.samples.raw[[2]][-(1:nburn),1:(ypredcol[1]-1)]))
pred1 <- post.samples.raw[[1]][-(1:nburn),ypredcol]
pred2 <- post.samples.raw[[2]][-(1:nburn),ypredcol]
ypred <- mcmc.list(mcmc(pred1), mcmc(pred2))
yobs <- c(y);
howmany <- length(yobs)
names <- paste(rep("r",howmany),as.character(1:howmany),sep="")
resid1 <- yobs-pred1; colnames(resid1)<- names
resid2 <- yobs-pred2; colnames(resid2)<- names
resid <- mcmc.list(mcmc(resid1), mcmc(resid2))
# standardized residuals
names <- paste(rep("rstd",howmany),as.character(1:howmany),sep="")
resid1 <- resid1/apply(resid1,2,sd); colnames(resid1)<- names
resid2 <- resid2/apply(resid2,2,sd); colnames(resid2)<- names
resid.std <- mcmc.list(mcmc(resid1), mcmc(resid2))
}
if(0){
if(!joint & q==1){
orinames <- colnames(coeff[[1]])
newnames <- orinames
n.orinames <- length(orinames)
tmp<- which(substr(orinames,1,1)=='d')
newname<- colnames(Xbeta.sum); newname<-rep(newname, q)
for(k in 1:length(tmp)){
#newnames[tmp[k]]<-paste(newname[k],substr(orinames[tmp[k]],2,nchar(orinames[tmp[k]])),sep="")
newnames[tmp[k]]<- newname[k]
}
tmp<- which(substr(orinames,1,2)=='b[')
newname<- colnames(Xbeta.mean); newname<-rep(newname, q)
for(k in 1:length(tmp)){
#newnames[tmp[k]]<-paste(newname[k],substr(orinames[tmp[k]],2,nchar(orinames[tmp[k]])),sep="")
newnames[tmp[k]]<- newname[k]
}
tmp0<- any(substr(orinames,1,2)=='b0');
if(tmp0){
tmp<- which(substr(orinames,1,2)=='b0');
newname<- colnames(x0); newname<-rep(newname, q)
for(k in 1:length(tmp)){
#newnames[tmp[k]]<-paste(newname[k],substr(orinames[tmp[k]],3,nchar(orinames[tmp[k]])),sep="")
newnames[tmp[k]]<- newname[k]
}}
tmp1<- any(substr(orinames,1,2)=='b1');
if(tmp1){
tmp<- which(substr(orinames,1,2)=='b1')
newname<- colnames(x1); newname<-rep(newname, q)
for(k in 1:length(tmp)){
#newnames[tmp[k]]<-paste(newname[k],substr(orinames[tmp[k]],3,nchar(orinames[tmp[k]])),sep="")
newnames[tmp[k]]<- newname[k]
}}
for(i in 1:n.chain){
colnames(coeff[[i]])=newnames
}
}
}
if(joint & q>1){
for(i in 1:n.chain){
tmp<- coeff[[i]]
howmany <- ncol(tmp)
tmp.names<- colnames(tmp)
#print(tmp.names)
reorder<- matrix(0,q, howmany)
for(k in 1:q){
count<- 0
for(j in 1:howmany){
nc <- nchar(tmp.names[j])
if(substr(tmp.names[j],nc-1,nc-1)==as.character(k)) {
count<- count+1
reorder[k,count] = j}
}
}
tmp.coeff <- NULL
for(k in 1:q) tmp.coeff<- cbind(tmp.coeff,coeff[[i]][,reorder[k,]])
# print(tmp.coeff)
coeff[[i]] <- tmp.coeff
rem<- NULL
for(j in 1:ncol(tmp.coeff)){
if(sum(tmp.coeff[1:2,j])==0) rem<- c(rem,j)}
if(!is.null(rem)) coeff[[i]]<- tmp.coeff[,-rem]
}}
print("NOTE: in the header of Markov Chain Monte Carlo (MCMC) output of ")
print("parameters (coeff), predicted values (ypred), residuals (resid), and")
print("standardized residuals (resid.std), *Start, End, Thinning Interval* ")
print("values are after the initial burning and thinning periods specified ")
print("by the user. For example, n.iter = 151, n.thin = 2, n.burn=1, ")
print("then MCMC header of the *coeff* output would read as follows ")
print("--------------------------------------------------------------------")
print("Markov Chain Monte Carlo (MCMC) output:")
print("Start = 1")
print("End = 75")
print("Thinning interval = 1")
print("--------------------------------------------------------------------")
print(" ")
print("Coefficients are presented in the order of b, b0 (if zero.inflation=TRUE),")
print("b1 (if one.inflation=TRUE), and d. If the names of independent variables X")
print("are not shown for the coefficients within each type (b, b0, b1, d), the ")
print("first coeffient is always the intercept, followed the coefficients for the")
print("X's in the order as how they are entered in the model specification. ")
print("--------------------------------------------------------------------------")
return(list(model= model.format, MCMC.model= model,
Xb= Xbeta.mean, Xd= Xbeta.sum, Xb0= x0, Xb1=x1,
coeff= coeff, ypred= ypred, yobs= yobs,
resid= resid, resid.std= resid.std))
}
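# The returned `coeff`, `ypred`, `resid`, and `resid.std` components are
# coda::mcmc.list objects, so standard coda tools apply, e.g. (illustrative):
# summary(fit$coeff); coda::gelman.diag(fit$coeff)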
| /scratch/gouwar.j/cran-all/cranData/zoib/R/zoib.R |
# Random generation of datasets using the dirichlet broken stick method
#'
#' Random generation of datasets using the dirichlet broken stick method
#'
#' @param n_obs Number of observations (rows of data matrix to simulate). Defaults to 10
#' @param n_groups Number of categories for each observation (columns of data matrix). Defaults to 10
#' @param ess_fraction The effective sample size fraction, defaults to 1
#' @param tot_n The total sample size to simulate for each observation. This is approximate and the actual
#' simulated sample size will be slightly smaller. Defaults to 100
#' @param p The stock proportions to simulate from, as a vector. Optional, and when not included,
#' random draws from the dirichlet are used
#' @return A 2-element list, whose 1st element `X_obs` is the simulated dataset, and whose
#' 2nd element is the underlying vector of proportions `p` used to generate the data
#' @export
#' @importFrom gtools rdirichlet
#' @importFrom stats rbeta rbinom
#'
#' @examples
#' \donttest{
#' y <- broken_stick(n_obs = 3, n_groups = 5, tot_n = 100)
#'
#' # add custom proportions
#' y <- broken_stick(
#' n_obs = 3, n_groups = 5, tot_n = 100,
#' p = c(0.1, 0.2, 0.3, 0.2, 0.2)
#' )
#' }
broken_stick <- function(n_obs = 1000,
n_groups = 10,
ess_fraction = 1,
tot_n = 100,
p = NULL) {
if (is.null(p)) {
p <- gtools::rdirichlet(1, rep(1, n_groups))
}
ess <- tot_n * ess_fraction
# first, determine the presence of zeros using the stick breaking algorithm
# second, for instances where zeros and tot_n are not observed, rescale the parameters and
# use the stick breaking algorithm for the Dirichlet process a second time
X_mean_prob <- matrix(p, n_obs, n_groups, byrow = T)
X_var_prob <- matrix((p * (1 - p)) / (ess + 1),
n_obs, n_groups,
byrow = T
)
X_mean <- X_mean_prob * tot_n
X_var <- X_var_prob * tot_n^2
# Simulate occurrence of 0s, 1s
X_obs_0 <- matrix(NA, n_obs, n_groups) #1 indicates that the observation is a zero
X_obs_1 <- matrix(NA, n_obs, n_groups)
for(i in 1:n_obs){
BREAK = "FALSE"
repeat{
for(j in 1:n_groups){
X_obs_0[i,j] <- rbinom(1,1,(1-p[j])^ess)
}
if(sum(X_obs_0[i,])<n_groups){break}
}
for(j in 1:n_groups){
X_obs_1[i,j] <- ifelse(X_obs_0[i,j]==0 & sum(X_obs_0[i,])==(n_groups-1),1,0)
}
}
# Simulate proportions
X_indicator <- matrix(NA,n_obs,n_groups)
X_indicator[X_obs_0 == 0] <- 1
X_indicator[X_obs_0 == 1] <- 0
X_alpha <- (X_mean_prob) * ((X_mean_prob * (1-X_mean_prob)/ X_var_prob) - 1)
X_alpha_mod <- X_alpha * 0
mu_vals <- X_mean * 0 # These will be independent Beta draws, conditioned on being non-zero.
q_vals <- mu_vals # These will be equivalent to dirichlet draws (mu_vals modified by stick-breaking algorithm)
X_obs <- X_indicator*0
for(i in 1:n_obs){
if(BREAK == "FALSE"){
X_alpha_mod[i,] <- X_alpha[i,] * X_indicator[i,]
for(j in 1:(n_groups-1)){ # Loop over stocks for dirichlet component
if(j==1){
if(X_alpha_mod[i,j]>0){
mu_vals[i,j] <- rbeta(1,X_alpha_mod[i,j],sum(X_alpha_mod[i,(j+1):n_groups]))
q_vals[i,j] = mu_vals[i,j]
}
}else if(j>1 ){
if(X_alpha_mod[i,j]>0){
mu_vals[i,j] <- rbeta(1,X_alpha_mod[i,j],sum(X_alpha_mod[i,(j+1):n_groups]))
q_vals[i,j] <- prod(1 - mu_vals[i,(1:j-1)]) * mu_vals[i,j]
}
}
}
if(X_alpha_mod[i,n_groups]>0){
q_vals[i,n_groups] <- 1 - sum(q_vals[i,(1:n_groups-1)])
}
X_obs[i,] <- q_vals[i,] * (tot_n)
}
}
return(list(X_obs = X_obs, p = p))
}
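# Quick sanity check (illustrative, not run): with a large total sample size,
# realized compositions should be close to the generating proportions.
# sim <- broken_stick(n_obs = 5, n_groups = 4, tot_n = 10000)
# round(sim$X_obs / rowSums(sim$X_obs), 2) # each row approximately sim$p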
| /scratch/gouwar.j/cran-all/cranData/zoid/R/broken_stick.R |
#' Data from Satterthwaite, W.H., Ciancio, J., Crandall, E., Palmer-Zwahlen,
#' M.L., Grover, A.M., O’Farrell, M.R., Anson, E.C., Mohr, M.S. & Garza,
#' J.C. (2015). Stock composition and ocean spatial distribution from
#' California recreational chinook salmon fisheries using genetic stock
#' identification. Fisheries Research, 170, 166–178. The data
#' genetic data collected from port-based sampling of recreationally-landed
#' Chinook salmon in California from 1998-2002.
#'
#' @format A data frame.
"chinook"
#' Data from Magnussen, E. 2011. Food and feeding habits of cod (Gadus morhua)
#' on the Faroe Bank. – ICES Journal of Marine Science, 68: 1909–1917. The data
#' here are Table 3 from the paper, with sample proportions (columns w) multiplied
#' by total weight to yield total grams (g) for each sample-diet item combination. Dashes
#' have been replaced with 0s.
#'
#' @format A data frame.
"coddiet"
| /scratch/gouwar.j/cran-all/cranData/zoid/R/data.R |
#' Find appropriate standard deviations for prior
#'
#' @param n_bins Bins for the Dirichlet distribution
#' @param n_draws Numbers of samples to use for doing calculation
#' @param target The goal of the specified prior, e.g. 1 or 1/n_bins
#' @param iterations Number of optimization attempts, to ensure a robust solution. Defaults to 5
#' @export
#' @importFrom stats optim runif
#' @return A 3-element list consisting of `sd` (the approximate standard deviation
#' in transformed space that gives a similar prior to that specified), `value` (the
#' value of the root mean squared percent error function being minimized),
#' and `convergence` (0 if convergence occurred, error code from
#' [optim()] otherwise)
#' @examples
#' \donttest{
#' # fit model with 3 components / alpha = 1
#' set.seed(123)
#' f <- fit_prior(n_bins = 3, n_draws = 1000, target = 1)
#' # fit model with 20 components / alpha = 1/20
#' f <- fit_prior(n_bins = 20, n_draws = 1000, target = 1 / 20)
#' }
#'
fit_prior <- function(n_bins, n_draws = 10000, target = 1 / n_bins, iterations = 5) {
best <- 1.0e10
best_value <- NA
for (i in 1:iterations) {
o <- try(optim(
par = runif(1), rmspe_calc, n_bins = n_bins,
target = target,
n_draws = n_draws,
method = "BFGS"
), silent = TRUE)
if(!identical(class(o), "try-error")) {
if (o$value < best) {
best <- o$value
best_value <- list(
sd = exp(o$par), value = o$value,
convergence = o$convergence
)
}
}
}
return(best_value)
}
#' Find appropriate prior for a given target distribution.
#'
#' Calculates the root mean squared percent error (RMSPE) between the Dirichlet
#' hyperparameters implied by a Normal prior in transformed space and the target value.
#'
#' @param par The parameter (standard deviation) to be searched over to find a Dirichlet equivalent
#' @param n_bins Bins for the Dirichlet distribution
#' @param n_draws Numbers of samples to use for doing calculation
#' @param target The goal of the specified prior, e.g. 1 or 1/n_bins
#' @importFrom stats rnorm
rmspe_calc <- function(par, n_bins, n_draws, target) {
x <- matrix(rnorm(n_draws * (n_bins - 1), 0, exp(par)), n_draws, n_bins - 1)
x <- cbind(x, 0)
f <- function(x) {
return(exp(x) / sum(exp(x)))
}
p <- t(apply(x, 1, f))
funct_alpha <- fit_dirichlet(p)
rmspe <- sqrt(mean(((funct_alpha - target) / target)^2))
return(rmspe)
}
#' Fit a Dirichlet distribution to compositional data by maximum likelihood.
#'
#' @param data The data to fit the dirichlet distribution to
#' @importFrom stats optim
fit_dirichlet <- function(data) {
  # Log-likelihood of the Dirichlet distribution, summed over all rows of data
  logLikelihood <- function(params) {
    # Ensure parameters are positive; return -Inf (not Inf) so that optim,
    # which is maximizing here (fnscale = -1), rejects invalid values
    if (any(params <= 0)) return(-Inf)
    alpha <- params
    nrow(data) * (lgamma(sum(alpha)) - sum(lgamma(alpha))) +
      sum((alpha - 1) * colSums(log(data)))
  }
# Initial parameter estimates
init_params <- rep(1, ncol(data))
# Optimization using optim
fit <- optim(init_params, logLikelihood, control = list(fnscale = -1))
# Return estimated parameters
return(fit$par)
}
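# Illustrative check (not run): with a large simulated sample the estimates
# should approximately recover the generating hyperparameters.
# set.seed(1)
# x <- gtools::rdirichlet(5000, c(2, 2, 2))
# fit_dirichlet(x) # approximately c(2, 2, 2)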
| /scratch/gouwar.j/cran-all/cranData/zoid/R/fit_prior.R |
#' Fit a trinomial mixture model with Stan
#'
#' Fit a trinomial mixture model that optionally includes covariates to estimate
#' effects of factor or continuous variables on proportions.
#'
#' @param formula The model formula for the design matrix. Does not need to have a response specified. If =NULL, then
#' the design matrix is ignored and all rows are treated as replicates
#' @param design_matrix A data frame, dimensioned as number of observations, and covariates in columns
#' @param data_matrix A matrix, with observations on rows and number of groups across columns
#' @param chains Number of mcmc chains, defaults to 3
#' @param iter Number of mcmc iterations, defaults to 2000
#' @param warmup Number iterations for mcmc warmup, defaults to 1/2 of the iterations
#' @param overdispersion Whether or not to include overdispersion parameter, defaults to FALSE
#' @param overdispersion_sd Prior standard deviation on 1/overdispersion parameter, Defaults to inv-Cauchy(0,5)
#' @param posterior_predict Whether or not to return draws from posterior predictive distribution (requires more memory)
#' @param moment_match Whether to do moment matching via [loo::loo_moment_match()]. This increases memory by adding all temporary
#' parameters to be saved and returned
#' @param prior_sd Parameter to be passed in to use as standard deviation of the normal distribution in transformed space. If
#' covariates are included this defaults to 1, but for models with single replicate, defaults to 1/n_bins.
#' @param ... Any other arguments to pass to [rstan::sampling()].
#'
#' @export
#' @importFrom rstan sampling
#' @importFrom stats model.frame model.matrix rcauchy
#' @import Rcpp
#'
#' @examples
#' \donttest{
#' y <- matrix(c(3.77, 6.63, 2.60, 0.9, 1.44, 0.66, 2.10, 3.57, 1.33),
#' nrow = 3, byrow = TRUE
#' )
#' # fit a model with no covariates
#' fit <- fit_zoid(data_matrix = y, chains = 1, iter = 100)
#'
#' # fit a model with 1 factor
#' design <- data.frame("fac" = c("spring", "spring", "fall"))
#' fit <- fit_zoid(formula = ~fac, design_matrix = design, data_matrix = y, chains = 1, iter = 100)
#' }
#' # try a model with random effects
#' set.seed(123)
#' y <- matrix(runif(99,1,4), ncol=3)
#' design <- data.frame("fac" = sample(letters[1:5], size=nrow(y), replace=TRUE))
#' design$fac <- as.factor(design$fac)
#' fit <- fit_zoid(formula = ~(1|fac), design_matrix = design, data_matrix = y, chains = 1, iter = 100)
#'
fit_zoid <- function(formula = NULL,
design_matrix,
data_matrix,
chains = 3,
iter = 2000,
warmup = floor(iter / 2),
overdispersion = FALSE,
overdispersion_sd = 5,
posterior_predict = FALSE,
moment_match = FALSE,
prior_sd = NA,
...) {
# if a single observation
if (class(data_matrix)[1] != "matrix") {
data_matrix <- matrix(data_matrix, nrow = 1)
}
# fill with dummy values
parsed_res <- list(design_matrix = matrix(0, nrow(data_matrix),ncol=1),
var_indx = 1,
n_re_by_group = 1,
tot_re = 1,
n_groups = 1)
est_re <- FALSE
re_group_names <- NA
if (!is.null(formula)) {
model_frame <- model.frame(formula, design_matrix)
model_matrix <- model.matrix(formula, model_frame)
# extract the random effects
res <- parse_re_formula(formula, design_matrix)
if(length(res$var_indx) > 0) {
parsed_res <- res # only update if REs are in formula
est_re <- TRUE
model_matrix <- res$fixed_design_matrix
re_group_names <- res$random_effect_group_names
}
} else {
model_matrix <- matrix(1, nrow = nrow(data_matrix))
colnames(model_matrix) <- "(Intercept)"
}
sd_prior <- 1 / ncol(data_matrix) # default if no covariates
if (ncol(model_matrix) > 1) sd_prior <- 1
if (!is.na(prior_sd)) sd_prior <- prior_sd
par_names <- colnames(model_matrix)
prod_idx <- matrix(0, ncol(data_matrix), ncol(data_matrix)-1)
for(j in 1:ncol(data_matrix)){
prod_idx[j,] <- seq(1,ncol(data_matrix),1)[-j]
}
stan_data <- list(
N_bins = ncol(data_matrix),
N_samples = nrow(data_matrix),
X = data_matrix,
prod_idx = prod_idx,
N_covar = ncol(model_matrix),
design_X = model_matrix,
overdisp = ifelse(overdispersion == TRUE, 1, 0),
overdispersion_sd = overdispersion_sd,
postpred = ifelse(posterior_predict == TRUE, 1, 0),
prior_sd = sd_prior,
design_Z = parsed_res$design_matrix, # design matrix for Z (random int)
re_var_indx = c(parsed_res$var_indx, 1), # index of the group for each re
n_re_by_group = c(parsed_res$n_re_by_group, 1), # number of random ints per group
tot_re = parsed_res$tot_re, # total number of random ints, across all groups
    n_groups = parsed_res$n_groups, # exact element name (avoids relying on $ partial matching)
est_re = as.numeric(est_re)
)
pars <- c("beta", "log_lik", "mu")
if(est_re == TRUE) pars <- c(pars, "zeta", "zeta_sds")
if (overdispersion == TRUE) pars <- c(pars, "phi")
if (posterior_predict == TRUE) pars <- c(pars, "ynew")
if (moment_match == TRUE) pars <- c(pars, "phi_inv", "beta_raw", "p_zero", "p_one")
sampling_args <- list(
object = stanmodels$dirichregmod,
chains = chains,
iter = iter,
warmup = warmup,
pars = pars,
data = stan_data, ...
)
fit <- do.call(sampling, sampling_args)
prior <- NULL
if (overdispersion) {
prior <- abs(rcauchy(n = chains * (iter - warmup), location = 0, scale = overdispersion_sd))
}
return(list(
model = fit, par_names = par_names,
design_matrix = model_matrix,
data_matrix = data_matrix,
overdispersion = overdispersion,
overdispersion_prior = prior,
posterior_predict = posterior_predict,
stan_data = stan_data,
re_group_names = re_group_names
))
}
#' Fit a trinomial mixture model that optionally includes covariates to estimate
#' effects of factor or continuous variables on proportions.
#'
#' @param formula The model formula for the design matrix.
#' @param data The data matrix used to construct RE design matrix
#' @importFrom stats model.matrix as.formula
parse_re_formula <- function(formula, data) {
# Convert the formula to a character string
formula_str <- as.character(formula)
# Split the formula into parts based on '+' and '-' symbols
formula_parts <- unlist(strsplit(formula_str, split = "[-+]", perl = TRUE))
# Trim whitespace from each part
formula_parts <- trimws(formula_parts)
# Identify parts containing a bar '|'
random_effects <- grep("\\|", formula_parts, value = TRUE)
fixed_effects <- setdiff(formula_parts, random_effects)
# Create design matrix for fixed effects. Catch the cases where no fixed
# effects are included, or intercept-only models used
if (length(fixed_effects) > 1 || (length(fixed_effects) == 1 && fixed_effects != "~")) {
fixed_formula_str <- paste("~", paste(fixed_effects, collapse = "+"))
} else {
fixed_formula_str <- "~ 1" # Only intercept
}
fixed_design_matrix <- model.matrix(as.formula(fixed_formula_str), data)
random_effect_group_names <- sapply(random_effects, function(part) {
# Extract the part after the '|'
split_part <- strsplit(part, "\\|", perl = TRUE)[[1]]
# Remove the closing parenthesis and trim
group_name <- gsub("\\)", "", split_part[2])
trimws(group_name)
})
  # create design matrices by group; mm holds the columns for the current
  # grouping variable only (the original code indexed ncol(design_matrix)
  # after cbind-ing, which over-counted the random effects per group)
  design_matrix <- matrix(0, nrow(data), 1)
  var_indx <- NULL
  n_re <- 0
  if(length(random_effects) > 0) {
    for(i in 1:length(random_effects)) {
      new_formula <- as.formula(paste("~", random_effect_group_names[i], "-1"))
      mm <- model.matrix(new_formula, data)
      if(i == 1) {
        design_matrix <- mm
        var_indx <- rep(1, ncol(mm))
        n_re <- ncol(mm)
      } else {
        design_matrix <- cbind(design_matrix, mm)
        var_indx <- c(var_indx, rep(i, ncol(mm)))
        n_re <- c(n_re, ncol(mm))
      }
    }
  }
  n_groups <- 0
  if(length(var_indx) > 0) n_groups <- max(var_indx)
return(list(design_matrix = design_matrix, var_indx = var_indx, n_re_by_group = n_re,
tot_re = sum(n_re), n_groups = n_groups,
fixed_design_matrix = fixed_design_matrix,
random_effect_group_names = random_effect_group_names))
}
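# Example of the parsed structure (illustrative, not run): for a data frame
# with a numeric covariate x and factors region (2 levels) and site (5 levels),
# parse_re_formula(~ x + (1 | region) + (1 | site), data)
# returns a fixed-effect design matrix for ~ x, with
# var_indx = c(1, 1, 2, 2, 2, 2, 2), n_re_by_group = c(2, 5),
# tot_re = 7, and n_groups = 2.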
| /scratch/gouwar.j/cran-all/cranData/zoid/R/fitting.R |
#' Extract estimates of predicted latent proportions.
#'
#' Extract point estimates of compositions from fitted model.
#'
#' @param fitted_model The fitted model returned as an rstan object from the call to zoid
#' @param conf_int Parameter controlling confidence intervals calculated, defaults to 0.05
#' for 95% intervals
#' @export
#' @return A list containing the posterior summaries of estimated parameters, with
#' element `mu` (the predicted values in normal space). For predictions
#' in transformed space, or overdispersion, see [get_pars()]
#' @importFrom rstan extract
#' @importFrom stats median quantile
#'
#' @examples
#' \donttest{
#' y <- matrix(c(3.77, 6.63, 2.60, 0.9, 1.44, 0.66, 2.10, 3.57, 1.33),
#' nrow = 3, byrow = TRUE
#' )
#' # fit a model with no covariates
#' fit <- fit_zoid(data_matrix = y)
#' p_hat <- get_fitted(fit)
#' }
#'
get_fitted <- function(fitted_model, conf_int = 0.05) {
if ("model" %in% names(fitted_model) == FALSE & class(fitted_model$model)[1] != "stanfit") {
stop("Error: input isn't an stanfit object")
}
pars <- rstan::extract(fitted_model$model)
n_obs <- dim(pars$mu)[2]
n_group <- dim(pars$mu)[3]
df <- expand.grid(
"obs" = seq(1, n_obs),
"group" = seq(1, n_group)
)
for (i in 1:nrow(df)) {
df$mean[i] <- mean(pars$mu[, df$obs[i], df$group[i]])
df$median[i] <- median(pars$mu[, df$obs[i], df$group[i]])
df$lo[i] <- quantile(pars$mu[, df$obs[i], df$group[i]], conf_int / 2.0)
df$hi[i] <- quantile(pars$mu[, df$obs[i], df$group[i]], 1 - conf_int / 2.0)
}
return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoid/R/get_fitted.R |
#' Extract parameters from fitted model.
#'
#' Extract estimated parameters from fitted model.
#'
#' @param fitted_model The fitted model returned as an rstan object from the call to zoid
#' @param conf_int Parameter controlling confidence intervals calculated, defaults to 0.05
#' for 95% intervals
#' @export
#' @return A list containing the posterior summaries of estimated parameters. At minimum,
#' this will include `p` (the estimated proportions) and `betas` (the predicted values in
#' transformed space). For models with overdispersion, an extra
#' element `phi` will also be returned, summarizing overdispersion. For models with random
#' intercepts, estimates of the group level effects will also be returned as `zetas` (again,
#' in transformed space). For predictions
#' in normal space, see [get_fitted()]
#' @importFrom rstan extract
#' @importFrom stats median quantile
#'
#' @examples
#' \donttest{
#' y <- matrix(c(3.77, 6.63, 2.60, 0.9, 1.44, 0.66, 2.10, 3.57, 1.33),
#' nrow = 3, byrow = TRUE
#' )
#' # fit a model with no covariates
#' fit <- fit_zoid(data_matrix = y)
#' p_hat <- get_pars(fit)
#' }
#'
get_pars <- function(fitted_model, conf_int = 0.05) {
if ("model" %in% names(fitted_model) == FALSE & class(fitted_model$model)[1] != "stanfit") {
stop("Error: input isn't an stanfit object")
}
p <- get_fitted(fitted_model, conf_int = conf_int)
# add on other parameters
pars <- rstan::extract(fitted_model$model)
n_group <- dim(pars$beta)[2]
n_cov <- dim(pars$beta)[3]
betas <- expand.grid(
"m" = seq(1, n_group),
"cov" = seq(1, n_cov),
"par" = NA,
"mean" = NA,
"median" = NA,
"lo" = NA,
"hi" = NA
)
for (i in 1:nrow(betas)) {
betas$mean[i] <- mean(pars$beta[, betas$m[i], betas$cov[i]])
betas$median[i] <- median(pars$beta[, betas$m[i], betas$cov[i]])
betas$lo[i] <- quantile(pars$beta[, betas$m[i], betas$cov[i]], conf_int / 2.0)
betas$hi[i] <- quantile(pars$beta[, betas$m[i], betas$cov[i]], 1 - conf_int / 2.0)
betas$par[i] <- fitted_model$par_names[betas$cov[i]]
}
par_list <- list(p = p, betas = betas)
if (fitted_model$overdispersion == TRUE) {
phi <- data.frame(
"mean" = mean(pars$phi),
"median" = median(pars$phi),
"lo" = quantile(pars$phi, conf_int / 2.0),
"hi" = quantile(pars$phi, 1 - conf_int / 2.0)
)
par_list$phi <- phi
}
# include zetas (random group intercepts)
if (fitted_model$stan_data$est_re == 1) {
m <- dim(pars$zeta)[2]
group <- dim(pars$zeta)[3]
zetas <- expand.grid(
"m" = seq(1, m),
"group" = seq(1, group),
"par" = NA,
"mean" = NA,
"median" = NA,
"lo" = NA,
"hi" = NA
)
for (i in 1:nrow(zetas)) {
zetas$mean[i] <- mean(pars$zeta[, zetas$m[i], zetas$group[i]])
zetas$median[i] <- median(pars$zeta[, zetas$m[i], zetas$group[i]])
zetas$lo[i] <- quantile(pars$zeta[, zetas$m[i], zetas$group[i]], conf_int / 2.0)
zetas$hi[i] <- quantile(pars$zeta[, zetas$m[i], zetas$group[i]], 1 - conf_int / 2.0)
zetas$par[i] <- fitted_model$par_names[zetas$group[i]]
}
# add group names
for(i in 1:fitted_model$stan_data$n_groups) {
if(i==1) {
ids <- rep(i,fitted_model$stan_data$n_re_by_group[i])
} else {
ids <- c(ids, rep(i,fitted_model$stan_data$n_re_by_group[i]))
}
}
df <- data.frame("group" = 1:max(zetas$group), "group_name" = fitted_model$re_group_names[ids])
zetas$group_name <- ""
for(i in 1:nrow(zetas)) zetas$group_name[i] <- df$group_name[which(df$group == zetas$group[i])]
zetas <- zetas[,c("m","group","group_name","par","mean","median","lo","hi")]
par_list$zetas <- zetas
}
return(par_list)
}
| /scratch/gouwar.j/cran-all/cranData/zoid/R/get_pars.R |
# Generated by rstantools. Do not edit by hand.
# names of stan models
stanmodels <- c("dirichregmod")
# load each stan module
Rcpp::loadModule("stan_fit4dirichregmod_mod", what = TRUE)
# instantiate each stanmodel object
stanmodels <- sapply(stanmodels, function(model_name) {
# create C++ code for stan model
stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan")
stan_file <- file.path(stan_file, paste0(model_name, ".stan"))
stanfit <- rstan::stanc_builder(stan_file,
allow_undefined = TRUE,
obfuscate_model_name = FALSE)
stanfit$model_cpp <- list(model_cppname = stanfit$model_name,
model_cppcode = stanfit$cppcode)
# create stanmodel object
methods::new(Class = "stanmodel",
model_name = stanfit$model_name,
model_code = stanfit$model_code,
model_cpp = stanfit$model_cpp,
mk_cppmodule = function(x) get(paste0("rstantools_model_", model_name)))
})
| /scratch/gouwar.j/cran-all/cranData/zoid/R/stanmodels.R |
#' The 'zoid' package.
#'
#' @description Fits Bayesian trinomial mixture models (zero-and-one inflated
#' Dirichlet regression) to compositional data, with optional covariates and
#' random intercepts, using 'rstan'.
#'
#' @docType package
#' @name zoid-package
#' @aliases trinomix
#' @useDynLib zoid, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. https://mc-stan.org
#'
NULL
| /scratch/gouwar.j/cran-all/cranData/zoid/R/zoid-package.R |
## ----set-knitr-options, cache=FALSE, echo=FALSE-------------------------------
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
mcmc_iter = 50
} else {
mcmc_iter = 5000
}
## ----message=FALSE, warning=FALSE---------------------------------------------
library(zoid)
## -----------------------------------------------------------------------------
data(coddiet)
## -----------------------------------------------------------------------------
design_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==TRUE]
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
## ----results="hide", eval=FALSE-----------------------------------------------
# design_matrix$Season = as.factor(design_matrix$Season)
# design_matrix$Year = as.factor(design_matrix$Year)
# design_matrix$y = 1 # dummy variable
#
# set.seed(123)
# fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
# overdispersion = TRUE,
# chains=4,
# iter=4000)
#
# fit_2 <- fit_zoid(formula = y ~ Season,
# design_matrix = design_matrix,
# data_matrix = as.matrix(data_matrix),
# overdispersion = TRUE,
# chains=4,
# iter=4000)
#
# fit_3 <- fit_zoid(formula = y ~ Year,
# design_matrix = design_matrix,
# data_matrix = as.matrix(data_matrix),
# overdispersion = TRUE,
# chains=4,
# iter=4000)
#
# fit_4 <- fit_zoid(formula = y ~ Year + Season,
# design_matrix = design_matrix,
# data_matrix = as.matrix(data_matrix),
# overdispersion = TRUE,
# chains=4,
# iter=4000)
## ----eval=FALSE---------------------------------------------------------------
# loo_1 = loo::loo(fit_1$model)
# loo_2 = loo::loo(fit_2$model)
# loo_3 = loo::loo(fit_3$model)
# loo_4 = loo::loo(fit_4$model)
## ----warning=FALSE, message=FALSE---------------------------------------------
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
chains=1,
iter=mcmc_iter,
overdispersion = TRUE, refresh=0)
fitted_vals = get_fitted(fit_1)
head(fitted_vals)
## -----------------------------------------------------------------------------
fitted_vals = get_pars(fit_1)
head(fitted_vals$betas)
## ----eval=FALSE---------------------------------------------------------------
# formula <- ~ (1|group)
# formula <- ~ (1|group) + (1|year) + (1|season)
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a01_fitting.R |
---
title: "Fitting models with zoid"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Fitting models with zoid}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
mcmc_iter = 50
} else {
mcmc_iter = 5000
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
## Fish stomach contents example
Here, we will use a dataset of cod stomachs on the Faroe Bank, published in
*Magnussen, E. 2011. Food and feeding habits of cod (Gadus morhua) on the Faroe Bank. – ICES Journal of Marine Science, 68: 1909–1917.*
The data are also included with our package, and represent a dataframe with observations on rows (stratified by year and season) and prey items across columns.
```{r}
data(coddiet)
```
We need to split the dataset into 2 matrices, one representing the design matrix for the observations ('Year' and 'Season') and the other representing the data matrix of observed biomass per sample - prey item. If all rows are considered replicate observations, there's no need to create a design matrix - but we can test for the effects of Season and Year.
```{r}
design_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==TRUE]
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
```
### Overdispersion or not?
One optional feature of the model is to include overdispersion in the calculations of the 3 probabilities for each cell. Including overdispersion is generally only advised with replicated data or shared information -- and may fit heterogeneous datasets with lots of 0s better than the model without overdispersion. For the cod diet data, we'll include overdispersion because of poor MCMC sampling without it.
### Model selection
Next, we can test some hypotheses about how the data are structured. We'll create the following models: (1) a model with all observations as replicate samples, (2) a model with only seasonal effects, (3) a model with only differences by year, and (4) a model with both year and season. Both year and season are treated as factors here -- but continuous covariates can also be included.
Note that for fitting, the data_matrix should be a matrix, but the design_matrix should be a data frame.
```{r results="hide", eval=FALSE}
design_matrix$Season = as.factor(design_matrix$Season)
design_matrix$Year = as.factor(design_matrix$Year)
design_matrix$y = 1 # dummy variable
set.seed(123)
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_2 <- fit_zoid(formula = y ~ Season,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_3 <- fit_zoid(formula = y ~ Year,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_4 <- fit_zoid(formula = y ~ Year + Season,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
```
To compare models, we could use criteria like LOOIC in the loo package -- this is accessible by calling
```{r eval=FALSE}
loo_1 = loo::loo(fit_1$model)
loo_2 = loo::loo(fit_2$model)
loo_3 = loo::loo(fit_3$model)
loo_4 = loo::loo(fit_4$model)
```
For our example, the LOOIC from the model with Season and Year is lowest (2584.9), indicating the most support relative to the base model (2879.7), the model with Season only (2892.4), and the model with Year only (2637.2). Two words of caution for this application are that (1) the standard errors of the LOOIC estimates are all in the 120-160 range, so many of the models are within +/- 1 SE of each other, and (2) the Pareto-k diagnostic values fall into the 'bad' category for about 20% of the data points. There are a couple of solutions for this, including running more MCMC iterations, or using moment matching to correct the problematic draws (a sketch follows).
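One possible remedy (shown as a sketch, not run here) is to refit with `moment_match = TRUE`, which saves the additional parameters needed for moment matching, and then pass `moment_match = TRUE` to the `loo` call:

```{r eval=FALSE}
fit_4 <- fit_zoid(formula = y ~ Year + Season,
                  design_matrix = design_matrix,
                  data_matrix = as.matrix(data_matrix),
                  overdispersion = TRUE,
                  moment_match = TRUE,
                  chains = 4,
                  iter = 4000)
loo_4 <- loo::loo(fit_4$model, moment_match = TRUE)
```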
### Summarizing estimates
We include several helper functions for processing output into more manageable data frames. First, we can extract the predicted point estimates (and uncertainty intervals) around proportions,
```{r warning=FALSE, message=FALSE}
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
chains=1,
iter=mcmc_iter,
overdispersion = TRUE, refresh=0)
fitted_vals = get_fitted(fit_1)
head(fitted_vals)
```
Second, we can return all parameters (including betas for coefficients and $\phi$, the overdispersion term)
```{r}
fitted_vals = get_pars(fit_1)
head(fitted_vals$betas)
```
### Random effects
For models with grouping variables or factor predictor variables, either fixed or random effects may be included. Both need to be included via the formula interface, and random effects may be included in the typical form (`lme4`, `glmmTMB`),
```{r eval=FALSE}
formula <- ~ (1|group)
formula <- ~ (1|group) + (1|year) + (1|season)
```
For now, only random intercepts are allowed; random slopes will be included eventually.
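As a sketch (mirroring the package's random-intercept example, with `Year` treated as a grouping factor), a model with random intercepts by year could be fit as:

```{r eval=FALSE}
design_matrix$Year <- as.factor(design_matrix$Year)
fit_re <- fit_zoid(formula = ~ (1 | Year),
                   design_matrix = design_matrix,
                   data_matrix = as.matrix(data_matrix),
                   chains = 4,
                   iter = 4000)
```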
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a01_fitting.Rmd |
## ----set-knitr-options, cache=FALSE, echo=FALSE-------------------------------
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
## ----message=FALSE, warning=FALSE---------------------------------------------
library(zoid)
## -----------------------------------------------------------------------------
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100)
## ----eval=FALSE---------------------------------------------------------------
# y$p
# y$X_obs
## -----------------------------------------------------------------------------
p = gtools::rdirichlet(1, alpha = rep(2,10))
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100,
p = p)
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a02_simulating.R |
---
title: "Simulating data"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulating data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
## Simulating data
We will use the "broken stick" approach to simulate data from the Dirichlet - trinomial model. This model assumes that the group proportions for each observation are Dirichlet, but the observed values are either 0, the total sample size (N) or a number between 0 and N.
Our `broken_stick` function can be called as follows,
```{r}
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100)
```
The object `y` is a list with 2 elements: (1) the true underlying compositions (`p`) and (2) the realized data (`X_obs`). They can be accessed as
```{r eval=FALSE}
y$p
y$X_obs
```
By default, the simulation function assumes a uniform prior for the Dirichlet, with all hyperparameters = 1. We can change this by supplying our own proportions. Using the argument `p`, we can simulate proportions from a slightly more concentrated Dirichlet (hyperparameters = 2) and pass them into `broken_stick`
```{r}
p = gtools::rdirichlet(1, alpha = rep(2,10))
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100,
p = p)
```
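As a quick (illustrative) check, the realized compositions should roughly track the proportions we supplied:

```{r eval=FALSE}
round(y$X_obs / rowSums(y$X_obs), 2) # each row approximately equals p
round(p, 2)
```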
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a02_simulating.Rmd |
## ----set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE----
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
draws=100
iter = 3
} else {
draws=1000
iter = 10
}
## ----message=FALSE, warning=FALSE---------------------------------------------
library(zoid)
## ----eval=FALSE---------------------------------------------------------------
# fit <- fit_zoid(data, prior_sd = 2)
## -----------------------------------------------------------------------------
set.seed(123)
sd = fit_prior(n_bins = 8, n_draws = draws, target = 1, iterations=iter)
## ----eval=FALSE---------------------------------------------------------------
# fit <- fit_zoid(data, prior_sd = 1.2)
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a03_beta_priors.R |
---
title: "Priors for compositions"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Priors for compositions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
draws=100
iter = 3
} else {
draws=1000
iter = 10
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
This vignette is designed to illustrate how to build priors for the estimated compositions, $\textbf{p}$. When covariates are included in the design matrix, all coefficients determining $\textbf{p}$ in the Dirichlet regression (both intercepts and slopes or offsets) are assigned $N(0,1)$ priors. This approach is similar to the improper priors used in `brms::brm()`. These priors may be weakly informative, and the user may wish to change the standard deviation -- which can be done with the `prior_sd` argument in `fit_zoid()`.
```{r eval=FALSE}
fit <- fit_zoid(data, prior_sd = 2)
```
## Dirichlet priors
A more familiar approach may be to work with Dirichlet priors. We can adjust the standard deviation in our Normal priors to match the Dirichlet. The helper function for this uses `optim` to minimize the RMSPE between the observed and target values. For example, if we had 8 bins and wanted to find the Dirichlet prior that would correspond to hyperparameters $(\alpha)=1$, we could call the `fit_prior` function.
```{r}
set.seed(123)
sd = fit_prior(n_bins = 8, n_draws = draws, target = 1, iterations=iter)
```
The `sd` object is a list that contains (1) the estimated standard deviation, (2) the value of the objective function at convergence, and (3) whether or not convergence occurred (anything other than 0 is problematic). The value of the standard deviation here in `sd$sd` is 1.200453.
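The individual elements of the returned list can be inspected directly:

```{r eval=FALSE}
sd$sd          # the estimated standard deviation, ~1.20 here
sd$value       # RMSPE of the objective function at convergence
sd$convergence # 0 indicates successful convergence
```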
So in this case, a standard deviation of ~ 1.20 yields a prior equivalent to a $\sim Dirichlet(1)$ prior. This new value can then be entered into our model with the `prior_sd` argument,
```{r eval=FALSE}
fit <- fit_zoid(data, prior_sd = 1.2)
```
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a03_beta_priors.Rmd |
## ----set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE----
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
length_out = 10
mcmc_iter = 50
} else {
length_out = 4
mcmc_iter = 5000
}
## ----message=FALSE, warning=FALSE---------------------------------------------
library(zoid)
## -----------------------------------------------------------------------------
data(coddiet)
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
## ----results="hide"-----------------------------------------------------------
set.seed(123)
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
## -----------------------------------------------------------------------------
prior = data.frame("value" = fit_1$overdispersion_prior,
"dist"="prior")
post = data.frame("value" = rstan::extract(fit_1$model,"phi")$phi,
"dist"="post")
hist(log(fit_1$overdispersion_prior), breaks=seq(-20,20,length.out=100), col=rgb(0,0,1,1/4), xlim=c(-10,10),ylim=c(0,1000), main="Posterior (pink) and prior (blue)", xlab=expression(phi))
hist(log(rstan::extract(fit_1$model,"phi")$phi),breaks=seq(-20,20,length.out=100), col=rgb(1,0,0,1/4), add=T)
## ----results="hide"-----------------------------------------------------------
df = data.frame("sd"=exp(seq(log(0.001),log(0.1),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
## -----------------------------------------------------------------------------
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: grams",type="b")
## -----------------------------------------------------------------------------
data_matrix = data_matrix / 1000
## ----results="hide"-----------------------------------------------------------
df = data.frame("sd"=exp(seq(log(0.001),log(20),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
## -----------------------------------------------------------------------------
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: kilograms",type="b")
## ----results="hide"-----------------------------------------------------------
data("coddiet")
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
fit_2 <- fit_zoid(data_matrix = as.matrix(data_matrix)/1000,
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
## -----------------------------------------------------------------------------
pars_g = get_fitted(fit_1)
pars_kg = get_fitted(fit_2)
plot(pars_g$hi-pars_g$lo, pars_kg$hi-pars_kg$lo,main="",xlab="95% CI width (g)", ylab="95% CI width (kg)")
abline(0,1,col="red")
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a04_priors.R |
---
title: "Prior sensitivity for overdispersion"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Prior sensitivity for overdispersion}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
length_out = 10
mcmc_iter = 50
} else {
length_out = 4
mcmc_iter = 5000
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
This vignette is designed to illustrate the interaction between the units that responses are measured on, and the scale of the prior distribution on overdispersion, $\phi$.
## Fish stomach contents example
We will demonstrate this sensitivity with the cod stomach contents data included with the package. The original units reported are in grams (so numbers in each cell are up to several thousand).
```{r}
data(coddiet)
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
```
We'll ignore the important covariates (year, season) and just treat all observations (rows) as replicates.
The overdispersion parameter $\phi$ is assigned a prior so that $1/\phi \sim Cauchy(0,\sigma)$ where $\sigma$ represents the scale parameter. Because $\phi$ has to be positive, this prior is also assigned a lower bound of 0 (half-Cauchy).
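As a rough sketch of what this prior implies for $\phi$ itself, we can draw from a half-Cauchy on $1/\phi$ and invert (illustration only; this is not necessarily how `fit_zoid()` generates its internal `overdispersion_prior` draws):
```{r eval=FALSE}
sigma <- 5
inv_phi <- abs(rcauchy(1000, location = 0, scale = sigma)) # half-Cauchy(0, sigma)
phi_draws <- 1 / inv_phi
summary(phi_draws)
```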
Using our cod diet data, we can fit the model with several different values of the prior scale, and look at the prior versus posterior draws for $\phi$. First, we'll use the default prior of $\sigma=5$.
```{r results="hide"}
set.seed(123)
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
```
Now we can compare the prior and posterior distributions; because these are skewed, we'll show them in log-space.
```{r}
prior = data.frame("value" = fit_1$overdispersion_prior,
"dist"="prior")
post = data.frame("value" = rstan::extract(fit_1$model,"phi")$phi,
"dist"="post")
hist(log(fit_1$overdispersion_prior), breaks=seq(-20,20,length.out=100), col=rgb(0,0,1,1/4), xlim=c(-10,10),ylim=c(0,1000), main="Posterior (pink) and prior (blue)", xlab=expression(phi))
hist(log(rstan::extract(fit_1$model,"phi")$phi),breaks=seq(-20,20,length.out=100), col=rgb(1,0,0,1/4), add=T)
```
Next we can try re-fitting the model with a much more informative (smaller) value of $\sigma$. For each prior scale, we can calculate the percent overlap between the prior and posterior draws to quantify their similarity.
```{r results="hide"}
df = data.frame("sd"=exp(seq(log(0.001),log(0.1),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
```
```{r}
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: grams",type="b")
```
As a sensitivity, we will change the units of the original data from grams to kilograms.
```{r}
data_matrix = data_matrix / 1000
```
Now, we repeat the sensitivity as a function of the prior scale $\sigma$.
```{r results="hide"}
df = data.frame("sd"=exp(seq(log(0.001),log(20),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
```
```{r}
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: kilograms",type="b")
```
## Does scale of the data impact precision of the posterior estimates?
To answer whether rescaling the data has any impact on parameters other than overdispersion, we can compare the model fit in grams to that fit with kg. We find no differences between the point estimates of proportions or uncertainty intervals.
```{r results="hide"}
data("coddiet")
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
fit_2 <- fit_zoid(data_matrix = as.matrix(data_matrix)/1000,
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
```
```{r}
pars_g = get_fitted(fit_1)
pars_kg = get_fitted(fit_2)
plot(pars_g$hi-pars_g$lo, pars_kg$hi-pars_kg$lo,main="",xlab="95% CI width (g)", ylab="95% CI width (kg)")
abline(0,1,col="red")
```
| /scratch/gouwar.j/cran-all/cranData/zoid/inst/doc/a04_priors.Rmd |
---
title: "Fitting models with zoid"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Fitting models with zoid}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
mcmc_iter = 50
} else {
mcmc_iter = 5000
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
## Fish stomach contents example
Here, we will use a dataset of cod stomachs on the Faroe Bank, published in
*Magnussen, E. 2011. Food and feeding habits of cod (Gadus morhua) on the Faroe Bank. – ICES Journal of Marine Science, 68: 1909–1917.*
The data are also included with our package, and represent a dataframe with observations on rows (stratified by year and season) and prey items across columns.
```{r}
data(coddiet)
```
We need to split the dataset into 2 matrices, one representing the design matrix for the observations ('Year' and 'Season') and the other representing the data matrix of observed biomass per sample - prey item. If all rows are considered replicate observations, there's no need to create a design matrix - but we can test for the effects of Season and Year.
```{r}
design_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==TRUE]
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
```
### Overdispersion or not?
One optional feature of the model is to include overdispersion in the calculations of the 3 probabilities for each cell. Including overdispersion is generally only advised with replicated data or shared information -- and may fit heterogeneous datasets with lots of 0s better than the model without overdispersion. For the cod diet data, we'll include overdispersion because of poor MCMC sampling without it.
### Model selection
Next, we can test some hypotheses about how the data are structured. We'll create the following models (1) a model with all observations as replicate samples, (2) a model with only seasonal effects, (3) a model with only differences by year, and (4) a model with both year and season. Both year and season are treated as factors here -- but continuous covariates can also be included.
Note that for fitting, the data_matrix should be a matrix, but the design_matrix should be a data frame.
```{r results="hide", eval=FALSE}
design_matrix$Season = as.factor(design_matrix$Season)
design_matrix$Year = as.factor(design_matrix$Year)
design_matrix$y = 1 # dummy variable
set.seed(123)
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_2 <- fit_zoid(formula = y ~ Season,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_3 <- fit_zoid(formula = y ~ Year,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
fit_4 <- fit_zoid(formula = y ~ Year + Season,
design_matrix = design_matrix,
data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
chains=4,
iter=4000)
```
To compare models, we could use criteria like LOOIC in the loo package -- this is accessible by calling
```{r eval=FALSE}
loo_1 = loo::loo(fit_1$model)
loo_2 = loo::loo(fit_2$model)
loo_3 = loo::loo(fit_3$model)
loo_4 = loo::loo(fit_4$model)
```
For our example, the LOOIC from the model with Season and Year is lowest (2584.9), indicating the most support over the base model (2879.7), the model with Season only (2892.4), and the model with Year only (2637.2). Two words of caution for this application are that (1) the standard errors of the LOOIC estimates are all in the 120-160 range, so many of the models are within +/- 1 SE of each other, and (2) the Pareto-k diagnostic values fall into the 'bad' category for about 20% of the data points. There are a couple of solutions for this, including more MCMC sampling and using PSIS smoothed sampling.
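Once the `loo` objects above have been computed, the models can also be ranked directly with `loo::loo_compare()`:
```{r eval=FALSE}
loo::loo_compare(loo_1, loo_2, loo_3, loo_4)
```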
### Summarizing estimates
We include several helper functions for processing output into more manageable data frames. First, we can extract the predicted point estimates (and uncertainty intervals) around proportions,
```{r warning=FALSE, message=FALSE}
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
chains=1,
iter=mcmc_iter,
overdispersion = TRUE, refresh=0)
fitted_vals = get_fitted(fit_1)
head(fitted_vals)
```
Second, we can return all parameters (including betas for coefficients and $\phi$, the overdispersion term)
```{r}
fitted_vals = get_pars(fit_1)
head(fitted_vals$betas)
```
### Random effects
For models with grouping variables or factor predictor variables, either fixed or random effects may be included. Both need to be included via the formula interface, and random effects may be included in the typical form (`lme4`, `glmmTMB`),
```{r eval=FALSE}
formula <- ~ (1|group)
formula <- ~ (1|group) + (1|year) + (1|season)
```
For now, only random intercepts are allowed; random slopes will be included eventually.
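As a sketch (reusing the `design_matrix` and `data_matrix` objects from above), a model with random intercepts by season could be specified as:
```{r eval=FALSE}
fit_re <- fit_zoid(formula = y ~ (1 | Season),
                   design_matrix = design_matrix,
                   data_matrix = as.matrix(data_matrix),
                   chains = 4,
                   iter = 4000)
```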
| /scratch/gouwar.j/cran-all/cranData/zoid/vignettes/a01_fitting.Rmd |
---
title: "Simulating data"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulating data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
## Simulating data
We will use the "broken stick" approach to simulate data from the Dirichlet-trinomial model. This model assumes that the group proportions for each observation are Dirichlet, but the observed values are either 0, the total sample size (N), or a number between 0 and N.
Our `broken_stick` function can be called as follows,
```{r}
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100)
```
The object `y` is a list with 2 elements: (1) the true underlying compositions (`p`) and (2) the realized data (`X_obs`). They can be accessed as
```{r eval=FALSE}
y$p
y$X_obs
```
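As a quick sanity check (assuming `y$p` stores one composition per row, matching the `n_obs` rows simulated above), each composition should sum to 1:
```{r eval=FALSE}
rowSums(y$p) # each value should be (close to) 1
```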
By default, the simulation function assumes a uniform prior for the Dirichlet, with hyperparameters = 1. We can change this by specifying our own values of the hyperparameters. Using the argument `p`, we can simulate new values with a slightly larger effective sample size and pass that into `broken_stick`.
```{r}
p = gtools::rdirichlet(1, alpha = rep(2,10))
y = broken_stick(n_obs = 10,
n_groups = 10,
tot_n = 100,
p = p)
```
| /scratch/gouwar.j/cran-all/cranData/zoid/vignettes/a02_simulating.Rmd |
---
title: "Priors for compositions"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Priors for compositions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
draws=100
iter = 3
} else {
draws=1000
iter = 10
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
This vignette is designed to illustrate how to build priors for the estimated compositions, $\textbf{p}$. When covariates are included in the design matrix, all elements of $\textbf{p}$ in the Dirichlet regression (both intercepts and slopes or offsets) are assigned $\sim N(0,1)$ priors. This approach is similar to the improper priors used in `brms::brm()`. These priors may be weakly informative, and the user may wish to change the standard deviation -- which can be done with the `prior_sd` argument in `fit_zoid()`.
```{r eval=FALSE}
fit <- fit_zoid(data, prior_sd = 2)
```
## Dirichlet priors
A more familiar approach may be to work with Dirichlet priors. We can adjust the standard deviation in our Normal priors to match the Dirichlet. The helper function for this uses `optim` to minimize the RMSPE between the observed and target values. For example, if we had 8 bins and wanted to find the standard deviation corresponding to a Dirichlet prior with hyperparameters $\alpha = 1$, we could call the `fit_prior` function.
```{r}
set.seed(123)
sd = fit_prior(n_bins = 8, n_draws = draws, target = 1, iterations=iter)
```
The `sd` object is a list that contains (1) the estimated standard deviation, (2) the value of the objective function at convergence, and (3) whether or not convergence occurred (anything other than 0 is problematic). The value of the standard deviation here in `sd$sd` is 1.200453.
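For instance, we can inspect the returned list directly (only the `sd$sd` element is referenced by name in this vignette; `str()` shows the remaining elements without assuming their names):
```{r eval=FALSE}
sd$sd   # estimated standard deviation, ~ 1.20 in this example
str(sd) # also shows the objective function value and convergence code
```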
So in this case, a standard deviation of ~ 1.20 yields a prior equivalent to a $\sim Dirichlet(1)$ prior. This new value can then be entered into our model with the `prior_sd` argument,
```{r eval=FALSE}
fit <- fit_zoid(data, prior_sd = 1.2)
```
| /scratch/gouwar.j/cran-all/cranData/zoid/vignettes/a03_beta_priors.Rmd |
---
title: "Prior sensitivity for overdispersion"
author: ""
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Prior sensitivity for overdispersion}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r set-knitr-options, cache=FALSE, echo=FALSE, message=FALSE, warning=FALSE}
library("knitr")
opts_chunk$set(message = FALSE, warning=FALSE, fig.width = 5.5)
build = "cran"
if(build=="cran") {
length_out = 10
mcmc_iter = 50
} else {
length_out = 4
mcmc_iter = 5000
}
```
Let's load the necessary packages:
```{r, message=FALSE, warning=FALSE}
library(zoid)
```
This vignette is designed to illustrate the interaction between the units that responses are measured on, and the scale of the prior distribution on overdispersion, $\phi$.
## Fish stomach contents example
We will demonstrate this sensitivity with the cod stomach contents data included with the package. The original units reported are in grams (so numbers in each cell are up to several thousand).
```{r}
data(coddiet)
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
```
We'll ignore the important covariates (year, season) and just treat all observations (rows) as replicates.
The overdispersion parameter $\phi$ is assigned a prior so that $1/\phi \sim Cauchy(0,\sigma)$ where $\sigma$ represents the scale parameter. Because $\phi$ has to be positive, this prior is also assigned a lower bound of 0 (half-Cauchy).
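As a rough sketch of what this prior implies for $\phi$ itself, we can draw from a half-Cauchy on $1/\phi$ and invert (illustration only; this is not necessarily how `fit_zoid()` generates its internal `overdispersion_prior` draws):
```{r eval=FALSE}
sigma <- 5
inv_phi <- abs(rcauchy(1000, location = 0, scale = sigma)) # half-Cauchy(0, sigma)
phi_draws <- 1 / inv_phi
summary(phi_draws)
```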
Using our cod diet data, we can fit the model with several different values of the prior scale, and look at the prior versus posterior draws for $\phi$. First, we'll use the default prior of $\sigma=5$.
```{r results="hide"}
set.seed(123)
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
```
Now we can compare the prior and posterior distributions; because these are skewed, we'll show them in log-space.
```{r}
prior = data.frame("value" = fit_1$overdispersion_prior,
"dist"="prior")
post = data.frame("value" = rstan::extract(fit_1$model,"phi")$phi,
"dist"="post")
hist(log(fit_1$overdispersion_prior), breaks=seq(-20,20,length.out=100), col=rgb(0,0,1,1/4), xlim=c(-10,10),ylim=c(0,1000), main="Posterior (pink) and prior (blue)", xlab=expression(phi))
hist(log(rstan::extract(fit_1$model,"phi")$phi),breaks=seq(-20,20,length.out=100), col=rgb(1,0,0,1/4), add=T)
```
Next we can try re-fitting the model with a much more informative (smaller) value of $\sigma$. For each prior scale, we can calculate the percent overlap between the prior and posterior draws to quantify their similarity.
```{r results="hide"}
df = data.frame("sd"=exp(seq(log(0.001),log(0.1),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
```
```{r}
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: grams",type="b")
```
As a sensitivity, we will change the units of the original data from grams to kilograms.
```{r}
data_matrix = data_matrix / 1000
```
Now, we repeat the sensitivity as a function of the prior scale $\sigma$.
```{r results="hide"}
df = data.frame("sd"=exp(seq(log(0.001),log(20),length.out=length_out)),overlap=0)
for(i in 1:nrow(df)) {
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = df$sd[i],
chains=1,
iter=mcmc_iter, refresh=0)
df$overlap[i] = length(which(fit_1$overdispersion_prior < max(rstan::extract(fit_1$model,"phi")$phi))) / length(fit_1$overdispersion_prior)
}
```
```{r}
plot(df$sd,df$overlap, xlab="Prior SD", ylab="% Overlap",main="Data units: kilograms",type="b")
```
## Does scale of the data impact precision of the posterior estimates?
To answer whether rescaling the data has any impact on parameters other than overdispersion, we can compare the model fit in grams to that fit with kg. We find no differences between the point estimates of proportions or uncertainty intervals.
```{r results="hide"}
data("coddiet")
data_matrix = coddiet[,names(coddiet)%in%c("Year","Season")==FALSE]
fit_1 <- fit_zoid(data_matrix = as.matrix(data_matrix),
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
fit_2 <- fit_zoid(data_matrix = as.matrix(data_matrix)/1000,
overdispersion = TRUE,
overdispersion_sd = 5,
chains=1,
iter=mcmc_iter, refresh=0)
```
```{r}
pars_g = get_fitted(fit_1)
pars_kg = get_fitted(fit_2)
plot(pars_g$hi-pars_g$lo, pars_kg$hi-pars_kg$lo,main="",xlab="95% CI width (g)", ylab="95% CI width (kg)")
abline(0,1,col="red")
```
| /scratch/gouwar.j/cran-all/cranData/zoid/vignettes/a04_priors.Rmd |
CDC_POINT_ROW_TYPE <- "Point" # incoming cdc.csv file row type
POINT_PREDICTION_CLASS <- "point" # JSON prediction class for point prediction elements
BIN_PREDICTION_CLASS <- "bin" # "" bin ""
#' Loads and converts a CDC CSV file to Zoltar's native `list` format
#'
#' @return cdc_csv_file's data as Zoltar's native `list` format, but only the "predictions" item, and not "meta"
#' @param season_start_year An integer specifying the "season" that cdc_csv_file is in. Used to convert EWs to
#' YYYY_MM_DD_DATE_FORMAT. zoltr uses week 30 as the season breakpoint, e.g. the "2016/2017 season" starts with
#'   EW30-2016 (EWs 30 through 52/53) and ends with EW29-2017 (EWs 01 through 29).
#' @param cdc_csv_file A CDC CSV file
#' @export
#' @examples \dontrun{
#' forecast_data <- forecast_data_from_cdc_csv_file(2016, "my_forecast.cdc.csv")
#' }
forecast_data_from_cdc_csv_file <- function(season_start_year, cdc_csv_file) {
cdc_data_frame <- utils::read.csv(cdc_csv_file, stringsAsFactors = FALSE) # "NA" -> NA
forecast_data_from_cdc_data_frame(season_start_year, cdc_data_frame)
}
#
# Recall the seven cdc-project.json targets and their types:
# -------------------------+-------------------------------+-----------+-----------+---------------------
# Target name | target_type | unit | data_type | step_ahead_increment
# -------------------------+-------------------------------+-----------+-----------+---------------------
# "Season onset" | Target.NOMINAL_TARGET_TYPE | n/a (week)| date | n/a
# "Season peak week" | Target.DATE_TARGET_TYPE | "week" | text | n/a
# "Season peak percentage" | Target.CONTINUOUS_TARGET_TYPE | "percent" | float | n/a
# "1 wk ahead" | Target.CONTINUOUS_TARGET_TYPE | "percent" | float | 1
# "2 wk ahead" | "" | "" | "" | 2
# "3 wk ahead" | "" | "" | "" | 3
# "4 wk ahead" | "" | "" | "" | 4
# -------------------------+-------------------------------+-----------+-----------+---------------------
#
# Note that the "Season onset" target is nominal and not date. This is due to how the CDC decided to represent the
# case when predicting no season onset, i.e., the threshold is not exceeded. This is done via a "none" bin where
# both Bin_start_incl and Bin_end_notincl are the strings "none" and not an EW week number. Thus, we have to store
# all bin starts as strings and not dates.
#
#' `forecast_data_from_cdc_csv_file()` helper
#'
#' @return same as `forecast_data_from_cdc_csv_file()`
#' @param season_start_year as passed to `forecast_data_from_cdc_csv_file()`
#' @param cdc_data_frame ""
#' @importFrom rlang .data
forecast_data_from_cdc_data_frame <- function(season_start_year, cdc_data_frame) { # testable internal function that does the work
names(cdc_data_frame) <- sapply(names(cdc_data_frame), tolower)
# validate cdc_data_frame
if (!(inherits(cdc_data_frame, "data.frame"))) {
stop("cdc_data_frame was not a `data.frame`", call. = FALSE)
}
  # NB: names(cdc_data_frame) were lowercased above, so compare against lowercased required names
  required_column_names <- tolower(c("Location", "Target", "Type", "Unit",
                                     "Bin_start_incl", "Bin_end_notincl", "Value"))
  if ((length(cdc_data_frame) == 0) || !all(required_column_names %in% names(cdc_data_frame))) {
stop("cdc_data_frame did not have required columns", call. = FALSE)
}
predictions <- list()
cdc_data_frame_grouped <- cdc_data_frame %>%
dplyr::group_by(.data$location, .data$target, .data$type) %>%
dplyr::group_data()
for (group_idx in seq_len(nrow(cdc_data_frame_grouped))) {
group_row <- cdc_data_frame_grouped[group_idx,] # group_row$location, group_row$target, group_row$type
if (!group_row$target %in% c("Season onset", "Season peak week", "Season peak percentage",
"1 wk ahead", "2 wk ahead", "3 wk ahead", "4 wk ahead")) {
stop(paste0("invalid target_name: '", group_row$target, "'"), call. = FALSE)
}
point_values <- list() # NB: should only be one point row, but collect all (but don't validate here)
bincat_cats <- list()
bincat_probs <- list()
for (group_rows_idx in seq_along(group_row$.rows[[1]])) {
cdc_data_frame_idx <- group_row$.rows[[1]][group_rows_idx]
# NB: cdc_row values could come in as numbers or strings, depending on the source csv file values
cdc_row <- cdc_data_frame[cdc_data_frame_idx,] # cdc_row$bin_start_incl, cdc_row$bin_end_notincl, cdc_row$value
if (group_row$type == CDC_POINT_ROW_TYPE) {
point_value <- process_csv_point_row(season_start_year, group_row$target, as.numeric(cdc_row$value))
point_values <- append(point_values, point_value)
} else { # bin row
# recall that the "Season onset" target is nominal and not date. This is due to how the CDC decided to represent
# the case when predicting no season onset, i.e., the threshold is not exceeded. This is done via a "none" bin
# where both Bin_start_incl and Bin_end_notincl are the strings "none" and not an EW week number. Thus we need
# to check for that case and replace with NAs, which is what process_csv_bin_row() expects
bin_start_incl <- if (cdc_row$bin_start_incl == "none") as.numeric(NA) else as.numeric(cdc_row$bin_start_incl)
bin_end_notincl <- if (cdc_row$bin_end_notincl == "none") as.numeric(NA) else as.numeric(cdc_row$bin_end_notincl)
bin_cat_and_prob <- process_csv_bin_row(season_start_year, group_row$target, as.numeric(cdc_row$value),
bin_start_incl, bin_end_notincl)
bincat_cats <- append(bincat_cats, bin_cat_and_prob[[1]])
bincat_probs <- append(bincat_probs, bin_cat_and_prob[[2]])
}
}
# add the actual prediction dicts
if (length(point_values) > 0) { # yes warning
if (length(point_values) > 1) {
stop(paste0("length(point_values) > 1: ", point_values), call. = FALSE)
}
point_value <- point_values[[1]]
prediction <- list("unit" = group_row$location, "target" = group_row$target, "class" = POINT_PREDICTION_CLASS,
"prediction" = list("value" = point_value))
predictions[[length(predictions) + 1]] <- prediction
}
if (length(bincat_cats) >= 1) { # yes warning: "NAs introduced by coercion"
prediction <- list("unit" = group_row$location, "target" = group_row$target, "class" = BIN_PREDICTION_CLASS,
"prediction" = list("cat" = bincat_cats, "prob" = bincat_probs))
predictions[[length(predictions) + 1]] <- prediction
}
}
list("predictions" = predictions)
}
process_csv_point_row <- function(season_start_year, target_name, value) {
# returns: point value for the args
if (target_name == 'Season onset') { # nominal target. value: None or an EW Monday date
if (is.na(value)) {
'none' # convert back from None to original 'none' input
} else { # value is an EW week number (float)
# note that value may be a fraction (e.g., 50.0012056690978, 4.96302456525203), so we round
# the EW number to get an int, but this could cause boundary issues where the value is
# invalid, either:
# 1) < 1 (so use last EW in season_start_year), or:
# 2) > the last EW in season_start_year (so use EW01 of season_start_year + 1)
ew_week <- round(value)
if (ew_week < 1) {
ew_week <- mmwr_weeks_in_year(season_start_year) # wrap back to previous EW
} else if (ew_week > mmwr_weeks_in_year(season_start_year)) {
ew_week <- 1
}
monday_date <- monday_date_from_ew_and_season_start_year(ew_week, season_start_year)
strftime(monday_date, YYYY_MM_DD_DATE_FORMAT)
}
} else if (is.na(value)) {
stop(paste0("None point values are only valid for 'Season onset' targets. target_name='", target_name, "'"),
call. = FALSE)
} else if (target_name == 'Season peak week') { # date target. value: an EW Monday date
# same 'wrapping' logic as above to handle rounding boundaries
ew_week <- round(value)
if (ew_week < 1) {
ew_week <- mmwr_weeks_in_year(season_start_year) # wrap back to previous EW
} else if (ew_week > mmwr_weeks_in_year(season_start_year)) {
ew_week <- 1
}
monday_date <- monday_date_from_ew_and_season_start_year(ew_week, season_start_year)
strftime(monday_date, YYYY_MM_DD_DATE_FORMAT)
} else { # 'Season peak percentage', '1 wk ahead', '2 wk ahead', '3 wk ahead', '4 wk ahead'
value
}
}
process_csv_bin_row <- function(season_start_year, target_name, value, bin_start_incl, bin_end_notincl) {
# returns: 2-tuple for the args: (bin_cat, bin_prob)
if (target_name == 'Season onset') { # nominal target. value: None or an EW Monday date
if (is.na(bin_start_incl) && is.na(bin_end_notincl)) { # "none" bin (probability of no onset)
list('none', value) # convert back from None to original 'none' input
} else if (!is.na(bin_start_incl) && !is.na(bin_end_notincl)) { # regular (non-"none") bin
monday_date <- monday_date_from_ew_and_season_start_year(bin_start_incl, season_start_year)
list(strftime(monday_date, YYYY_MM_DD_DATE_FORMAT), value)
} else {
stop(paste0("got 'Season onset' row but not both start and end were None. bin_start_incl=", bin_start_incl,
", bin_end_notincl=", bin_end_notincl),
call. = FALSE)
}
} else if (is.na(bin_start_incl) || is.na(bin_end_notincl)) {
stop(paste0("None bins are only valid for 'Season onset' targets. target_name='", target_name, "', ",
". bin_start_incl, bin_end_notincl: ", bin_start_incl, ", ", bin_end_notincl),
call. = FALSE)
} else if (target_name == 'Season peak week') { # date target. value: an EW Monday date
monday_date <- monday_date_from_ew_and_season_start_year(bin_start_incl, season_start_year)
list(strftime(monday_date, YYYY_MM_DD_DATE_FORMAT), value)
} else { # 'Season peak percentage', '1 wk ahead', '2 wk ahead', '3 wk ahead', '4 wk ahead'
list(bin_start_incl, value)
}
}
#
# ---- MMWR utils ----
#
YYYY_MM_DD_DATE_FORMAT <- '%Y-%m-%d' # e.g., '2017-01-17'
# This number is the internal reichlab standard: "We used week 30. I don't think this is a standardized concept outside
# of our lab though. We use separate concepts for a "season" and a "year". So, e.g. the "2016/2017 season" starts with
# EW30-2016 and ends with EW29-2017."
SEASON_START_EW_NUMBER <- 30
monday_date_from_ew_and_season_start_year <- function(ew_week, season_start_year) {
# :param ew_week: an epi week from within a cdc csv forecast file. e.g., 1, 30, 52
# :param season_start_year
# :return: a datetime.date that is the Monday of the EW corresponding to the args
if (ew_week < SEASON_START_EW_NUMBER) {
sunday_date <- MMWRweek::MMWRweek2Date(season_start_year + 1, ew_week)
} else {
sunday_date <- MMWRweek::MMWRweek2Date(season_start_year, ew_week)
}
sunday_date + 1 # add one day
}
mmwr_weeks_in_year <- function(year) {
# returns the number of epiweeks in a year. based on `pymmwr.epiweeks_in_year()` - https://github.com/reichlab/pymmwr/blob/b5ebdd88cc1e4d33548010e04b25ece4cb982b8e/pymmwr.py#L83
if (MMWRweek::MMWRweek(MMWRweek::MMWRweek2Date(year, 53))$MMWRyear == year) {
53
} else {
52
}
}
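# Usage sketch for the helpers above (illustrative; exact values follow the MMWRweek package's calendar):
#   mmwr_weeks_in_year(2014)                            # 53
#   mmwr_weeks_in_year(2016)                            # 52
#   monday_date_from_ew_and_season_start_year(1, 2016)  # Monday of EW01-2017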
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/cdc.R |
#
# ---- utility functions ----
#
url_for_projects <- function(zoltar_connection) {
paste0(zoltar_connection$host, '/api/projects/')
}
url_for_token_auth <- function(zoltar_connection) {
paste0(zoltar_connection$host, '/api-token-auth/')
}
add_auth_headers <- function(zoltar_connection) {
if (!inherits(zoltar_connection, "ZoltarConnection")) {
stop(paste0("zoltar_connection was not a ZoltarConnection: '", zoltar_connection, "'"), call. = FALSE)
}
if (inherits(zoltar_connection$session, "ZoltarSession")) {
httr::add_headers("Authorization" = paste0("JWT ", zoltar_connection$session$token))
}
}
#' Get JSON for a resource (URL). Authenticates if necessary
#'
#' @return A `list` that contains JSON information for the passed URL
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param url A string of the resource's URL
get_resource <- function(zoltar_connection, url) {
re_authenticate_if_necessary(zoltar_connection)
message(paste0("get_resource(): GET: ", url))
response <- httr::GET(url = url, add_auth_headers(zoltar_connection))
httr::stop_for_status(response)
httr::content(response, as = "parsed", encoding = "UTF-8")
}
# deletes the resource at the passed URL
delete_resource <- function(zoltar_connection, url) {
re_authenticate_if_necessary(zoltar_connection)
message(paste0("delete_resource(): DELETE: ", url))
response <- httr::DELETE(url = url, add_auth_headers(zoltar_connection))
httr::stop_for_status(response)
}
#
# ---- ZoltarConnection class ----
#
#' Get a connection to a Zoltar host
#'
#' Returns a new connection object, which is the starting point for working with the Zoltar API. Once you have the
#' connection you can call \code{\link{zoltar_authenticate}} on it, and then call \code{\link{projects}} to get a list
#' of Project objects to start working with.
#'
#' A note on URLs: We require a trailing slash ('/') on all URLs. The only exception is the host arg passed to this
#' function. This convention matches the Django REST framework's, which is what Zoltar is written in.
#'
#' @return A `ZoltarConnection` object
#' @param host The Zoltar site to connect to. Does *not* include a trailing slash ('/'). Defaults to \url{https://zoltardata.com}
#' @export
#' @examples \dontrun{
#' conn <- new_connection()
#' }
new_connection <- function(host = "https://zoltardata.com") {
self <- structure(environment(), class = "ZoltarConnection")
host <- host
username <- NULL
password <- NULL
session <- NULL
self
}
#' @export
print.ZoltarConnection <-
function(x, ...) {
cat(class(x), " '", x$host, "' ",
if (is.null(x$session)) "not authenticated"
else paste0("authenticated (exp=", token_expiration_date(x$session), " UTC)"),
"\n", sep = '')
}
#' Log in to a Zoltar host
#'
#' Authenticates the passed `ZoltarConnection` using the passed credentials. The credentials and the resulting
#' session (which holds the authentication token) are stored on the connection, and subsequent API calls
#' re-authenticate automatically when the token expires.
#'
#' @return None
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}.
#' @param username Username for the account to use on the connection's host
#' @param password Password ""
#' @export
#' @examples \dontrun{
#' zoltar_authenticate(conn, "USERNAME", "PASSWORD")
#' }
zoltar_authenticate <- function(zoltar_connection, username, password) {
if (!inherits(zoltar_connection, "ZoltarConnection")) {
stop(paste0("zoltar_connection was not a ZoltarConnection: '", zoltar_connection, "'"), call. = FALSE)
}
zoltar_connection$username <- username
zoltar_connection$password <- password
zoltar_connection$session <- new_session(zoltar_connection)
}
re_authenticate_if_necessary <- function(zoltar_connection) {
if (!inherits(zoltar_connection, "ZoltarConnection")) {
stop(paste0("zoltar_connection was not a ZoltarConnection: '", zoltar_connection, "'"), call. = FALSE)
}
if (inherits(zoltar_connection$session, "ZoltarSession") && is_token_expired(zoltar_connection$session)) {
message(paste0("re-authenticating expired token '", zoltar_connection$host, "'"))
zoltar_authenticate(zoltar_connection, zoltar_connection$username, zoltar_connection$password)
}
}
#' Get information about all projects
#'
#' @return A `data.frame` of all projects' contents
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @export
#' @examples \dontrun{
#' the_projects <- projects(conn)
#' }
projects <- function(zoltar_connection) {
projects_json <- get_resource(zoltar_connection, url_for_projects(zoltar_connection))
id_column <- c() # integer
url_column <- c() # character
owner_url_column <- c() # ""
is_public_column <- c() # logical
name_column <- c() # character
description_column <- c() # ""
home_url_column <- c() # ""
time_interval_type_column <- c() # ""
visualization_y_label_column <- c() # ""
core_data_column <- c() # ""
for (project_json in projects_json) {
id_column <- append(id_column, project_json$id)
url_column <- append(url_column, project_json$url)
owner_url_value <- if (is.null(project_json$owner)) NA else project_json$owner
owner_url_column <- append(owner_url_column, owner_url_value)
is_public_column <- append(is_public_column, project_json$is_public)
name_column <- append(name_column, project_json$name)
description_column <- append(description_column, project_json$description)
home_url_column <- append(home_url_column, project_json$home_url)
time_interval_type_column <- append(time_interval_type_column, project_json$time_interval_type)
visualization_y_label_column <- append(visualization_y_label_column, project_json$visualization_y_label)
core_data_column <- append(core_data_column, project_json$core_data)
}
data.frame(id = id_column, url = url_column, owner_url = owner_url_column, public = is_public_column, name = name_column,
description = description_column, home_url = home_url_column,
time_interval_type = time_interval_type_column, visualization_y_label=visualization_y_label_column,
core_data = core_data_column, stringsAsFactors = FALSE)
}
#
# ZoltarSession class. used internally only
#
new_session <- function(zoltar_connection) {
self <- structure(environment(), class = "ZoltarSession")
zoltar_connection <- zoltar_connection
token <- get_token(self) # expects zoltar_connection
self
}
# POSTs to obtain and return a new JWT token string from zoltar. it has decoded contents that look like this:
# - header: {"typ": "JWT", "alg": "HS256"}
# - payload: {"user_id": 3, "username": "model_owner1", "exp": 1558442805, "email": ""}
get_token <- function(zoltar_session) {
zoltar_connection <- zoltar_session$zoltar_connection
token_auth_url <- url_for_token_auth(zoltar_connection)
message(paste0("get_token(): POST: ", token_auth_url))
response <-
httr::POST(
url = token_auth_url,
httr::accept_json(),
body = list(
username = zoltar_connection$username,
password = zoltar_connection$password
)
)
httr::stop_for_status(response)
json_content <- httr::content(response, "parsed")
json_content$token
}
# returns a POSIXct for the zoltar_session's token. see notes in is_token_expired() for details on extracting the date
token_expiration_date <- function(zoltar_session) {
token_split <- strsplit(zoltar_session$token, ".", fixed = TRUE) # 3 parts: header, payload, and signature
payload_encoded <- token_split[[1]][2]
payload_decoded <- base64url::base64_urldecode(payload_encoded)
payload <- jsonlite::fromJSON(payload_decoded)
exp_timestamp_utc <- payload$exp
exp_timestamp_date <- .POSIXct(exp_timestamp_utc, tz = "UTC")
exp_timestamp_date
}
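# e.g., for the sample payload above, exp = 1558442805 corresponds to 2019-05-21 12:46:45 UTC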
# returns TRUE if zoltar_session's token is expired, and FALSE if still valid. details: based on how Zoltar implements
# JWT, we determine expiration by comparing the current datetime to the token's payload's "exp" field. its value is a
# POSIX timestamp of a UTC date and time as returned by datetime.utcnow().timestamp() - https://docs.python.org/3.6/library/datetime.html#datetime.datetime.utcnow .
is_token_expired <- function(zoltar_session) {
token_expiration_date(zoltar_session) <= Sys.time() # now
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/connection.R |
#
# ---- forecast functions ----
#
#' Gets a forecast's information
#'
#' @return A `list` of forecast information for the passed forecast_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param forecast_url URL of a forecast in zoltar_connection's forecasts
#' @export
#' @examples \dontrun{
#' the_forecast_info <- forecast_info(conn, "http://example.com/api/forecast/1/")
#' }
forecast_info <- function(zoltar_connection, forecast_url) {
the_forecast_info <- get_resource(zoltar_connection, forecast_url)
the_forecast_info$forecast_model_url <- the_forecast_info$forecast_model
the_forecast_info$forecast_model <- NULL
the_forecast_info$created_at <- as.Date(the_forecast_info$created_at) # "2020-03-05T15:47:47.369231-05:00"
the_forecast_info$time_zero$timezero_date <- as.Date(the_forecast_info$time_zero$timezero_date)
the_forecast_info$time_zero$data_version_date <- if (is.null(the_forecast_info$time_zero$data_version_date)) NA
else as.Date(the_forecast_info$time_zero$data_version_date)
the_forecast_info
}
#' Delete a forecast
#'
#' Deletes the forecast with the passed URL. This is permanent and cannot be undone.
#'
#' @return None
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param forecast_url URL of a forecast in zoltar_connection's forecasts
#' @export
#' @examples \dontrun{
#' delete_forecast(conn, "http://example.com/api/forecast/1/")
#' }
delete_forecast <- function(zoltar_connection, forecast_url) {
delete_resource(zoltar_connection, forecast_url)
}
#' Gets a forecast's data
#'
#' @return Forecast data as a `list` in the Zoltar standard format. meta information is ignored. Full documentation at
#' \url{https://docs.zoltardata.com/}.
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param forecast_url URL of a forecast in zoltar_connection's forecasts
#' @export
#' @examples \dontrun{
#' forecast_data <- download_forecast(conn, "http://example.com/api/forecast/1/")
#' }
download_forecast <- function(zoltar_connection, forecast_url) {
forecast_data_url <- paste0(forecast_url, 'data/')
forecast_data <- get_resource(zoltar_connection, forecast_data_url)
if (is.null(forecast_data)) { # true for tests
return(NULL)
}
# convert date-based targets to Date objects
targets_df <- data_frame_from_targets_json(forecast_data$meta$targets)
date_target_names <- targets_df[targets_df$type == "date", "name"]
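  # e.g., for a CDC-style project, date_target_names would include "Season peak week" (the date-type target)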
if (length(date_target_names) != 0) {
for (prediction_element_idx in seq_along(forecast_data$predictions)) {
prediction_element <- forecast_data$predictions[[prediction_element_idx]]
is_date_prediction <- prediction_element$target %in% date_target_names
if (!is_date_prediction) {
next
}
if (prediction_element$class == "point") {
point_value <- as.Date(forecast_data$predictions[[prediction_element_idx]]$prediction$value,
format = YYYY_MM_DD_DATE_FORMAT)
forecast_data$predictions[[prediction_element_idx]]$prediction$value <- point_value
} else if (prediction_element$class == "bin") {
cat_value <- lapply(forecast_data$predictions[[prediction_element_idx]]$prediction$cat,
FUN = function(x) as.Date(x, YYYY_MM_DD_DATE_FORMAT))
forecast_data$predictions[[prediction_element_idx]]$prediction$cat <- cat_value
} else if (prediction_element$class == "sample") {
sample_value <- lapply(forecast_data$predictions[[prediction_element_idx]]$prediction$sample,
FUN = function(x) as.Date(x, YYYY_MM_DD_DATE_FORMAT))
forecast_data$predictions[[prediction_element_idx]]$prediction$sample <- sample_value
} # else "named"
}
}
forecast_data
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/forecast.R |
#
# ---- model functions ----
#
YYYY_MM_DD_DATE_FORMAT <- "%Y-%m-%d" # e.g., '2017-01-17'
#' Get information about a model
#'
#' @return A `list` of model information for the passed model_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param model_url URL of a model in zoltar_connection's models
#' @export
#' @examples \dontrun{
#' the_model_info <- model_info(conn, "http://www.zoltardata.com/api/model/1/")
#' }
model_info <- function(zoltar_connection, model_url) {
the_model_info <- get_resource(zoltar_connection, model_url)
the_model_info$aux_data_url <- if (is.null(the_model_info$aux_data_url)) NA else the_model_info$aux_data_url
the_model_info$forecasts <- NULL # obtained via forecasts(zoltar_connection, model_url)
the_model_info
}
#' Create a model
#'
#' Creates the model in the passed project using the passed list. Fails if a model with the passed name already exists.
#'
#' @return model_url of the newly-created model
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url url of a project in zoltar_connection's projects. this is the project the new model will be created
#' in
#' @param model_config A `list` containing a Zoltar model configuration. An example: example-model-config.json.
#' Full documentation at \url{https://docs.zoltardata.com/}.
#' @export
#' @examples \dontrun{
#' new_model_url <- create_model(conn, "https://www.zoltardata.com/project/9/",
#' jsonlite::read_json("example-model-config.json"))
#' }
create_model <- function(zoltar_connection, project_url, model_config) {
re_authenticate_if_necessary(zoltar_connection)
models_url <- paste0(project_url, 'models/')
response <- httr::POST(
url = models_url,
add_auth_headers(zoltar_connection),
body = list(model_config = model_config),
encode = "json")
# the Zoltar API returns 400 if there was an error POSTing. the content is JSON with a $error key that contains the
# error message
json_response <- httr::content(response, "parsed")
if (response$status_code == 400) {
stop(json_response$error, call. = FALSE)
}
json_response$url # throw away rest of json and let model_info() reload/refresh it
}
#' Delete a model
#'
#' Deletes the model with the passed ID. This is permanent and cannot be undone.
#'
#' @return None
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param model_url URL of a model in zoltar_connection's models
#' @export
#' @examples \dontrun{
#' delete_model(conn, "http://www.zoltardata.com/api/model/1/")
#' }
delete_model <- function(zoltar_connection, model_url) {
delete_resource(zoltar_connection, model_url)
}
#' Get a model's forecasts
#'
#' @return A `data.frame` of forecast information for the passed model
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param model_url URL of a model in zoltar_connection's models
#' @export
#' @examples \dontrun{
#' the_forecasts <- forecasts(conn, "http://www.zoltardata.com/api/model/1/")
#' }
forecasts <- function(zoltar_connection, model_url) {
forecasts_url <- paste0(model_url, 'forecasts/')
forecasts_json <- get_resource(zoltar_connection, forecasts_url)
id_column <- c() # integer
url_column <- c() # character
forecast_model_url_column <- c() # ""
source_column <- c() # ""
timezero_url_column <- c() # ""
created_at_column <- c() # Date
notes_column <- c() # character
forecast_data_url_column <- c() # ""
for (forecast_json in forecasts_json) {
id_column <- append(id_column, forecast_json$id)
url_column <- append(url_column, forecast_json$url)
forecast_model_url_column <- append(forecast_model_url_column, forecast_json$forecast_model)
source_column <- append(source_column, forecast_json$source)
timezero_url_column <- append(timezero_url_column, forecast_json$time_zero$url) # "unnest" timezeros to URL
created_at_column <- append(created_at_column, as.Date(forecast_json$created_at)) # "2020-03-05T15:47:47.369231-05:00"
notes_column <- append(notes_column, forecast_json$notes)
forecast_data_url_column <- append(forecast_data_url_column, forecast_json$forecast_data)
}
data.frame(id = id_column, url = url_column, forecast_model_url = forecast_model_url_column, source = source_column,
timezero_url = timezero_url_column, created_at = created_at_column, notes = notes_column,
forecast_data_url = forecast_data_url_column, stringsAsFactors = FALSE)
}
#' Upload a forecast
#'
#' This function submits forecast data to the server for uploading. Returns an UploadFileJob object that can be used to
#' track the upload's progress. (Uploads are processed in a queue, which means they are delayed until their turn comes
#' up, which depends on the number of current uploads in the queue. Zoltar tracks these via `UploadFileJob` objects.)
#'
#' @return An UploadFileJob URL for the upload
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param model_url URL of a model in zoltar_connection's projects
#' @param timezero_date The date of the project timezero you are uploading for. It is a string in format YYYY-MM-DD
#' @param forecast_data Forecast data as a `list` in the Zoltar standard format
#' @param notes Optional user notes for the new forecast
#' @export
#' @examples \dontrun{
#' forecast_data <- jsonlite::read_json("docs-predictions.json")
#' upload_file_job_url <- upload_forecast(conn, "http://www.zoltardata.com/api/model/1/",
#' "2017-01-17", forecast_data, "a mid-January forecast")
#' }
upload_forecast <- function(zoltar_connection, model_url, timezero_date, forecast_data, notes = "") {
if (!(inherits(forecast_data, "list"))) {
stop("forecast_data was not a `list`", call. = FALSE)
}
re_authenticate_if_necessary(zoltar_connection)
forecasts_url <- paste0(model_url, 'forecasts/')
message(paste0("upload_forecast(): POST: ", forecasts_url))
temp_json_file <- tempfile(pattern = "forecast", fileext = ".json")
# w/out auto_unbox: primitives are written as lists of one item, e.g.,
# {"unit":["HHS Region 1"], "target":["1 wk ahead"], "class":["bin"], "prediction":{"cat":[[0] ,[0.1]],"prob":[[0.1], [0.9]]}}
jsonlite::write_json(forecast_data, temp_json_file, auto_unbox = TRUE)
response <- httr::POST(
url = forecasts_url,
httr::accept_json(),
add_auth_headers(zoltar_connection),
body = list(data_file = httr::upload_file(temp_json_file), timezero_date = timezero_date, notes = notes))
# the Zoltar API returns 400 if there was an error POSTing. the content is JSON with a $error key that contains the
# error message
json_response <- httr::content(response, "parsed")
if (response$status_code == 400) {
stop(json_response$error, call. = FALSE)
}
json_response$url # throw away rest of json and let upload_file_job_info() reload/refresh it
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/model.R |
#
# ---- project functions ----
#
#' Create a project
#'
#' Creates the project using the passed project configuration list. Fails if a project with the passed name already
#' exists.
#'
#' @return project_url of the newly-created project
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_config A `list` containing a Zoltar project configuration. Note that this list is validated by the
#'   server and not here. An example: cdc-project.json. Full documentation at \url{https://docs.zoltardata.com/}.
#' @export
#' @examples \dontrun{
#' new_project_url <- create_project(conn, jsonlite::read_json("cdc-project.json"))
#' }
create_project <- function(zoltar_connection, project_config) {
re_authenticate_if_necessary(zoltar_connection)
projects_url <- url_for_projects(zoltar_connection)
response <- httr::POST(
url = projects_url,
add_auth_headers(zoltar_connection),
body = list(project_config = project_config),
encode="json")
# the Zoltar API returns 400 if there was an error POSTing. the content is JSON with a $error key that contains the
# error message
json_response <- httr::content(response, "parsed")
if (response$status_code == 400) {
stop(json_response$error, call. = FALSE)
}
json_response$url # throw away rest of json and let project_info() reload/refresh it
}
#' Delete a project
#'
#' Deletes the project with the passed URL. This is permanent and cannot be undone.
#'
#' @return None
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' delete_project(conn, "https://www.zoltardata.com/project/9/")
#' }
delete_project <- function(zoltar_connection, project_url) {
delete_resource(zoltar_connection, project_url)
}
#' Get a project's scores
#'
#' @return A `data.frame` of score data for all models in the passed project URL
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_scores <- scores(conn, "https://www.zoltardata.com/project/9/")
#' }
scores <- function(zoltar_connection, project_url) {
scores_url <- paste0(project_url, 'score_data/')
get_resource(zoltar_connection, scores_url)
}
#' Get a project's truth
#'
#' @return A `data.frame` of truth data for the passed project URL
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_truth <- truth(conn, "https://www.zoltardata.com/project/9/")
#' }
truth <- function(zoltar_connection, project_url) {
truth_url <- paste0(project_url, 'truth_data/')
get_resource(zoltar_connection, truth_url)
}
#' Get a project's models
#'
#' @return A `data.frame` of model contents for all models in the passed project
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_models <- models(conn, "https://www.zoltardata.com/project/9/")
#' }
models <- function(zoltar_connection, project_url) {
models_url <- paste0(project_url, 'models/')
models_json <- get_resource(zoltar_connection, models_url)
id_column <- c() # integer
url_column <- c() # character
project_url_column <- c() # ""
owner_url_column <- c() # ""
name_column <- c() # ""
abbreviation_column <- c() # ""
description_column <- c() # ""
home_url_column <- c() # ""
aux_data_url_column <- c() # "". might be NULL. substitute NA if so
for (model_json in models_json) {
id_column <- append(id_column, model_json$id)
url_column <- append(url_column, model_json$url)
project_url_column <- append(project_url_column, model_json$project)
owner_url_value <- if (is.null(model_json$owner)) NA else model_json$owner
owner_url_column <- append(owner_url_column, owner_url_value)
name_column <- append(name_column, model_json$name)
abbreviation_column <- append(abbreviation_column, model_json$abbreviation)
description_column <- append(description_column, model_json$description)
home_url_column <- append(home_url_column, model_json$home_url)
aux_data_value <- if (is.null(model_json$aux_data_url)) NA else model_json$aux_data_url
aux_data_url_column <- append(aux_data_url_column, aux_data_value)
}
  data.frame(id = id_column, url = url_column, project_url = project_url_column, owner_url = owner_url_column,
             name = name_column, abbreviation = abbreviation_column, description = description_column,
             home_url = home_url_column, aux_data_url = aux_data_url_column, stringsAsFactors = FALSE)
}
#' Get a project's zoltar_units
#'
#' @return A `data.frame` of unit contents for the passed project
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_units <- zoltar_units(conn, "https://www.zoltardata.com/project/9/")
#' }
zoltar_units <- function(zoltar_connection, project_url) {
units_url <- paste0(project_url, 'units/')
units_json <- get_resource(zoltar_connection, units_url)
id_column <- c() # integer
url_column <- c() # character
name_column <- c() # ""
for (unit_json in units_json) {
id_column <- append(id_column, unit_json$id)
url_column <- append(url_column, unit_json$url)
name_column <- append(name_column, unit_json$name)
}
data.frame(id = id_column, url = url_column, name = name_column, stringsAsFactors = FALSE)
}
#' Get a project's targets
#'
#' @return A `data.frame` of target contents for the passed project
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_targets <- targets(conn, "https://www.zoltardata.com/project/9/")
#' }
targets <- function(zoltar_connection, project_url) {
targets_url <- paste0(project_url, 'targets/')
targets_json <- get_resource(zoltar_connection, targets_url)
data_frame_from_targets_json(targets_json)
}
data_frame_from_targets_json <- function(targets_json) {
# helper that can be called independently
id_column <- c() # integer
url_column <- c() # character
name_column <- c() # ""
description_column <- c() # ""
type_column <- c() # ""
is_step_ahead_column <- c() # logical
step_ahead_increment_column <- c() # numeric (NULL if not is_step_ahead)
unit_column <- c() # character (NULL for some target types)
for (target_json in targets_json) {
id_column <- append(id_column, target_json$id)
url_column <- append(url_column, target_json$url)
name_column <- append(name_column, target_json$name)
description_column <- append(description_column, target_json$description)
type_column <- append(type_column, target_json$type)
is_step_ahead_column <- append(is_step_ahead_column, target_json$is_step_ahead)
step_ahead_value <- if (is.null(target_json$step_ahead_increment)) NA else target_json$step_ahead_increment
step_ahead_increment_column <- append(step_ahead_increment_column, step_ahead_value)
unit_value <- if (is.null(target_json$unit)) NA else target_json$unit
unit_column <- append(unit_column, unit_value)
}
data.frame(id = id_column, url = url_column, name = name_column, description = description_column,
type = type_column, is_step_ahead = is_step_ahead_column,
step_ahead_increment = step_ahead_increment_column, unit = unit_column, stringsAsFactors = FALSE)
}
#' Get a project's timezeros
#'
#' @return A `data.frame` of timezero contents for the passed project
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_timezeros <- timezeros(conn, "https://www.zoltardata.com/project/9/")
#' }
timezeros <- function(zoltar_connection, project_url) {
timezeros_url <- paste0(project_url, 'timezeros/')
timezeros_json <- get_resource(zoltar_connection, timezeros_url)
id_column <- c() # integer
url_column <- c() # character
timezero_date_column <- c() # Date
data_version_date_column <- c() # "" (might be NULL)
is_season_start_column <- c() # logical
season_name_column <- c() # character (might be NULL)
for (timezero_json in timezeros_json) {
id_column <- append(id_column, timezero_json$id)
url_column <- append(url_column, timezero_json$url)
timezero_date_column <- append(timezero_date_column, as.Date(timezero_json$timezero_date, YYYY_MM_DD_DATE_FORMAT))
data_version_date_value <- if (is.null(timezero_json$data_version_date)) NA else
as.Date(timezero_json$data_version_date, YYYY_MM_DD_DATE_FORMAT)
data_version_date_column <- append(data_version_date_column, data_version_date_value)
is_season_start_column <- append(is_season_start_column, timezero_json$is_season_start)
season_name_value <- if (is.null(timezero_json$season_name)) NA else timezero_json$season_name
season_name_column <- append(season_name_column, season_name_value)
}
data.frame(id = id_column, url = url_column, timezero_date = timezero_date_column,
data_version_date = data_version_date_column, is_season_start = is_season_start_column,
season_name = season_name_column, stringsAsFactors = FALSE)
}
#
# ---- info functions ----
#
#' Get information about a project
#'
#' @return A `list` of project information for the passed project_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param project_url URL of a project in zoltar_connection's projects
#' @export
#' @examples \dontrun{
#' the_project_info <- project_info(conn, "https://www.zoltardata.com/project/9/")
#' }
project_info <- function(zoltar_connection, project_url) {
get_resource(zoltar_connection, project_url)
}
#' Get information about a target
#'
#' @return A `list` of target information for the passed target_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param target_url URL of a target in zoltar_connection's targets
#' @export
#' @examples \dontrun{
#' the_target_info <- target_info(conn, "https://www.zoltardata.com/target/3/")
#' }
target_info <- function(zoltar_connection, target_url) {
get_resource(zoltar_connection, target_url)
}
#' Get information about a timezero
#'
#' @return A `list` of timezero information for the passed timezero_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param timezero_url URL of a timezero in zoltar_connection's timezeros
#' @export
#' @examples \dontrun{
#' the_timezero_info <- timezero_info(conn, "https://www.zoltardata.com/timezero/3/")
#' }
timezero_info <- function(zoltar_connection, timezero_url) {
get_resource(zoltar_connection, timezero_url)
}
#' Get information about a unit
#'
#' @return A `list` of unit information for the passed unit_url
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param unit_url URL of a unit in zoltar_connection's zoltar_units
#' @export
#' @examples \dontrun{
#' the_unit_info <- unit_info(conn, "https://www.zoltardata.com/unit/3/")
#' }
unit_info <- function(zoltar_connection, unit_url) {
get_resource(zoltar_connection, unit_url)
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/project.R |
#
# ---- UploadFileJob functions ----
#
status_as_str <- function(status_int) {
# to map status ints to strings, we simply index into a vector. recall status starts with zero
status_names <-
c(
"PENDING",
"CLOUD_FILE_UPLOADED",
"QUEUED",
"CLOUD_FILE_DOWNLOADED",
"SUCCESS",
"FAILED"
)
status_names[status_int + 1]
}
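# Illustrative mapping (not run; status_as_str is internal):
#   status_as_str(0) # "PENDING"
#   status_as_str(4) # "SUCCESS"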
#' Get an upload's information
#'
#' Gets an upload's information that can be used to track the upload's progress. (Uploads are processed in a queue,
#' which means they are delayed until their turn comes up, which depends on the number of current uploads in the queue.)
#'
#' @return A `list` of upload information for the passed upload_file_job_url. It has these names:
#' id, url, status, user, created_at, updated_at, failure_message, filename, input_json, output_json
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param upload_file_job_url URL of a job in zoltar_connection that was uploaded via \code{\link{upload_forecast}}
#' @export
#' @examples \dontrun{
#' the_upload_info <- upload_info(conn, "http://example.com/api/uploadfilejob/2/")
#' }
upload_info <- function(zoltar_connection, upload_file_job_url) {
ufj_json <- get_resource(zoltar_connection, upload_file_job_url)
ufj_json$status <- status_as_str(ufj_json$status)
ufj_json$created_at <- as.Date(ufj_json$created_at)
ufj_json$updated_at <- as.Date(ufj_json$updated_at)
ufj_json
}
#' Get a new forecast upload's url
#'
#' A helper function that returns the URL of a newly-uploaded forecast from upload_info.
#'
#' @return A URL of the new forecast
#' @param zoltar_connection A `ZoltarConnection` object as returned by \code{\link{new_connection}}
#' @param the_upload_info a `list` object as returned by \code{\link{upload_info}}
#' @export
#' @examples \dontrun{
#'   the_upload_info <- upload_info(conn, "http://example.com/api/uploadfilejob/2/")
#'   new_forecast_url <- upload_info_forecast_url(conn, the_upload_info)
#' }
upload_info_forecast_url <- function(zoltar_connection, the_upload_info) {
if (is.null(the_upload_info$output_json$forecast_pk)) {
NULL
} else {
paste0(zoltar_connection$host, "/api/forecast/", the_upload_info$output_json$forecast_pk, "/")
}
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/upload-file-job.R |
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/utils-pipe.R |
#' @keywords internal
"_PACKAGE"
# Suppress R CMD check note
#' @importFrom readr read_csv
#' @importFrom jsonlite read_json
#' @importFrom mockery stub
#' @importFrom webmockr stub_request
NULL
release_questions <- function() {
c(
"Did you update NEWS.Rmd?",
"Did you update DESCRIPTION Version?",
"Did you update vignette dates?",
"Did you re-knit README.Rmd?",
"Did you run devtools::check()?",
"Did you run devtools::document()?",
"Did you run devtools::build_vignettes()?",
"Did you re-run pkgdown::build_site()?"
)
}
| /scratch/gouwar.j/cran-all/cranData/zoltr/R/zoltr.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
message=FALSE,
warning=FALSE,
eval = nzchar(Sys.getenv("IS_DEVELOPMENT_MACHINE"))
)
## ----setup, include=FALSE-----------------------------------------------------
library(httr) # o/w devtools::check() gets `could not find function "POST"` error
## ---- include=FALSE-----------------------------------------------------------
library(zoltr)
zoltar_connection <- new_connection(host = Sys.getenv("Z_HOST"))
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
zoltar_connection
## ---- eval=FALSE, include=TRUE------------------------------------------------
# library(zoltr)
# zoltar_connection <- new_connection()
# zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
# zoltar_connection
## -----------------------------------------------------------------------------
the_projects <- projects(zoltar_connection)
str(the_projects)
## -----------------------------------------------------------------------------
project_url <- the_projects[the_projects$name == "Docs Example Project", "url"]
the_project_info <- project_info(zoltar_connection, project_url)
names(the_project_info)
the_project_info$description
the_models <- models(zoltar_connection, project_url)
str(the_models)
## -----------------------------------------------------------------------------
score_data <- scores(zoltar_connection, project_url)
score_data
## -----------------------------------------------------------------------------
model_url <- the_models[the_models$name == "docs forecast model", "url"]
the_model_info <- model_info(zoltar_connection, model_url)
names(the_model_info)
the_model_info$name
the_forecasts <- forecasts(zoltar_connection, model_url)
str(the_forecasts)
## -----------------------------------------------------------------------------
forecast_url <- the_forecasts[1, "url"]
forecast_info <- forecast_info(zoltar_connection, forecast_url)
forecast_data <- download_forecast(zoltar_connection, forecast_url)
length(forecast_data$predictions)
| /scratch/gouwar.j/cran-all/cranData/zoltr/inst/doc/getting-started.R |
---
title: "Getting Started"
author: "Matthew Cornell"
date: "2020-04-01"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
message=FALSE,
warning=FALSE,
eval = nzchar(Sys.getenv("IS_DEVELOPMENT_MACHINE"))
)
```
# Getting Started with zoltr
zoltr is an R package that simplifies access to the [zoltardata.com](https://www.zoltardata.com/) API. This vignette
takes you through the package's main features. So that you can experiment without needing access to a private project,
we use the example project from [docs.zoltardata.com](https://docs.zoltardata.com/), which should always be available
for public read-only access.
**NOTE: You will need an account to access the zoltar API - please see [docs.zoltardata.com](https://docs.zoltardata.com/)
for details.**
## Connect to the host and authenticate
The starting point for working with Zoltar's API is a `ZoltarConnection` object, obtained via the `new_connection`
function. Most zoltr functions take a `ZoltarConnection` along with the API _URL_ of the thing of interest, e.g., a
project, model, or forecast. API URLs look like `https://www.zoltardata.com/api/project/3/`, which is that of the
"Docs Example Project". An important note regarding URLs:
zoltr's convention for URLs is to require a trailing slash character ('/') on all URLs. The only exception is the
optional `host` parameter passed to `new_connection()`. Thus, `https://www.zoltardata.com/api/project/3/` is valid,
but `https://www.zoltardata.com/api/project/3` is not.
You can obtain a URL using some of the `*_info` functions, and you can always use the web interface to navigate to the
item of interest and look at its URL in the browser address field. Keep in mind that you'll need to add `api` to the
browsable address, along with the trailing slash character. For example, if you browsed the _Docs Example Project_
project at (say) `https://www.zoltardata.com/project/3` then its API for use in zoltr would be
`https://www.zoltardata.com/api/project/3/`.
As noted above, all API calls require an account. To access your project, you'll first need to authenticate via the
`zoltar_authenticate()` function. Pass it the username and password for your account. Notes:
- Be careful to store and use your username and password so that they're not accessible to others. A good practice is to
put them in your `.Renviron` file and then use `Sys.getenv()` to retrieve them, as outlined in
[Use Environment variables](https://db.rstudio.com/best-practices/managing-credentials/#use-environment-variables) and
as done below.
- The Zoltar service uses a "token"-based scheme for authentication. These tokens have a five minute expiration for
security, which requires re-authentication after that period of time. The zoltr library takes care of
re-authenticating as needed by passing your username and password back to the server to get another token. Note that
the connection object returned by the `new_connection` function stores a token internally, so be careful if saving
that object into a file.
- Below you will see the host shown as _example.com_ - this is a temporary one that was used to generate this
documentation.
For this and other vignettes, you will need to create an [`.Renviron`](https://csgillespie.github.io/efficientR/3-3-r-startup.html#renviron)
file that contains `Z_USERNAME` and `Z_PASSWORD` variables that match your account settings (note the `Z_` prefix).
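For example, your `.Renviron` file could contain lines like these (the values shown are placeholders for your own
credentials):

```
Z_USERNAME=your_zoltar_username
Z_PASSWORD=your_zoltar_password
```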
Then you'll be able to create an authenticated connection:
<!-- run but don't show. NB: will run only if above eval=TRUE -->
```{r setup, include=FALSE}
library(httr) # o/w devtools::check() gets `could not find function "POST"` error
```
<!-- run but don't show. NB: will run only if above eval=TRUE -->
```{r, include=FALSE}
library(zoltr)
zoltar_connection <- new_connection(host = Sys.getenv("Z_HOST"))
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
zoltar_connection
```
<!-- show but don't run-->
```{r, eval=FALSE, include=TRUE}
library(zoltr)
zoltar_connection <- new_connection()
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
zoltar_connection
```
## Get a list of all projects on the host
Now that you have a connection, you can use the `projects()` function to get all projects as a `data.frame`. Note that
it will only list those that you are authorized to access, i.e., all public projects plus any private ones that you
own or in which you are a model owner.
```{r}
the_projects <- projects(zoltar_connection)
str(the_projects)
```
## Get a project to work with and list its info, models, and scores
Let's start by getting a public project to work with. We will search the projects list for it by name. Then we will pass
its URL to the `project_info()` function to get a `list` of details, and then pass it to the `models()` function to get
a `data.frame` of its models.
```{r}
project_url <- the_projects[the_projects$name == "Docs Example Project", "url"]
the_project_info <- project_info(zoltar_connection, project_url)
names(the_project_info)
the_project_info$description
the_models <- models(zoltar_connection, project_url)
str(the_models)
```
There is other project-related information that you can access, such as its configuration (`zoltar_units()`,
`targets()`, and `timezeros()` - concepts that are explained at [docs.zoltardata.com](https://docs.zoltardata.com/) ),
`scores()` and `truth()`. As an example, let's get its score data. (Note that available scores are limited due to the
nature of the example project.)
```{r}
score_data <- scores(zoltar_connection, project_url)
score_data
```
## Get a model to work with and list its info and forecasts
Now let's work with a particular model, getting its URL by name and then passing it to the `model_info()` function to
get details. Then use the `forecasts()` function to get a `data.frame` of that model's forecasts (there is only one).
Note that obtaining the model's URL is straightforward because it is provided in the `url` column of `the_models`.
```{r}
model_url <- the_models[the_models$name == "docs forecast model", "url"]
the_model_info <- model_info(zoltar_connection, model_url)
names(the_model_info)
the_model_info$name
the_forecasts <- forecasts(zoltar_connection, model_url)
str(the_forecasts)
```
## Finally, download the forecast's data
You can get forecast data using the `download_forecast()` function, which is in a nested `list` format. Please see
[docs.zoltardata.com](https://docs.zoltardata.com/) for forecast format details.
```{r}
forecast_url <- the_forecasts[1, "url"]
forecast_info <- forecast_info(zoltar_connection, forecast_url)
forecast_data <- download_forecast(zoltar_connection, forecast_url)
length(forecast_data$predictions)
```
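
Each element of `forecast_data$predictions` is itself a nested `list`. As a quick sketch of how you might inspect the
first one (shown but not run; the exact fields follow the forecast format documented at
[docs.zoltardata.com](https://docs.zoltardata.com/)):

```r
first_prediction <- forecast_data$predictions[[1]]
names(first_prediction)               # the prediction element's fields
str(first_prediction, max.level = 2)  # a compact view of the nested structure
```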
| /scratch/gouwar.j/cran-all/cranData/zoltr/inst/doc/getting-started.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
message=FALSE,
warning=FALSE,
eval = nzchar(Sys.getenv("IS_DEVELOPMENT_MACHINE"))
)
## ----setup, include=FALSE-----------------------------------------------------
library(httr) # o/w devtools::check() gets `could not find function "POST"` error
## ---- include=FALSE-----------------------------------------------------------
library(zoltr)
zoltar_connection <- new_connection(host = Sys.getenv("Z_HOST"))
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
## ---- eval=FALSE, include=TRUE------------------------------------------------
# library(zoltr)
# zoltar_connection <- new_connection()
# zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
## -----------------------------------------------------------------------------
project_config <- jsonlite::read_json("docs-project.json") # "name": "My project"
project_url <- create_project(zoltar_connection, project_config)
the_project_info <- project_info(zoltar_connection, project_url)
## -----------------------------------------------------------------------------
model_config <- list("name" = "a model_name",
"abbreviation" = "an abbreviation",
"team_name" = "a team_name",
"description" = "a description",
"home_url" = "http://example.com/",
"aux_data_url" = "http://example.com/")
model_url <- create_model(zoltar_connection, project_url, model_config)
## -----------------------------------------------------------------------------
busy_poll_upload_file_job <- function(zoltar_connection, upload_file_job_url) {
cat(paste0("polling for status change. upload_file_job: ", upload_file_job_url, "\n"))
while (TRUE) {
status <- upload_info(zoltar_connection, upload_file_job_url)$status
cat(paste0(status, "\n"))
if (status == "FAILED") {
cat(paste0("x failed\n"))
break
}
if (status == "SUCCESS") {
break
}
Sys.sleep(1)
}
}
## -----------------------------------------------------------------------------
forecast_data <- jsonlite::read_json("docs-predictions.json")
upload_file_job_url <- upload_forecast(zoltar_connection, model_url, "2011-10-02", forecast_data)
busy_poll_upload_file_job(zoltar_connection, upload_file_job_url)
## -----------------------------------------------------------------------------
the_upload_info <- upload_info(zoltar_connection, upload_file_job_url)
forecast_url <- upload_info_forecast_url(zoltar_connection, the_upload_info)
the_forecast_info <- forecast_info(zoltar_connection, forecast_url)
the_forecasts <- forecasts(zoltar_connection, the_forecast_info$forecast_model_url)
str(the_forecasts)
## -----------------------------------------------------------------------------
delete_project(zoltar_connection, project_url)
| /scratch/gouwar.j/cran-all/cranData/zoltr/inst/doc/project-owners.R |
---
title: "Project Owners"
author: "Matthew Cornell"
date: "2020-04-01"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Project Owners}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
message=FALSE,
warning=FALSE,
eval = nzchar(Sys.getenv("IS_DEVELOPMENT_MACHINE"))
)
```
# zoltr for Project Owners
Welcome to the zoltr vignette for project owners and forecasters. You should read this if you are interested in creating
and managing your own [zoltardata.com](https://www.zoltardata.com/) projects using this package to access them via the
Zoltar API. Building on the _Getting Started_ vignette, this one covers creating projects and models, and uploading
forecasts.
Before starting you should have an account on [zoltardata.com](https://www.zoltardata.com/), and an
[`.Renviron`](https://csgillespie.github.io/efficientR/3-3-r-startup.html#renviron) file set up as described in
_Getting Started_.
## Connect to the host and authenticate
<!-- run but don't show. NB: will run only if above eval=TRUE -->
```{r setup, include=FALSE}
library(httr) # o/w devtools::check() gets `could not find function "POST"` error
```
<!-- run but don't show. NB: will run only if above eval=TRUE -->
```{r, include=FALSE}
library(zoltr)
zoltar_connection <- new_connection(host = Sys.getenv("Z_HOST"))
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
```
<!-- show but don't run-->
```{r, eval=FALSE, include=TRUE}
library(zoltr)
zoltar_connection <- new_connection()
zoltar_authenticate(zoltar_connection, Sys.getenv("Z_USERNAME"), Sys.getenv("Z_PASSWORD"))
```
## Create a sandbox project to play with
Let's use the `create_project()` function to make a temporary project to work with. (Note that if you're repeating this
step and need to delete a previously-created project, you can either use the web UI's delete button on the project
detail page or call the zoltr `delete_project()` function to do it programmatically.) `create_project()` takes a
`project_config` parameter that is a `list` specifying everything Zoltar needs to create a project, including meta
information like name, whether it's public, etc. In addition it lists the units, targets, and timezeros to create. The
new project's URL is returned, which you can pass to other functions. Here we use `docs-project.json`, which is the one
that creates the example documentation project.
```{r}
project_config <- jsonlite::read_json("docs-project.json") # "name": "My project"
project_url <- create_project(zoltar_connection, project_config)
the_project_info <- project_info(zoltar_connection, project_url)
```
## Add a model to the project and then upload a forecast into it
We can use the `create_model()` function to create a model in a particular project. Like `create_project()`, it takes
a `list` that is the configuration to use when creating the model. There is an example at `example-model-config.json`,
but here we will construct the `list` ourselves.
```{r}
model_config <- list("name" = "a model_name",
"abbreviation" = "an abbreviation",
"team_name" = "a team_name",
"description" = "a description",
"home_url" = "http://example.com/",
"aux_data_url" = "http://example.com/")
model_url <- create_model(zoltar_connection, project_url, model_config)
```
Now let's upload a forecast to the model using `upload_forecast()` and then see how to list all of a model's forecasts
(in this case just the one). Keep in mind that Zoltar enqueues long operations like forecast uploading, which keeps the
site responsive but makes the Zoltar API a little more complicated. Rather than having the `upload_forecast()` function
_block_ until the upload is done, you instead get a quick response in the form of an `UploadFileJob` URL that you can
pass to the `upload_info()` function to check its status and find out whether the upload is pending, done, or failed.
(This is called _polling_ the host to ask the status.) Here we poll every second using a helper function:
```{r}
busy_poll_upload_file_job <- function(zoltar_connection, upload_file_job_url) {
cat(paste0("polling for status change. upload_file_job: ", upload_file_job_url, "\n"))
while (TRUE) {
status <- upload_info(zoltar_connection, upload_file_job_url)$status
cat(paste0(status, "\n"))
if (status == "FAILED") {
cat(paste0("x failed\n"))
break
}
if (status == "SUCCESS") {
break
}
Sys.sleep(1)
}
}
```
`upload_forecast()` takes the `model_url` to upload to, the `timezero_date` in the project to associate the forecast
with, and the `forecast_data` itself. The latter is a nested `list` of _predictions_ as documented in
[docs.zoltardata.com](https://docs.zoltardata.com/), but you can learn about it by looking at the example
`docs-predictions.json`. Briefly, you can see that there is a `predictions` list of `prediction elements` (the `meta`
section is ignored), each of which encodes data for a particular unit and target combination. Each `prediction element`
has a class that's one of four possibilities: `bin`, `named`, `point`, and `sample`. The structure of the
`prediction element's` contents (the `prediction` section) is determined by the particular class. For example, a `point`
just has a `value`, but a `bin` has a table of `cat` and `prob` values.
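For illustration, here is a sketch of a single `point` prediction element expressed as an R `list` (shown but not run;
the unit and target names below are hypothetical):

```r
point_prediction <- list(
  unit = "location1",             # hypothetical unit name
  target = "pct next week",       # hypothetical target name
  class = "point",                # one of "bin", "named", "point", "sample"
  prediction = list(value = 2.1)  # a point prediction just has a value
)
```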
Here we will upload the `docs-predictions.json` file. Note that the passed `timezero_date` matches one of the timezeros
in `docs-project.json`, the file that was used to create the project. It is an error otherwise.
```{r}
forecast_data <- jsonlite::read_json("docs-predictions.json")
upload_file_job_url <- upload_forecast(zoltar_connection, model_url, "2011-10-02", forecast_data)
busy_poll_upload_file_job(zoltar_connection, upload_file_job_url)
```
Hopefully you'll see some number of "QUEUED" entries followed by a "SUCCESS" one. (How long it takes will depend on how
much other work Zoltar is handling.)
Get the new forecast's URL from the `UploadFileJob` object and then call the `forecasts()` function to get a
`data.frame` of that model's forecasts (just the one in our case).
```{r}
the_upload_info <- upload_info(zoltar_connection, upload_file_job_url)
forecast_url <- upload_info_forecast_url(zoltar_connection, the_upload_info)
the_forecast_info <- forecast_info(zoltar_connection, forecast_url)
the_forecasts <- forecasts(zoltar_connection, the_forecast_info$forecast_model_url)
str(the_forecasts)
```
## Clean up by deleting the sandbox project
**NB: This will delete all of the data associated with the project without warning, including models and forecasts.**
```{r}
delete_project(zoltar_connection, project_url)
```
| /scratch/gouwar.j/cran-all/cranData/zoltr/inst/doc/project-owners.Rmd |
#' Region representing London in projected coordinate system
#'
#' `london_a()` and `london_c()` return the city boundaries and centre
#' point of London, respectively.
#'
#' @note `london_a()` returns a projected version of `lnd` in `spDataLarge`.
#' See the `data-raw` folder in the package's repo to reproduce these datasets
#' The `lonlat` versions of the data have coordinates in units of degrees.
#'
#' @docType data
#' @keywords datasets
#' @name london_area
#' @aliases london_cent london_c london_a london_cent_lonlat london_area_lonlat
#' @export
#' @examples
#' plot(london_a(), reset = FALSE)
#' plot(london_c(), add = TRUE)
london_a = function() {
sf::st_set_crs(zonebuilder::london_area, 27700)
}
#' @rdname london_area
#' @export
london_c = function() {
sf::st_set_crs(zonebuilder::london_cent, 27700)
}
#' The first 100 triangular numbers
#'
#' The first 100 in the sequence of [triangular numbers](https://en.wikipedia.org/wiki/Triangular_number)
#'
#' @note See the `data-raw` folder in the package's repo to reproduce these datasets.
#'
#' @docType data
#' @keywords datasets
#' @name zb_100_triangular_numbers
NULL
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/data.R |
#' Make doughnuts
#'
#' @inheritParams zb_zone
#'
#' @return An `sf` data frame
#' @export
#' @examples
#' zb_plot(zb_doughnut(london_c(), london_a()))
zb_doughnut = function(x = NULL,
area = NULL,
n_circles = NA,
distance = 1,
distance_growth = 1) {
zb_zone(x = x, area = area, n_circles = n_circles, distance = distance, distance_growth = distance_growth, n_segments = 1)
}
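# Build the rings as an sf object: a central circle plus (n_circles - 1)
# annuli, each the difference between two successive buffers. For example,
# create_rings(point, 3, c(1, 1, 1)) gives a 1 km circle plus annuli with
# outer radii of 2 km and 3 km.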
create_rings = function(point, n_circles, distance) {
csdistance = cumsum(distance)
  # buffer the centre point at each cumulative distance (converted from km to m)
  circles = lapply(csdistance * 1000, function(d) sf::st_buffer(point, d))
doughnuts_non_center = mapply(function(x, y) sf::st_sf(geometry = sf::st_difference(x, y)),
circles[-1],
circles[-n_circles],
SIMPLIFY = FALSE)
doughnuts = do.call(rbind,
c(list(sf::st_sf(geometry = circles[[1]])),
doughnuts_non_center))
doughnuts
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/doughut.R |
geo_select_aeq.sf = function (shp) {
#cent <- sf::st_geometry(shp)
coords <- sf::st_coordinates(shp)
coords_mat <- matrix(coords[, 1:2], ncol = 2)
midpoint <- apply(coords_mat, 2, mean)
aeqd <- sprintf("+proj=aeqd +lat_0=%s +lon_0=%s +x_0=0 +y_0=0",
midpoint[2], midpoint[1])
sf::st_crs(aeqd)
}
geo_select_aeq.sfc = function (shp) {
#cent <- sf::st_geometry(shp)
coords <- sf::st_coordinates(shp)
coords_mat <- matrix(coords[, 1:2], ncol = 2)
midpoint <- apply(coords_mat, 2, mean)
aeqd <- sprintf("+proj=aeqd +lat_0=%s +lon_0=%s +x_0=0 +y_0=0",
midpoint[2], midpoint[1])
sf::st_crs(aeqd)
}
geo_select_aeq = function (shp) {
UseMethod("geo_select_aeq")
}
geo_project = function(shp) {
crs = geo_select_aeq(shp)
st_transform(shp, crs = crs)
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/from_stplanr.R |
get_angles = function(n_segments = 4, starting_angle = -45, angles_mid = FALSE) {
a = seq(starting_angle, starting_angle + 360, length.out = n_segments + 1)
if (angles_mid) a = a - (360 / n_segments) / 2
a / 180 * pi
}
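# e.g. get_angles(4) returns the five segment-boundary angles
# c(-45, 45, 135, 225, 315) degrees, converted to radians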
doughnut_areas = function(n_circles, distance) {
csdistance = c(0, cumsum(distance))
sapply(2:(n_circles+1), function(i) {
(pi * ((csdistance[i]) ^ 2)) - (pi * ((csdistance[i-1]) ^ 2))
})
}
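# e.g. doughnut_areas(3, c(1, 1, 1)) returns pi * c(1, 3, 5): the area of a
# unit circle and of the two annuli around it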
# n_circles = 10
# x = london_area
# point = london_area_midpoint
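# Equal-width rings: the common ring width (in km) is the distance from the
# centre point to the farthest boundary point of x, divided by n_circles.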
find_distance_equal_dohnut = function(x, n_circles, point) {
if(is.null(point)) point = sf::st_centroid(x)
boundary_points = sf::st_cast(x, "POINT")
distances_to_points = sf::st_distance(boundary_points, point)
max_distance = as.numeric(max(distances_to_points)) / 1000
# / cos(pi / 180 * 45) # add multiplier to account for hypotenuse issue
max_distance / (n_circles)
}
# get_distances(1, 1, 10)
# get_distances(2, 1, 10)
# get_distances(1, 2, 10)
# get_distances(.1, .1, 10)
get_distances = function(distance, distance_growth, n_circles) {
distance + (0:(n_circles-1)) * distance_growth
}
# x = london_area
# number_of_circles(x, 1, 1, sf::st_centroid(x))
# number_of_circles(x, 0.1, 0.1, sf::st_centroid(x))
number_of_circles = function(area, distance, distance_growth, x) {
boundary_points = suppressWarnings(sf::st_cast(area, "POINT"))
distances_to_points = sf::st_distance(boundary_points, x)
max_distance = as.numeric(max(distances_to_points)) / 1000
  # cumulative outer radii of (up to) 100 rings; the result is the index of the
  # first ring that reaches beyond the most distant boundary point, plus one
  csdistances = cumsum(get_distances(distance, distance_growth, 100))
  which(csdistances > max_distance)[1] + 1
# / cos(pi / 180 * 45) # add multiplier to account for hypotenuse issue
}
# distances = function(distance, distance_growth) {
#
# }
number_of_segments = function(n_circles = 10, distance = rep(1, n_circles)) {
areas = doughnut_areas(n_circles = n_circles, distance = distance)
areas / areas[1]
}
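# e.g. number_of_segments(3, rep(1, 3)) returns c(1, 3, 5): relative to the
# central circle, the two annuli are 3 and 5 times as large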
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/helper_functions.R |
zb_clock_labels = function(n_circles, segment_center = FALSE) {
do.call(rbind, lapply(1:n_circles, function(i) {
if (i==1L && !segment_center) {
data.frame(circle_id = i, segment_id = 0, label = "A", stringsAsFactors = FALSE)
} else {
data.frame(circle_id = i, segment_id = 1:12, label = paste0(LETTERS[i], sprintf("%02d", 1:12)), stringsAsFactors = FALSE)
}
}))
}
# zb_quadrant_labels(5)
zb_quadrant_labels = function(n_circles, n_segments = 12, segment_center = FALSE, quadrants = c("N", "E", "S", "W")) {
# check n_segments
if (any((n_segments %% 4) != 0 & n_segments != 1)) stop("n_segments should be equal to 1 or a multiple of 4")
n_segments = rep(n_segments, length.out = n_circles)
if (!segment_center) n_segments[1] = 1
two_decimals_required = any(n_segments >= 40)
do.call(rbind, mapply(function(i, j) {
ring = LETTERS[i]
quad = quadrants[ceiling(((1:j)/j) * 4)]
seg = (((1:j - 1)/j) %% 0.25) * j + 1
if (two_decimals_required) {
seg = sprintf("%02d", seg)
}
labels = if (j == 1) {
ring
} else if (j == 4) {
paste0(ring, quad)
} else {
paste0(ring, quad, seg)
}
if (j==1) {
segment_id = 0
} else {
segment_id = 1:j
}
data.frame(circle_id = i, segment_id = segment_id, label = labels, stringsAsFactors = FALSE)
}, 1:n_circles, n_segments, SIMPLIFY = FALSE))
}
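# Illustrative output (not run): zb_quadrant_labels(2, 8) labels the central
# circle "A" and the second ring "BN1", "BN2", "BE1", ..., "BW2"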
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/labels.R |
#' Generate colors for zones
#'
#' This function generates colors for zones.
#'
#' @param z An `sf` object containing zones covering the region
#' @param palette Palette type, one of \code{"hcl"} (a palette based on the HCL color space), \code{"rings"} (a palette which colors the rings using the YlOrBr color brewer palette), \code{"dartboard"} (a palette which resembles a dartboard)
#' @return A vector of colors
#' @export
#' @importFrom RColorBrewer brewer.pal
#'
#' @examples
#' z = zb_zone(london_c(), london_a())
#' zb_color(z)
#' plot(z[, "circle_id"], col = zb_color(z))
zb_color = function(z, palette = c("rings", "hcl", "dartboard")) {
palette = match.arg(palette)
if (palette == "hcl") {
z$h = z$segment_id * 30
z$l = pmin(10 + z$circle_id * 15, 100)
z$c = 70 + ((100-z$l) / 80 * 30)
z$c[z$segment_id == 0] = 0
hcl(h = z$h, c = z$c, l = z$l)
} else if (palette == "rings") {
RColorBrewer::brewer.pal(9, "YlOrBr")[pmin(9,z$circle_id+1)]
} else if (palette == "dartboard") {
z$blackred = ((z$segment_id %% 2) == 0)
z$blackwhite = ((z$circle_id %% 2) == 0)
ifelse(z$blackred, ifelse(z$blackwhite, "#181818", "#C62627"), ifelse(z$blackwhite, "#EAD0AE", "#0BA158"))
}
}
#' View zones
#'
#' This function opens an interactive map of the zones
#'
#' @param z An `sf` object containing zones covering the region
#' @param alpha Alpha transparency, number between 0 (fully transparent) and 1 (not transparent)
#' @param palette Palette type, one of \code{"hcl"} (a palette based on the HCL color space), \code{"rings"} (a palette which colors the rings using the YlOrBr color brewer palette), \code{"dartboard"} (a palette which resembles a dartboard)
#' @param title The title of the plot
#' @return An interactive map created with `tmap`
#' @export
#' @examples
#' \donttest{
#' z = zb_zone(london_c(), london_a())
#' zb_view(z, palette = "rings")
#' }
zb_view = function(z, alpha = 0.4, palette = c("rings", "hcl", "dartboard"), title = NULL) {
palette = match.arg(palette)
if (requireNamespace("tmap")) {
suppressMessages(tmap::tmap_mode("view"))
tmap::tmap_options(show.messages = FALSE)
cent = sf::st_set_crs(sf::st_set_geometry(z, "centroid"), sf::st_crs(z))
check_and_fix = tmap::tmap_options()$check.and.fix
if(!check_and_fix) {
message("Updating tmap settings with:\ntmap::tmap_options(check.and.fix = TRUE)")
tmap::tmap_options(check.and.fix = TRUE)
}
z$color = zb_color(z, palette)
tm = tmap::tm_basemap("OpenStreetMap") +
tmap::tm_shape(z) +
tmap::tm_fill("color", alpha = alpha, id = "label", group = "colors", popup.vars = c("circle_id", "segment_id", "label")) +
tmap::tm_borders(group = "Borders", col = "black", lwd = 1.5) +
tmap::tm_shape(cent) +
tmap::tm_text("label", col = "black", size = "circle_id", group = "Labels") +
tmap::tm_scale_bar()
if (!is.null(title)) {
tm + tmap::tm_layout(title = title)
} else {
tm
}
} else {
stop("Please install tmap")
}
}
#' Plot zones
#'
#' This function opens a static map of the zones
#'
#' @param z An `sf` object containing zones covering the region
#' @param palette Palette type, one of \code{"hcl"} (a palette based on the HCL color space), \code{"rings"} (a palette which colors the rings using the YlOrBr color brewer palette), \code{"dartboard"} (a palette which resembles a dartboard)
#' @param title Plot title
#' @param text_size Vector of two numeric values that determine the relative text sizes. The first determines the smallest text size and the second one the largest text size. The largest text size is used for the outermost circle, and the smallest for the central circle in case there are 9 or more circles. If there are less circles, the relative text size is larger (see source code for exact method)
#' @param zone_label_thres This number determines in which zones labels are printed, namely each zone for which the relative area size is larger than `zone_label_thres`.
#' @importFrom graphics par mtext
#' @return A static plot created using R's base `graphics` package
#' @export
#' @examples
#' zb_plot(zb_zone(london_c()))
zb_plot = function(z, palette = c("rings", "hcl", "dartboard"), title = NULL, text_size = c(0.3, 1), zone_label_thres = 0.002) {
palette = match.arg(palette)
z$color = zb_color(z, palette)
areas = as.numeric(sf::st_area(z))
areas = areas / sum(areas)
sel = areas > zone_label_thres
cent = sf::st_set_crs(sf::st_set_geometry(z, "centroid"), sf::st_crs(z))
  oldpar = par(no.readonly = TRUE) # save the user's graphics settings
  on.exit(par(oldpar)) # and restore them when the function exits
p = graphics::par(mar=c(.2,.2,.2,.2))
plot(sf::st_geometry(z), col = z$color, border = "grey40")
co = st_coordinates(cent[sel,])
mx = max(z$circle_id[sel])
cex = seq(text_size[1], text_size[2], length.out = 9)[pmin(9, z$circle_id[sel] + (9-mx))]
text(co[, 1], co[, 2], cex = cex, labels = z$label[sel])
if (!is.null(title)) graphics::mtext(title, 3, adj=0, line=-1)
graphics::par(p)
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/plot.R |
#' Divide a region into quadrats
#'
#' @param x An `sf` object representing the region to divide
#' @param ncol Number of columns in the grid
#' @param nrow Number of rows in the grid (defaults to `ncol` when `NULL`)
#' @param intersection Should the grid be intersected with `x`? `TRUE` by default
#'
#' @return An sf object
#' @export
#'
#' @examples
#' x = london_a()
#' c = sf::st_centroid(london_a())
#' plot(zb_quadrat(x, ncol = 2), col = 2:5)
#' plot(c, add = TRUE, col = "white")
#' plot(zb_quadrat(x, ncol = 3))
#' plot(zb_quadrat(x, ncol = 4))
#' plot(zb_quadrat(x, ncol = 4, intersection = FALSE))
zb_quadrat = function(x, ncol, nrow = NULL, intersection = TRUE) {
  if (is.null(nrow)) nrow = ncol # default to a square grid
  g = sf::st_make_grid(x = x, n = c(ncol, nrow))
if(!intersection) {
return(g)
}
sf::st_intersection(x, g)
} | /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/quadrat.R |
#' Make segments
#'
#' @inheritParams zb_zone
#'
#' @return An `sf` data frame
#'
#' @export
#' @examples
#' zb_plot(zb_segment(london_c(), london_a()))
zb_segment = function(x = NULL,
area = NULL,
n_segments = 12,
distance = NA) {
  if (is.na(distance)) distance = ifelse(is.null(area), 15, 100) # 15 is the same as default ClockBoard with 5 rings, 100 is chosen to be large enough to cover the area
zb_zone(x = x, area = area, n_circles = 1, distance = distance, n_segments = n_segments, segment_center = TRUE)
}
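# Build n_segments triangular polygons fanning out from x: each polygon joins
# the centre point to two consecutive points placed `distance` away at the
# angles returned by get_angles()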
create_segments = function(x, n_segments = 4, starting_angle = -45, distance = 100000) {
if (n_segments == 1) return(NULL)
fr_matrix = matrix(sf::st_coordinates(x), ncol = 2)
#angles_deg = seq(0, to = 360, by = 360 / n_segments) + starting_angle
#angles_rad = angles_deg / 180 * pi
angles_rad = get_angles(n_segments = n_segments, starting_angle = starting_angle)
x_coord_to = distance * cos(angles_rad - 0.5 * pi) + fr_matrix[, 1]
y_coord_to = distance * -sin(angles_rad - 0.5 * pi) + fr_matrix[, 2]
to_matrix = cbind(x_coord_to, y_coord_to)
to_matrix_next = to_matrix[c(2:nrow(to_matrix), 1), ]
coord_matrix_list = lapply(1:n_segments, function(x)
rbind(fr_matrix, to_matrix[x, ], to_matrix_next[x, ], fr_matrix))
poly_list = lapply(coord_matrix_list, function(x) sf::st_polygon(list(x)))
sf::st_sfc(poly_list, crs = sf::st_crs(x))
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/segment.R |
utils::globalVariables(c("number_of_segments", "zb_100_triangular_numbers"))
NULL | /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/utils.R |
#' Create lines radiating at equal angles from a point
#'
#' @param point Center point
#' @param n Number of lines
#' @param starting_angle Starting angle
#' @param distance Distance
#'
#' @return Objects of class `sfc` containing linestring geometries
#' @export
#'
#' @examples
#' point = sf::st_centroid(london_a())
#' n = 4
#' l = zb_lines(point, n)
#' plot(l)
zb_lines = function(point, n, starting_angle = 45, distance = 100000) {
fr_matrix = matrix(sf::st_coordinates(point), ncol = 2)
angles_deg = seq(0, to = 360, by = 360 / n) + starting_angle
angles_rad = angles_deg / 180 * pi
x_coord_to = distance * cos(angles_rad) + fr_matrix[, 1]
y_coord_to = distance * sin(angles_rad) + fr_matrix[, 2]
to_matrix = cbind(x_coord_to, y_coord_to)
line_matrix_list = lapply(1:n, function(x) rbind(fr_matrix, to_matrix[x, ]))
sf::st_sfc(lapply(line_matrix_list, sf::st_linestring), crs = sf::st_crs(point))
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/zb_lines.R |
#' Generate zones covering a region of interest
#'
#' This function first divides geographic space into [annuli](https://en.wikipedia.org/wiki/Annulus_(mathematics))
#' (concentric 2d rings or 'doughnuts') and then subdivides each annulus
#' into a number of segments.
#'
#' By default 12 segments are used for each annuli, resulting in a zoning system that can
#' be used to refer to segments in [clock position](https://en.wikipedia.org/wiki/Clock_position),
#' with 12 representing North, 3 East, 6 South and 9 West.
#'
#' @param x Centre point. Should be an \code{\link[sf:sf]{sf}} or \code{\link[sf:sfc]{sfc}} object containing one point, or a name of a city (which is looked up with OSM geocoding).
#' @param area (optional) Area. Should be an \code{\link[sf:sf]{sf}} or \code{\link[sf:sfc]{sfc}} object containing one (multi) polygon
#' @param n_circles Number of rings including the central circle. By default 5, unless \code{area} is specified (then it is set automatically to fill the area).
#' @param n_segments (optional) Number of segments. Either one number, which determines the number of segments applied to all circles, or a vector with a number for each circle (each should be a multiple of 4, see also the argument \code{labeling}). By default, the central circle is not segmented (see the argument \code{segment_center}).
#' @param distance The distances between the circles, in kilometers. For the center circle, it is the distance between the center and the circle. If only one number is specified, \code{distance_growth} determines the increment at which the distances grow for the outer circles.
#' @param distance_growth The rate at which the distances between the circles grow. Only applicable when \code{distance} is one number and \code{n_circles > 1}. See also \code{distance}.
#' @param labeling The labeling of the zones. Either \code{"clock"} which uses the clock analogy (i.e. hours 1 to 12) or \code{"NESW"} which uses the cardinal directions N, E, S, W. If the number of segments is 12, the clock labeling is used, and otherwise NESW. Note that the number of segments should be a multiple of four. If, for instance, the number of segments is 8, then the segments are labeled N1, N2, E1, E2, S1, S2, W1, and W2.
#' @param starting_angle The angle of the first of the radii that create the segments (degrees). By default, it is either 15 when \code{n_segments} is 12 (i.e. the ClockBoard setting) and -45 otherwise.
#' @param segment_center Should the central circle be divided into segments? `FALSE` by default.
#' @param intersection Should the zones be intersected with the area? \code{TRUE} by default.
#' @param city (optional) Name of the city. If specified, it adds a column `city` to the returned `sf` object.
#'
#' @return An `sf` object containing zones covering the region
#' @export
#' @import sf
#' @importFrom graphics plot text
#' @importFrom grDevices hcl
#' @examples
#' # default settings
#' z = zb_zone(london_c(), london_a())
#' \donttest{
#' zb_plot(z)
#' if (require(tmap)) {
#' zb_view(z)
#'
#' z = zb_zone("Berlin")
#' zb_view(z)
#'}
#'
#' # variations
#' zb_plot(zb_zone(london_c(), london_a(), n_circles = 2))
#' zb_plot(zb_zone(london_c(), london_a(), n_circles = 4, distance = 2, distance_growth = 0))
#' zb_plot(zb_zone(london_c(), london_a(), n_circles = 3, n_segments = c(1,4,8)))
#' }
zb_zone = function(x = NULL,
area = NULL,
n_circles = NA,
n_segments = 12,
distance = 1,
distance_growth = 1,
labeling = NA,
starting_angle = NA,
segment_center = FALSE,
intersection = TRUE,
city = NULL) {
  # checks and preprocessing of x and area
if (is.null(x) && is.null(area)) stop("Please specify either x or area")
if (!is.null(area)) {
area = sf::st_geometry(area)
if (!inherits(area, c("sfc_POLYGON", "sfc_MULTIPOLYGON"))) stop("area is not a (multi)polygon")
if (!(length(area) == 1)) stop("area should contain only one (multi)polygon")
    if (is.na(sf::st_crs(area))) stop("crs of area is unknown")
}
if (is.null(x)) {
x = sf::st_centroid(area)
} else {
if (!inherits(x, c("sf", "sfc"))) {
if (is.character(x)) {
if (!requireNamespace("tmaptools")) {
stop("Please install tmaptools first")
} else {
x = tmaptools::geocode_OSM(x, as.sf = TRUE)
}
} else {
stop("x should be an sf(c) object or a city name")
}
}
x = sf::st_geometry(x)
if (!inherits(x, "sfc_POINT")) stop("x is not a point")
if (!(length(x) == 1)) stop("x should contain only one point")
    if (is.na(sf::st_crs(x))) stop("crs of x is unknown")
if (!is.null(area) && !identical(sf::st_crs(area), sf::st_crs(x))) {
area = sf::st_transform(area, sf::st_crs(x))
}
}
if (!is.null(area) && !sf::st_contains(area, x, sparse = FALSE)[1]) stop("x is not located in area")
  # other checks / preprocessing
if (is.na(n_circles)) {
if (!is.null(area)) {
n_circles = number_of_circles(area, distance, distance_growth, x)
} else {
n_circles = 5
}
}
if (n_circles == 1 && n_segments > 1 && !segment_center) {
message("Please set segment_center = TRUE to divide the centre into multiple segments")
}
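  # expand `distance` to one ring width per circle; when a single value is
  # given, ring widths grow linearly by `distance_growth`, so with the defaults
  # the ring boundaries follow the triangular number sequence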
if (length(distance) != n_circles) {
distance = get_distances(distance, distance_growth, n_circles)
}
if (is.na(labeling)) labeling = ifelse(all(n_segments == 12), "clock", "NESW")
if (is.na(starting_angle)) starting_angle = ifelse(labeling == "clock", 15, -45)
# project if needed (and reproject back at the end)
orig_crs = sf::st_crs(x)
if (sf::st_is_longlat(orig_crs)) {
crs = geo_select_aeq(x)
x = sf::st_transform(x, crs = crs)
if (!is.null(area)) area = sf::st_transform(area, crs = crs)
}
# create doughnuts
doughnuts = create_rings(x, n_circles, distance)
# update n_circles
n_circles = nrow(doughnuts)
# clock_labels = (identical(n_segments, 12))
# if (is.na(starting_angle)) starting_angle = ifelse(clock_labels, 15, -45)
# alternatives: add argument use_clock_labels? or another function with different params?
n_segments = rep(n_segments, length.out = n_circles)
if (!segment_center) n_segments[1] = 1
# create segments
segments = lapply(n_segments,
create_segments,
x = x,
# starting_angle = ifelse(clock_labels, 15, -45))
starting_angle = starting_angle)
# transform to sf and number them
segments = lapply(segments, function(x) {
if (is.null(x)) return(x)
y = sf::st_as_sf(x)
y$segment_id = 1:nrow(y)
y
})
# intersect doughnuts with x (the area polygon)
if(!is.null(area) && intersection) {
if (!all(sf::st_is_valid(area))) {
if (!requireNamespace("lwgeom")) {
stop("Combining polygons failed. Please install lwgeom and try again")
} else {
      area = sf::st_make_valid(area)
}
}
    area = st_union(st_buffer(area, dist = 0.01)) # 0.01 (1 cm in most CRSs) is arbitrarily chosen, but works to resolve strange artefacts
zones_ids = which(sapply(sf::st_intersects(doughnuts, area), length) > 0)
doughnuts = suppressWarnings(sf::st_intersection(doughnuts, area))
segments = segments[zones_ids]
} else {
zones_ids = 1:n_circles
}
# intersect the result with segments
doughnut_segments = do.call(rbind, mapply(function(i, x, y) {
if (is.null(y)) {
x$segment_id = 0
x$circle_id = i
x
} else {
if (i==1 && !segment_center) {
res = x
res$segment_id = 0
res$circle_id = i
} else {
res = suppressWarnings(sf::st_intersection(x, y))
res$circle_id = i
}
res
}
}, zones_ids, split(doughnuts, 1:length(zones_ids)), segments, SIMPLIFY = FALSE))
# doughnut_segments$segment_id = formatC(doughnut_segments$segment_id, width = 2, flag = 0)
# doughnut_segments$circle_id = formatC(doughnut_segments$circle_id, width = 2, flag = 0)
# attach labels
if (labeling == "clock") {
labels_df = zb_clock_labels(n_circles, segment_center = segment_center)
} else {
labels_df = zb_quadrant_labels(n_circles, n_segments, segment_center)
}
df = merge(doughnut_segments, labels_df, by = c("circle_id", "segment_id"))
df = df[c("label", "circle_id", "segment_id")]
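  # order zones from the centre outwards: first by ring (circle_id), then by segment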
order_id = order(df$circle_id * 100 + df$segment_id)
z = sf::st_transform(df[order_id, ], crs = orig_crs)
if (!all(sf::st_is_valid(z))) {
if (!requireNamespace("lwgeom")) {
warning("sf object invalid. To fix it, install lwgeom, and rerun zb_zone")
} else {
z = sf::st_make_valid(z)
z = suppressWarnings(st_cast(z, "MULTIPOLYGON")) # st_make_valid may return geometrycollections with empty points/lines
}
}
z$centroid = sf::st_geometry(st_centroid_within_poly(z))
if (!is.null(city)) {
z$city = city
}
z
}
st_centroid_within_poly <- function (poly) {
# check if centroid is in polygon
centroid <- suppressWarnings(sf::st_centroid(poly))
  in_poly <- diag(sf::st_within(centroid, poly, sparse = FALSE))
if (any(!in_poly)) {
suppressWarnings({
centroid$geometry[!in_poly] <- st_point_on_surface(poly[!in_poly,])$geometry
})
}
return(centroid)
}
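# Illustrative sketch (not run): st_centroid_within_poly() returns one point per
# polygon that is guaranteed to lie inside it, falling back to
# st_point_on_surface() where the true centroid falls outside the polygon:
#   z = zb_zone(london_c(), london_a())
#   st_centroid_within_poly(z)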
# Create zones of equal area (to be documented)
# z = zb_zone(london_a(), n_circles = 8, distance_growth = 0, equal_area = TRUE) # bug with missing pies
# suggestion: split out new new function, reduce n. arguments
# plot(z, col = 1:nrow(z))
zb_zone_equal_area = function(x = NULL,
                              area = NULL,
                              n_circles = NULL,
                              # n_segments = c(1, (1:(n_circles - 1)) * 4), # NA
                              n_segments = NA,
                              distance = 1,
                              distance_growth = 1,
                              intersection = TRUE) {
  # Functions to calculate distances
  n_segments = number_of_segments(n_circles = n_circles, distance = distance)
  zb_zone(x, area, n_circles, n_segments, distance, intersection = intersection)
}
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/R/zone.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# remotes::install_github("zonebuilders/zonebuilder")
## ----setup--------------------------------------------------------------------
# library(zonebuilder) # for the zoning system
# library(sf) # for processing spatial data
# library(dplyr) # for processing general data
# library(tmap) # for visualizing spatial data
## -----------------------------------------------------------------------------
# NLD_cities = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_cities.Rds"))
# NLD_wijk_od = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_od.Rds"))
# NLD_wijk_centroids = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_centroids.Rds"))
## -----------------------------------------------------------------------------
# NLD_cities %>% arrange(desc(population))
## -----------------------------------------------------------------------------
# tmap_mode("view") # enable interactive mode in tmap
# qtm(NLD_cities, symbols.size = "population")
## -----------------------------------------------------------------------------
# zbs = do.call(rbind, lapply(1:nrow(NLD_cities), function(i) {
# ci = NLD_cities[i, ]
#
# # Amsterdam 5, Eindhoven-Rotterdam 4, Roermond-Zeeland 2, others 3
# nrings = ifelse(ci$population < 60000, 2,
# ifelse(ci$population < 220000, 3,
# ifelse(ci$population < 800000, 4, 5)))
#
# zb = zb_zone(x = ci, n_circles = nrings) %>%
# mutate(name = ci$name,
# labelplus = paste(ci$name, label, sep = "_"))
#
# zb
# }))
## -----------------------------------------------------------------------------
# tm_basemap("OpenStreetMap") +
# tm_shape(zbs) +
# tm_polygons(col = "circle_id", id = "labelplus", style = "cat", palette = "YlOrBr", alpha = 0.7) +
# tm_scale_bar()
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/demo_dutch_cities.R |
---
title: "Combining zoning systems"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Combining zoning systems}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
```
This vignette demonstrates possibilities when zoning systems from different cities meet.
It raises the question: how should different systems be combined geographically?
You need the latest version of the package:
```{r, eval=FALSE}
remotes::install_github("zonebuilders/zonebuilder")
```
For this demo, we need the following libraries:
```{r setup}
library(zonebuilder) # for the zoning system
library(sf) # for processing spatial data
library(dplyr) # for processing general data
library(tmap) # for visualizing spatial data
```
We will apply the zoning system to the main Dutch cities, and analyse commuting patterns between the zones. The data can be read as follows:
```{r}
NLD_cities = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_cities.Rds"))
NLD_wijk_od = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_od.Rds"))
NLD_wijk_centroids = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_centroids.Rds"))
```
Let's take a look at the `NLD_cities` data:
```{r}
NLD_cities %>% arrange(desc(population))
```
...and plot it on an interactive map:
```{r}
tmap_mode("view") # enable interactive mode in tmap
qtm(NLD_cities, symbols.size = "population")
```
The following code chunk generates zones for the Dutch cities:
```{r}
zbs = do.call(rbind, lapply(1:nrow(NLD_cities), function(i) {
ci = NLD_cities[i, ]
# Amsterdam 5, Eindhoven-Rotterdam 4, Roermond-Zeeland 2, others 3
nrings = ifelse(ci$population < 60000, 2,
ifelse(ci$population < 220000, 3,
ifelse(ci$population < 800000, 4, 5)))
zb = zb_zone(x = ci, n_circles = nrings) %>%
mutate(name = ci$name,
labelplus = paste(ci$name, label, sep = "_"))
zb
}))
```
```{r}
tm_basemap("OpenStreetMap") +
tm_shape(zbs) +
tm_polygons(col = "circle_id", id = "labelplus", style = "cat", palette = "YlOrBr", alpha = 0.7) +
tm_scale_bar()
```
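The commuting data loaded at the start can then be aggregated to these zones.
The sketch below outlines one possible approach; the column names it uses (`wijk` in `NLD_wijk_centroids`, and `wijk_from`, `wijk_to` and `n` in `NLD_wijk_od`) are assumptions for illustration, so check the actual names with `names()` and adjust accordingly:
```{r}
# assign each wijk (district) centroid to a zone; assumes both layers share a CRS
wijk_zones = sf::st_join(NLD_wijk_centroids, zbs["labelplus"]) %>%
  sf::st_drop_geometry()
# look up the zone of each origin and destination, then aggregate the flows
od_zones = NLD_wijk_od %>%
  left_join(wijk_zones %>% select(wijk, zone_from = labelplus),
            by = c("wijk_from" = "wijk")) %>%
  left_join(wijk_zones %>% select(wijk, zone_to = labelplus),
            by = c("wijk_to" = "wijk")) %>%
  group_by(zone_from, zone_to) %>%
  summarise(commuters = sum(n), .groups = "drop")
od_zones
```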
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/demo_dutch_cities.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# remotes::install_github("zonebuilders/zonebuilder")
# remotes::install_github("itsleeds/pct")
## ----setup--------------------------------------------------------------------
# library(zonebuilder)
# library(dplyr)
# library(tmap)
# tmap_mode("view")
## ---- eval=FALSE--------------------------------------------------------------
# zones_west_yorkshire = pct::get_pct_zones("west-yorkshire")
# zones_leeds_official = zones_west_yorkshire %>% filter(lad_name == "Leeds")
## ---- eval=FALSE--------------------------------------------------------------
# leeds_centroid = tmaptools::geocode_OSM(q = "Leeds", as.sf = TRUE)
## ---- echo=FALSE, eval=FALSE--------------------------------------------------
# saveRDS(zones_leeds_official, "zones_leeds_official.Rds")
# piggyback::pb_upload("zones_leeds_official.Rds")
# piggyback::pb_download_url("zones_leeds_official.Rds")
# saveRDS(zones_leeds_zb, "zones_leeds_zb.Rds")
# piggyback::pb_upload("zones_leeds_zb.Rds")
## -----------------------------------------------------------------------------
# leeds_centroid = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_centroid.Rds"))
# zones_leeds_official = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/zones_leeds_official.Rds"))
# zone_outline = zones_leeds_official %>%
# sf::st_buffer(dist = 0.0001) %>%
# sf::st_union()
# zones_leeds_zb = zb_zone(x = leeds_centroid, area = zone_outline)
# tm_shape(zones_leeds_zb) + tm_borders() +
# tm_text("label")
## -----------------------------------------------------------------------------
# city_name = "Erbil"
# city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
# zones_erbil = zb_zone(x = city_centre, n_circles = 5)
# tm_shape(zones_erbil) + tm_borders() +
# tm_text("label") +
# tm_basemap(server = leaflet::providers$OpenStreetMap)
# # zb_view(zones_erbil)
## -----------------------------------------------------------------------------
# city_name = "Dhaka"
# city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
# zones_dhaka = zb_zone(x = city_centre, n_circles = 5)
# tm_shape(zones_dhaka) + tm_borders() +
# tm_text("label") +
# tm_basemap(server = leaflet::providers$OpenStreetMap)
## ---- eval=FALSE, echo=FALSE--------------------------------------------------
# # Aim: get the largest cities in the world
# cities_worldwide = rnaturalearth::ne_download(scale = 10, type = "populated_places")
#
# city_names = c(
#   "Delhi",
#   "Mexico City",
#   "Tokyo",
#   "Beijing"
# )
#
# city_name = "Delhi"
# city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
# zones_delhi = zb_zone(x = city_centre, n_circles = 5)
# tm_shape(zones_delhi) + tm_borders() +
# tm_text("label")
## ---- eval=FALSE--------------------------------------------------------------
# devtools::install_github("itsleeds/geofabrik")
# library(geofabrik)
# leeds_shop_polygons = get_geofabrik(leeds_centroid, layer = "multipolygons", key = "shop", value = "supermarket")
## ---- eval=FALSE, echo=FALSE--------------------------------------------------
# saveRDS(leeds_shop_polygons, "leeds_shop_polygons.Rds")
# piggyback::pb_upload("leeds_shop_polygons.Rds")
# piggyback::pb_download_url("leeds_shop_polygons.Rds")
# saveRDS(leeds_centroid, "leeds_centroid.Rds")
# piggyback::pb_upload("leeds_centroid.Rds")
# piggyback::pb_download_url("leeds_centroid.Rds")
# # leeds_roads = get_geofabrik(name = leeds_centroid)
# # leeds_shop_points = get_geofabrik(leeds_centroid, layer = "points", key = "amenity", value = "shop")
## -----------------------------------------------------------------------------
# leeds_shop_polygons = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_shop_polygons.Rds"))
# z = zb_zone(x = leeds_centroid, area = zones_leeds_official, n_circles = 5)
# z_supermarkets = aggregate(leeds_shop_polygons["shop"], z, FUN = length)
# tm_shape(z_supermarkets) +
# tm_polygons("shop", alpha = 0.5, title = "N. Supermarkets")
## -----------------------------------------------------------------------------
#
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/hackathon.R |
---
title: "Zonebuilder hackathon"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Zonebuilder hackathon}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
```
## Introduction
**zonebuilder** is a package for exploring zoning systems.
This document contains ideas on challenges that can be tackled using zoning systems, example code to get started and suggestions of how to get involved.
## Setup
To ensure that you have the necessary software installed, try running the following lines of code in an R console (you need the latest version of the package):
```{r, eval=FALSE}
remotes::install_github("zonebuilders/zonebuilder")
remotes::install_github("itsleeds/pct")
```
```{r setup}
library(zonebuilder)
library(dplyr)
library(tmap)
tmap_mode("view")
```
Ideas for hackathon:
- Explore results from automated zoning of a range of cities
- How many supermarkets in different zones of the city?
- Explore how mode and distance of travel changes depending on city zones
- Explore how to calculate travel times from zone to zone for different travel modes
- Explore how travel times from city zones to the city centre, for different modes and multiple cities, affect the number of commuters
- Find a data-driven method for defining the city centre (e.g. density of addresses, population density, building date, number of companies, number of nodes of the road infrastructure).
- Number of houses vs estimated population in different zones using UK data
- Demonstrate aggregation of OD data into the zoning system
```{r, eval=FALSE}
zones_west_yorkshire = pct::get_pct_zones("west-yorkshire")
zones_leeds_official = zones_west_yorkshire %>% filter(lad_name == "Leeds")
```
```{r, eval=FALSE}
leeds_centroid = tmaptools::geocode_OSM(q = "Leeds", as.sf = TRUE)
```
```{r, echo=FALSE, eval=FALSE}
saveRDS(zones_leeds_official, "zones_leeds_official.Rds")
piggyback::pb_upload("zones_leeds_official.Rds")
piggyback::pb_download_url("zones_leeds_official.Rds")
saveRDS(zones_leeds_zb, "zones_leeds_zb.Rds")
piggyback::pb_upload("zones_leeds_zb.Rds")
```
You can get and plot the output of the preceding code chunk with:
```{r}
leeds_centroid = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_centroid.Rds"))
zones_leeds_official = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/zones_leeds_official.Rds"))
zone_outline = zones_leeds_official %>%
sf::st_buffer(dist = 0.0001) %>%
sf::st_union()
zones_leeds_zb = zb_zone(x = leeds_centroid, area = zone_outline)
tm_shape(zones_leeds_zb) + tm_borders() +
tm_text("label")
```
## Explore results of automated zoning system
### Generate zones for different cities
The zoning system works well for representing cities that have a clear centre (monocentric cities) with zones connected by radial and circular orbital routes, such as Erbil:
```{r}
city_name = "Erbil"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_erbil = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_erbil) + tm_borders() +
tm_text("label") +
tm_basemap(server = leaflet::providers$OpenStreetMap)
# zb_view(zones_erbil)
```
The zoning system works less well for other cities, e.g. cities with asymmetric and polycentric urban morphologies such as Dhaka, shown below.
```{r}
city_name = "Dhaka"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_dhaka = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_dhaka) + tm_borders() +
tm_text("label") +
tm_basemap(server = leaflet::providers$OpenStreetMap)
```
```{r, eval=FALSE, echo=FALSE}
# Aim: get the largest cities in the world
cities_worldwide = rnaturalearth::ne_download(scale = 10, type = "populated_places")
city_names = c(
  "Delhi",
  "Mexico City",
  "Tokyo",
  "Beijing"
)
city_name = "Delhi"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_delhi = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_delhi) + tm_borders() +
tm_text("label")
```
### How many supermarkets in different zones of the city?
```{r, eval=FALSE}
devtools::install_github("itsleeds/geofabrik")
library(geofabrik)
leeds_shop_polygons = get_geofabrik(leeds_centroid, layer = "multipolygons", key = "shop", value = "supermarket")
```
```{r, eval=FALSE, echo=FALSE}
saveRDS(leeds_shop_polygons, "leeds_shop_polygons.Rds")
piggyback::pb_upload("leeds_shop_polygons.Rds")
piggyback::pb_download_url("leeds_shop_polygons.Rds")
saveRDS(leeds_centroid, "leeds_centroid.Rds")
piggyback::pb_upload("leeds_centroid.Rds")
piggyback::pb_download_url("leeds_centroid.Rds")
# leeds_roads = get_geofabrik(name = leeds_centroid)
# leeds_shop_points = get_geofabrik(leeds_centroid, layer = "points", key = "amenity", value = "shop")
```
We have pre-saved the results as follows:
```{r}
leeds_shop_polygons = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_shop_polygons.Rds"))
z = zb_zone(x = leeds_centroid, area = zones_leeds_official, n_circles = 5)
z_supermarkets = aggregate(leeds_shop_polygons["shop"], z, FUN = length)
tm_shape(z_supermarkets) +
tm_polygons("shop", alpha = 0.5, title = "N. Supermarkets")
```
<!--  -->
#### Explore how mode and distance of travel changes depending on city zones
```{r}
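# Sketch of one possible approach (not a definitive implementation): compare
# commute mode shares across the ClockBoard zones of Leeds, using the PCT
# zone data loaded above. The column names `all`, `bicycle` and `car_driver`
# are assumed -- check names(zones_west_yorkshire) and adjust if needed.
zones_mode = zones_west_yorkshire %>%
  sf::st_point_on_surface() %>%
  sf::st_join(zones_leeds_zb["label"]) %>%
  sf::st_drop_geometry() %>%
  group_by(label) %>%
  summarise(pcycle = sum(bicycle) / sum(all),
            pdrive = sum(car_driver) / sum(all))
zones_leeds_mode = left_join(zones_leeds_zb, zones_mode, by = "label")
tm_shape(zones_leeds_mode) +
  tm_polygons("pcycle", title = "Cycling mode share")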
```
Robin to create UK example
#### Demo Dutch cities and commuting
See [demo Dutch cities vignette](https://zonebuilders.github.io/zonebuilder/articles/demo_dutch_cities.html)
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/hackathon.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
eval = FALSE,
echo = FALSE,
collapse = TRUE,
comment = "#>",
message = FALSE
)
library(zonebuilder)
library(tmap)
library(dplyr)
## ---- eval=FALSE--------------------------------------------------------------
# # To uses josis template:
# remotes::install_github("robinlovelace/rticles", ref = "josis")
# refs = RefManageR::ReadZotero(group = "418217", .params = list(collection = "8S8LR8TK", limit = 100))
# RefManageR::WriteBib(refs, "vignettes/references.bib")
# # Set-up notes for pdf version
# # convert .tex to .md
# # system("pandoc -s -r latex paper.tex -o paper.md")
# # # copy josis specific files and ignore them
# # f = list.files("~/other-repos/rticles/inst/rmarkdown/templates/josis/skeleton", pattern = "josis", full.names = TRUE)
# # file.copy(f, "vignettes")
# rmarkdown::render(input = "vignettes/paper.Rmd", output_file = "../zonebuilder-paper.pdf")
# browseURL("zonebuilder-paper.pdf")
# piggyback::pb_upload("zonebuilder-paper.pdf")
## ----options, fig.cap="Illustration of ideas explored in the lead-up to the development of the ClockBoard zoning system, highlighting the incremental and iterative evolution of the approach.", out.width="32%", fig.show='hold'----
# # z1 = zb_zone(x = london_c(), n_segments = 1)
# # m1 = qtm(z1, title = "(A) Concentric Annuli")
# # sf::sf_use_s2(use_s2 = FALSE)
# z1 = zb_zone(london_c(), n_segments = 1, distance_growth = 0)
# z1_areas = sf::st_area(z1)
# z1_areas_relative = as.numeric(z1_areas / z1_areas[1])
# zb_plot(z1, title = "(A) Concentric Annuli")
# zb_plot(zb_segment(london_c(), n_segments = 12), title = "(B) Clock segments")
# qtm(zb_zone(london_c(), n_segments = z1_areas_relative, labeling = "clock", distance_growth = 0), title = "(C) Equal area zones", fill = NULL)
## ----t1-----------------------------------------------------------------------
# txt = "Number of rings,Diameter across (km),Area (sq km)
# 1,1,2,3.14
# 2,2,6,28.27
# 3,3,12,113.10
# 4,4,20,314.16
# 5,5,30,706.86
# 6,6,42,1385.44
# 7,7,56,2463.01
# 8,8,72,4071.50
# 9,9,90,6361.73"
# t1 = read.csv(text = txt, check.names = FALSE)
# knitr::kable(t1, booktabs = TRUE, caption = "Key attributes of first 9 rings used in the ClockBoard zoning system.")
## ---- fig.width=7, message=FALSE, warning=FALSE-------------------------------
# # download preprocessed data (processing script /data-raw/crashes.R)
# df = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/ksi_bkm_zone.rds")))
# uk = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/uk.rds")))
# thames = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/thames.rds")))
#
# # filter: set zones with less than 10,000 km of cycling per yer to NA
# df_filtered = df %>%
# mutate(ksi_bkm = ifelse((bkm_yr * 1e09) < 2e04, NA, ksi_bkm))
#
# tmap_mode("plot")
# tm_shape(uk) +
# tm_fill(col = "white") +
# tm_shape(df_filtered, is.master = TRUE) +
# tm_polygons("ksi_bkm", breaks = c(0, 1000, 2500, 5000, 7500, 12500), textNA = "Too little cycling", title = "Killed and seriously injured cyclists\nper billion cycled kilometers") +
# tm_facets(by = "city", ncol=4) +
# tm_shape(uk) +
# tm_borders(lwd = 1, col = "black", lty = 3) +
# tm_shape(thames) +
# tm_lines(lwd = 1, col = "black", lty = 3) +
# tm_layout(bg.color = "lightblue")
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/paper.R |
---
title: "ClockBoard: a zoning system for urban analysis"
bibliography: references.bib
# # For R package vignette
output: bookdown::html_vignette2
vignette: >
%\VignetteIndexEntry{ClockBoard: a zoning system for urban analysis}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
pkgdown:
as_is: true
set_null_theme: false
# # For paper: Comment the above and uncomment below for LaTeX version
# output:
# bookdown::pdf_book:
# base_format: rticles::josis_article
# author1: Robin Lovelace
# affil1: University of Leeds
# author2: Martijn Tennekes
# affil2: Department, Institution, Country TBC
keywords: "zoning, areal data, zoning systems, modifiable area unit problem"
abstract: |
Zones are the building blocks of urban analysis. Fields ranging from
demographics to transport planning routinely use zones --- spatially
contiguous areal units that break-up continuous space into discrete
chunks --- as the foundation for diverse analysis techniques. Key
methods such as origin-destination analysis and choropleth mapping
rely on zones with appropriate sizes, shapes and coverage. However,
existing zoning systems are sub-optimal in many urban analysis
contexts, for three main reasons: 1) administrative zoning systems are
often based on somewhat arbitrary factors; 2) evidence-based zoning
systems are often highly variable in size and shape, reducing their
utility for inter-city comparison; and 3) the resolution of existing
zoning systems is often too low for certain urban analysis, especially
in low income nations. To tackle these three key issues we developed a
flexible, open and scalable solution: the ClockBoard zoning system.
ClockBoard consists of 12 segments divided by concentric rings of
increasing distance, creating a consistent visual frame of reference
for cities that is reminiscent of a clock and a dartboard. This paper
outlines the design, potential uses and merits of the ClockBoard
zoning system and discusses future avenues for research and
development of new zoning systems based on the experience.
runningtitle: "The ClockBoard Zoning System"
# Explore visual editor options: sentence based line breaks...
# changing annoying arbitrary character based linebreaks:
editor_options:
markdown:
wrap: sentence
# That automagically reformatted the Rmd doc!
# Result: it works!
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
echo = FALSE,
collapse = TRUE,
comment = "#>",
message = FALSE
)
library(zonebuilder)
library(tmap)
library(dplyr)
```
```{r, eval=FALSE}
# To uses josis template:
remotes::install_github("robinlovelace/rticles", ref = "josis")
refs = RefManageR::ReadZotero(group = "418217", .params = list(collection = "8S8LR8TK", limit = 100))
RefManageR::WriteBib(refs, "vignettes/references.bib")
# Set-up notes for pdf version
# convert .tex to .md
# system("pandoc -s -r latex paper.tex -o paper.md")
# # copy josis specific files and ignore them
# f = list.files("~/other-repos/rticles/inst/rmarkdown/templates/josis/skeleton", pattern = "josis", full.names = TRUE)
# file.copy(f, "vignettes")
rmarkdown::render(input = "vignettes/paper.Rmd", output_file = "../zonebuilder-paper.pdf")
browseURL("zonebuilder-paper.pdf")
piggyback::pb_upload("zonebuilder-paper.pdf")
```
# Introduction
Zoning systems have long been used for practical purposes.
They have been integral to land ownership, rents and urban policies for centuries, forming the basis of a range of social and economic practices.
Historical examples highlighting the importance of zone layouts include 'tithe maps' determining land ownership and taxes in 18th Century England [@bryant_worcestershire_2007] and legally defined urban land use zones to tame chaotic urban expansion in the exploding US cities in the early 1900s [@baker_zoning_1925].
In the 19th Century, zoning systems became known for political reasons, with 'gerrymandering' entering public discourse and academic research following Elbridge Gerry's apparent attempt to gain political advantage by creating an electoral district in an odd shape that was said to resemble a salamander (hence the term's name) in 1812 [@orr_persistence_1969].
The gerrymandering problem has since been the topic of countless academic papers.
The gerrymandering problem (itself a manifestation of the modifiable area unit problem) can be described as a mathematical optimization problem: "$n$ units are grouped into $k$ zones such that some cost function is optimized, subject to constraints on the topology of the zones" [@chou_taming_2006]. In fact, this problem is a concise definition of the broader "zoning problem" that starts from the assumption that zones are to be composed of one or more basic statistical units (BSUs). Although the range of outcomes is a finite combinatorial optimisation problem (which combination of BSU-zone aggregations satisfy/optimise some pre-determined criteria), the problem is still hard: "there are a tremendously large number of alternative partitions, a similar number of different results, and only a slightly smaller number of different interpretations" [@openshaw_optimal_1977].
The problem that we tackle in this paper is different, however: it is the division of geographic space into zones **starting from a blank slate**, without reference to pre-existing areal units.
The focus of much preceding zoning research on BSU partitioning can be explained by the fact that much geographic data available to academics comes in 'pre-packaged' small areas and because creating zones from nothing is a harder problem.
We disagree with the statement that "existence of individual or non-spatially aggregated data is rare in geography" [@openshaw_optimal_1977], pointing to car crashes, shop locations, species identification data and dozens of other phenomena that can be understood as 'point pattern processes'.
And with advances in computer hardware and software, the 'starting from scratch' approach to zoning system is more feasible.
A number of approaches have tackled the question of how to best divide up geographical space for analysis and visualisation purposes, with a variety of applications.
Functional zone classification is common in the field of remote sensing and associated sub-fields involved in analysing and classifying raster datasets [@ciglic_evaluating_2019; @hesselbarth_landscapemetrics_2019].
While such pixel-based approaches can yield complex and flexible results (depending on the geographic resolution of the input data), they are still constrained by the building blocks of the pixels, which can be seen as a particular type of areal unit, a uniformly sized and shaped BSU.
In this paper we are interested in the division of *continuous space* into completely new areal systems.
This has been done using contour lines to represent lines of equal height, and the concept's generalisation to lines of equal journey time from locations (isochrones) [@long_modeling_2018], population density (isopleths) [@lin_cartographic_2017] and model parameters that vary over continuous geographical space [@paez_exploring_2006].
The boundaries created by these various 'iso' maps are 'procedurally generated' areal units of the type on which this paper focuses, but their variability and often irregular shapes make them impractical for many types of urban analysis.
Procedural generation, which involves the generation of data through a repeated and sometimes randomised computational process, has long been used to represent physical phenomena [@onrust_ecologically_2017]. The approach has been used to generate spatial entities including roads [@galin_procedural_2010], indoor layouts of buildings [@anderson_augmented_2018] and urban layouts [@mustafa_procedural_2020]. Algorithms have also been developed to place linear features on a map, as illustrated by an algorithm that optimizes the placement of overlapping linear features for cartographic visualisation [@teulade-denantes_routes_2015].
However, no previous research has demonstrated the creation of zoning systems specifically for the purposes of urban analysis.
New visualisation techniques are needed to represent new (or newly quantifiable) concepts and emerging datasets (such as OpenStreetMap) in urban analysis.
The visualisation of direction has been driven by new navigational requirements and datasets, with circular compasses and displays common in land and sea navigational systems since the mid 1900s [@honick_pictorial_1967]. Circular visualisation techniques, in the form of rose diagrams, were used in a more recent study to indicate the most common road directions relative to North [@boeing_spatial_2021]. The resulting visualisations are attractive and easy to interpret, but are not geographical, in the sense that they cannot meaningfully be overlaid on mapped data. The approach we present in this paper is more closely analogous to 'grid sample' approaches used in ecological and population research [@hirzel_which_2002]. Historically, environmental researchers have used rectangular (and usually square) grids to divide up space and decide sampling strategies. Limitations associated with this simplistic strategy have been documented since at least the 1960s, with a prominent paper on geographic sampling strategies outlining advantages and disadvantages of simple random, systematic and stratified sampling techniques in 1967 [@holmes_problems_1967]. Starting with data at the level of raster grid cells and BSUs, a related approach is to sample from within available 'pixels' to generate a representative sample [@thomson_gridsample_2017].
Unlike BSU based zoning systems, grid sampling strategies require no prior zones.
Unlike 'procedurally generated' areas, grid-based strategies generate areal units of consistent sizes and shapes.
However, grid-based strategies are limited in their applicability to urban research because they seldom generate geographically contiguous results and do not account for the strong tendency of human settlements to have a (more-or-less clearly demarcated) central location with higher levels of activity.
Pre-existing zoning systems are often based on administrative regions.
Although those zoning systems are usually in line with the hierarchical structure of governmental organisations, and therefore may work well for policy making, there are a couple of downsides to using them.
First of all, since a city and its politics change over time, the administrative regions often change accordingly.
This makes time series analysis harder.
Since administrative regions have heterogeneous characteristics, for instance in population size, area and proximity to the city centre, comparing different administrative regions within a city is not straightforward.
Moreover, comparing administrative regions across cities is even more challenging, since the average scale of an administrative region varies considerably between cities.
Grid tiles are popular in spatial statistics for a number of reasons.
Most importantly, the tiles have a constant area, which makes comparisons possible.
Moreover, grid tiles do not change over time as administrative regions do.
However, one downside is that a grid requires a coordinate reference system (CRS), enforcing (approximately) equal area size.
For continents or large countries, a CRS is always a compromise.
Therefore, the areas of the tiles may vary, or the shape of the tiles may be sheared or warped.
Another downside from a statistical point of view is that population densities are not uniform within an urban area, but concentrated around a centre.
As a consequence, high-resolution statistics are preferable in dense areas, i.e. the centre, and lower-resolution statistics in other parts of the city.
That is the reason why administrative regions are often smaller in dense areas.
The approach presented in this paper aims to minimise input data requirements, generate consistent zones comparable between widely varying urban systems, and provide geographically contiguous areal units.
The motivations for generating a new zoning system and use cases envisioned include:
- Locating cities.
Automated zoning systems based on a clear centrepoint can support map interpretation by making it immediately clear where the city centre is, and what the scale of the city is.
-   Reference system of everyday life.
The zone name contains information about the distance to the center as well as the cardinal direction.
E.g "I live in C12 and work in B3." or "The train station is in the center and our hotel is in B7".
    Moreover, the zones indicate whether walking or cycling is a feasible option given the distance.
- Aggregation for descriptive statistics / comparability over cities.
By using the zoning system to aggregate statistics (e.g. on population density, air quality, bicycle use, number of dwellings), cities can easily be compared to each other.
-   Modelling cities.
The zoning system can be used to model urban mobility.
The paper is structured as follows.
The next section outlines the approach, which requires only 2 inputs: the coordinates of the central place in the urban system under investigation, and the minimum radius from that central point that the zoning system should extend.
Section 3 describes a number of potential applications, ranging from rudimentary navigation and location identification to mobility analysis.
Finally, in Section 4, we discuss limitations of the approach and possible directions of research and development to generate additional zoning systems for urban analysis.
# The ClockBoard zoning system
The aim of the ClockBoard zoning system outlined in this paper is to tackle the issues outlined above, with an approach that is free, open, reproducible and easy to extend.
Specifically, we developed the system considering urban analysis research and visualisation requirements, leading to the following high-level criteria.
Zoning systems for urban analysis should:
-   contain intuitively named zones, enabling public communication of research, e.g. with reference to common perceptions of space in terms of distance from the city centre and direction relative to North
- be easy to visualise without too many (100+) or too few (less than 10) zones
- include zones of consistent and useful sizes, for example with zone areas increasing with distance from the urban centres to reflect relatively high densities in central locations
- be 'scale agnostic', capable of representing a range of urban forms ranging from extensive cities such as Mexico City to compact cities such as Hong Kong
- be extensible and based on open source software, enabling others to create alternative zoning systems suited to diverse needs
After a process of iteration in which we considered many zoning options (some of which are illustrated in Figure \@ref(fig:options)) we settled on a system that we have called 'ClockBoard' (for reasons that will become apparent) and which has the following characteristics.
The zoning system is based on **concentric rings** --- formally called 'concentric annuli' --- which emphasise central locations and have been used to explore the relationships between the characteristics of 'focal trees' and surrounding trees in ecological research [@wills_persistence_2016], as shown in Figure \@ref(fig:options) (a).
The zoning system is based on **segments** defined by radial lines emanating from the central point of the settlement (or other geographic entity) to be divided into zones, as shown in Figure \@ref(fig:options) (b).
From that point, we experimented with a range of ways of dividing the concentric annuli into different zones by modifying the distances between rings (the annuli borders) and the number of segments per annulus (see Figure \@ref(fig:options) c).
It became apparent that zoning systems based on the two organising principles (and modifiable parameters) of concentric annuli and segments held promise, but selecting appropriate settings for each was key to the development of a zoning system that would meet the criteria outlined above.
<!-- commented out the next sentence as it's kind of obvious -->
<!-- The key parameters of annuli distances and number of segments are discussed below. -->
```{r options, fig.cap="Illustration of ideas explored in the lead-up to the development of the ClockBoard zoning system, highlighting the incremental and iterative evolution of the approach.", out.width="32%", fig.show='hold'}
# z1 = zb_zone(x = london_c(), n_segments = 1)
# m1 = qtm(z1, title = "(A) Concentric Annuli")
# sf::sf_use_s2(use_s2 = FALSE)
z1 = zb_zone(london_c(), n_segments = 1, distance_growth = 0)
z1_areas = sf::st_area(z1)
z1_areas_relative = as.numeric(z1_areas / z1_areas[1])
zb_plot(z1, title = "(A) Concentric Annuli")
zb_plot(zb_segment(london_c(), n_segments = 12), title = "(B) Clock segments")
qtm(zb_zone(london_c(), n_segments = z1_areas_relative, labeling = "clock", distance_growth = 0), title = "(C) Equal area zones", fill = NULL)
```
## Annuli distances
The radius of each annulus in the zoning system can be incremented by a fixed amount, as shown in previous figures.
In cases where high geographic resolution is important near the centre of the study region, such as when planning transport systems into the central zone of a city, increasing the distance between each successive radius may be desirable.
We experimented with various ways of incrementing the annuli widths and suggest linear increases in width as a sensible default for a simple zoning system.
This linear growth leads to the distances between successive annuli boundaries increasing in line with the steps in the [triangular number sequence](https://en.wikipedia.org/wiki/Triangular_number) [@ross_dicuil_2019], as outlined in Table \@ref(tab:t1).
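A minimal sketch of this sequence, assuming ring widths of `distance + (n - 1) * distance_growth` with the default settings `distance = 1` and `distance_growth = 1`, reproduces the values in Table \@ref(tab:t1):
```{r, eval=FALSE, echo=TRUE}
n = 1:9
width = 1 + (n - 1) * 1  # ring widths grow linearly: 1, 2, 3, ... km
radius = cumsum(width)   # outer radii are triangular numbers: 1, 3, 6, 10, ...
data.frame(ring = n, radius = radius, diameter = 2 * radius,
           area = round(pi * radius^2, 2))
```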
```{r t1}
txt = "Number of rings,Diameter across (km),Area (sq km)
1,1,2,3.14
2,2,6,28.27
3,3,12,113.10
4,4,20,314.16
5,5,30,706.86
6,6,42,1385.44
7,7,56,2463.01
8,8,72,4071.50
9,9,90,6361.73"
t1 = read.csv(text = txt, check.names = FALSE)
knitr::kable(t1, booktabs = TRUE, caption = "Key attributes of first 9 rings used in the ClockBoard zoning system.")
```
## Number of segments
The ClockBoard zoning system has 12 segments, representing a compromise between specificity of zone identification and ease of comprehension (imagine a system with 256 segments and saying "I'm in zone E173"!):
the 12 segments of a clock face are well understood.
For comparison, the sketch below shows what the system looks like with only 4 segments.
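The following minimal sketch uses the same helper functions as Figure \@ref(fig:options); with 4 segments, the NESW labeling applies, so the zones are named after the cardinal directions N, E, S and W:
```{r, eval=FALSE, echo=TRUE}
zb_plot(zb_segment(london_c(), n_segments = 4), title = "4 segments")
```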
## City extents
# Applications
## Navigation and location
## Exploring city scale data
Univariate description, for example:
- Population density in London
- Social (e.g. religion) and demographic distributions
## Inter-city statistical comparison
```{r, fig.width=7, message=FALSE, warning=FALSE}
# download preprocessed data (processing script /data-raw/crashes.R)
df = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/ksi_bkm_zone.rds")))
uk = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/uk.rds")))
thames = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/thames.rds")))
# filter: set zones with less than 10,000 km of cycling per yer to NA
df_filtered = df %>%
mutate(ksi_bkm = ifelse((bkm_yr * 1e09) < 2e04, NA, ksi_bkm))
tmap_mode("plot")
tm_shape(uk) +
tm_fill(col = "white") +
tm_shape(df_filtered, is.master = TRUE) +
tm_polygons("ksi_bkm", breaks = c(0, 1000, 2500, 5000, 7500, 12500), textNA = "Too little cycling", title = "Killed and seriously injured cyclists\nper billion cycled kilometers") +
tm_facets(by = "city", ncol=4) +
tm_shape(uk) +
tm_borders(lwd = 1, col = "black", lty = 3) +
tm_shape(thames) +
tm_lines(lwd = 1, col = "black", lty = 3) +
tm_layout(bg.color = "lightblue")
```
<!-- {#fig:cyclAccidents width="\\textwidth"} -->
## Mobility analysis
# Discussion and conclusions
Pros:
- Most cities have a radial plan around a central area, which is often a historic centre or a central business area. Typically, this centre is not only the geographic centre, but also the busiest area in terms of daytime population. Often the main nodes in the urban transport network are also located in or near the city centre. Note that many cities already consist of concentric rings, separated by a ring road. (See also <https://en.wikipedia.org/wiki/City_centre> which describes the centre as the heart of the city)
Cons:
- Some cities have two or more centres.
    Many cities have a central business district or financial district, which does not always coincide with the historic city centre.
-   In urban areas with nearby cities, it may not always be clear where one city ends and another begins.
Also, small cities may be located within the metropolitan area of a larger city (e.g. the Dutch cities The Hague/Delft)
<!-- World cities todo: create appendix-->
<!-- ============ -->
<!-- {#fig:cities1 -->
<!-- width="\\textwidth"} -->
<!-- {#fig:cities2 width="\\textwidth"} -->
<!-- {#fig:cities3 width="\\textwidth"} -->
<!-- # Old paper -->
<!-- # Introduction -->
<!-- ### Current situation -->
<!-- Statistics, policy making and transport planning are often based on administrative regions. However, there are a couple of downsides to using administrative regions. First of all, since a city and its politics change over time, the administrative regions often change accordingly. This make it harder to do time series analysis. Since the administrative regions have heterogeneous characteristics, for instance population size, area size, proximity to the city centre, comparing different administrative regions within a city is not straightforward. Moreover, comparing administrative regions across cities is even more challenging since average scale of an administrative region may vary a lot across cities. -->
<!-- Another downside from a statistical point of view is that population densities are not uniform within a urban area, but concentrated around a centre. As a consequence, high resolution statistics is preferable in the dense areas, i.e. the centre, and lower resolution statitics in other parts of the city. That is the reason why administrative regions are often smaller in dense areas. -->
<!-- ### Why is it needed/handy? -->
<!-- * Locating cities. By having a ClockBoard zoning system for a city, it is immediate clear where the city centre is, and what the scale of the city is. -->
<!-- * Reference system of everyday live. The zone name contains information about the distance to the center as well as the cardinal direction. E.g "I live in C12 and work in B3." or "The train station is in the center and our hotel is in B7". Moreover, the zones indicate whether walking and cycling is a feasibly option regarding the distance. -->
<!-- * Aggregation for descriptive statistics / comparability over cities. By using the zoning system to aggregate statistics (e.g. on population density, air quality, bicycle use, number of dwellings), cities can easily be compared to each other. -->
<!-- * Modelling urban cities. The zoning system can be used to model urban mobility. -->
<!-- ### Arguments / discussion -->
<!-- Pros: -->
<!-- * Most cities have a radial plan around a central area, which is often a historic centre or a central business area. -->
<!-- Typically, this centre is not only the geographic centre, but also the busiest area in terms of daytime population. Often the main nodes in the urban transport network are also located in or near the city centre. Note that many cities already consist of concentric rings, separated by a ring road. (See also https://en.wikipedia.org/wiki/City_centre which describes the centre as the heart of the city) -->
<!-- Cons: -->
<!-- * Some cities have two or more centres. Many cities have a central business discrict or financial discrict which not always coinsides with the historic city centre. -->
<!-- * In urban areas with nearby cities, it may not always be clear where one cities ends and another begins. Also, small cities may be located within the metropolitan area of a larger city (e.g. the Dutch cities The Hague/Delft) -->
<!-- Comparing two cities can be difficult due to incomparable zoning systems. -->
<!-- Take the example of city A is provided in a detailed zoning system composed of hundreds of small, irregular areas while city B is composed only of 9 irregular zones -->
<!-- TODO: reference to https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0050606 -->
# References
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/doc/paper.Rmd |
library(tidyverse)
library(sf)
cities = rnaturalearth::ne_download("large", type = "populated_places", returnclass = "sf")
bristol_midpoint = cities %>% filter(NAME == "Bristol") %>%
filter(POP_MAX == max(POP_MAX))
mapview::mapview(bristol_midpoint)
bristol_midpoint_aeq = bristol_midpoint %>%
st_transform(stplanr::geo_select_aeq(.))
mapview::mapview(bristol_midpoint_aeq)
z = zb_zone(x = bristol_midpoint_aeq, n_circles = 20)
library(tmap)
tmap_mode("view")
qtm(z)
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/inst/test-cities.R |
---
title: "Combining zoning systems"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Combining zoning systems}
%\VignetteEncoding{UTF-8}
%\VignetteEngine{knitr::rmarkdown}
editor_options:
chunk_output_type: console
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
```
This vignette demonstrates possibilities when zoning systems from different cities meet.
It raises the question: how should different systems be combined geographically?
You need the latest version of the package:
```{r, eval=FALSE}
remotes::install_github("zonebuilders/zonebuilder")
```
For this demo, we need the following libraries:
```{r setup}
library(zonebuilder) # for the zoning system
library(sf) # for processing spatial data
library(dplyr) # for processing general data
library(tmap) # for visualizing spatial data
```
We will apply the zoning system to the main Dutch cities, and analyse commuting patterns between the zones. The data can be read as follows:
```{r}
NLD_cities = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_cities.Rds"))
NLD_wijk_od = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_od.Rds"))
NLD_wijk_centroids = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/NLD_wijk_centroids.Rds"))
```
Let's take a look at the `NLD_cities` data:
```{r}
NLD_cities %>% arrange(desc(population))
```
...and plot it on an interactive map:
```{r}
tmap_mode("view") # enable interactive mode in tmap
qtm(NLD_cities, symbols.size = "population")
```
The following code chunk generates zones for the Dutch cities:
```{r}
zbs = do.call(rbind, lapply(1:nrow(NLD_cities), function(i) {
ci = NLD_cities[i, ]
# Amsterdam 5, Eindhoven-Rotterdam 4, Roermond-Zeeland 2, others 3
nrings = ifelse(ci$population < 60000, 2,
ifelse(ci$population < 220000, 3,
ifelse(ci$population < 800000, 4, 5)))
zb = zb_zone(x = ci, n_circles = nrings) %>%
mutate(name = ci$name,
labelplus = paste(ci$name, label, sep = "_"))
zb
}))
```
```{r}
tm_basemap("OpenStreetMap") +
tm_shape(zbs) +
tm_polygons(col = "circle_id", id = "labelplus", style = "cat", palette = "YlOrBr", alpha = 0.7) +
tm_scale_bar()
```
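The commuting data loaded at the start can then be aggregated to these zones.
The sketch below outlines one possible approach; the column names it uses (`wijk` in `NLD_wijk_centroids`, and `wijk_from`, `wijk_to` and `n` in `NLD_wijk_od`) are assumptions for illustration, so check the actual names with `names()` and adjust accordingly:
```{r}
# assign each wijk (district) centroid to a zone; assumes both layers share a CRS
wijk_zones = sf::st_join(NLD_wijk_centroids, zbs["labelplus"]) %>%
  sf::st_drop_geometry()
# look up the zone of each origin and destination, then aggregate the flows
od_zones = NLD_wijk_od %>%
  left_join(wijk_zones %>% select(wijk, zone_from = labelplus),
            by = c("wijk_from" = "wijk")) %>%
  left_join(wijk_zones %>% select(wijk, zone_to = labelplus),
            by = c("wijk_to" = "wijk")) %>%
  group_by(zone_from, zone_to) %>%
  summarise(commuters = sum(n), .groups = "drop")
od_zones
```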
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/vignettes/demo_dutch_cities.Rmd |
---
title: "Zonebuilder hackathon"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Zonebuilder hackathon}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
collapse = TRUE,
comment = "#>"
)
```
## Introduction
**zonebuilder** is a package for exploring zoning systems.
This document contains ideas on challenges that can be tackled using zoning systems, example code to get started and suggestions of how to get involved.
## Setup
To ensure that you have the necessary software installed, try running the following lines of code in an R console (you need the latest version of the package):
```{r, eval=FALSE}
remotes::install_github("zonebuilders/zonebuilder")
remotes::install_github("itsleeds/pct")
```
```{r setup}
library(zonebuilder)
library(dplyr)
library(tmap)
tmap_mode("view")
```
Ideas for hackathon:
- Explore results from automated zoning of a range of cities
- How many supermarkets in different zones of the city?
- Explore how mode and distance of travel changes depending on city zones
- Explore how to calculate travel times from zone to zone for different travel modes
- Explore how travel times from city zones to the city centre, for different modes and multiple cities, affect the number of commuters
- Find a data-driven method for defining the city centre (e.g. density of addresses, population density, building date, number of companies, number of nodes of the road infrastructure).
- Number of houses vs estimated population in different zones using UK data
- Demonstrate aggregation of OD data into the zoning system
```{r, eval=FALSE}
zones_west_yorkshire = pct::get_pct_zones("west-yorkshire")
zones_leeds_official = zones_west_yorkshire %>% filter(lad_name == "Leeds")
```
```{r, eval=FALSE}
leeds_centroid = tmaptools::geocode_OSM(q = "Leeds", as.sf = TRUE)
```
```{r, echo=FALSE, eval=FALSE}
saveRDS(zones_leeds_official, "zones_leeds_official.Rds")
piggyback::pb_upload("zones_leeds_official.Rds")
piggyback::pb_download_url("zones_leeds_official.Rds")
saveRDS(zones_leeds_zb, "zones_leeds_zb.Rds")
piggyback::pb_upload("zones_leeds_zb.Rds")
```
You can get and plot the output of the preceding code chunk with:
```{r}
leeds_centroid = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_centroid.Rds"))
zones_leeds_official = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/zones_leeds_official.Rds"))
zone_outline = zones_leeds_official %>%
sf::st_buffer(dist = 0.0001) %>%
sf::st_union()
zones_leeds_zb = zb_zone(x = leeds_centroid, area = zone_outline)
tm_shape(zones_leeds_zb) + tm_borders() +
tm_text("label")
```
## Explore results of automated zoning system
### Generate zones for different cities
The zoning system works well for representing cities that have a clear centre (monocentric cities) with zones connected by radial and circular orbital routes, such as Erbil:
```{r}
city_name = "Erbil"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_erbil = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_erbil) + tm_borders() +
tm_text("label") +
tm_basemap(server = leaflet::providers$OpenStreetMap)
# zb_view(zones_erbil)
```
The zoning system works less well for other cities, e.g. cities with asymmetric and polycentric urban morphologies such as Dhaka, shown below.
```{r}
city_name = "Dhaka"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_dhaka = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_dhaka) + tm_borders() +
tm_text("label") +
tm_basemap(server = leaflet::providers$OpenStreetMap)
```
```{r, eval=FALSE, echo=FALSE}
# Aim: get the largest cities in the world
cities_worldwide = rnaturalearth::ne_download(scale = 10, type = "populated_places")
city_names = c(
  "Delhi",
  "Mexico City",
  "Tokyo",
  "Beijing"
)
city_name = "Delhi"
city_centre = tmaptools::geocode_OSM(city_name, as.sf = TRUE)
zones_delhi = zb_zone(x = city_centre, n_circles = 5)
tm_shape(zones_delhi) + tm_borders() +
tm_text("label")
```
### How many supermarkets in different zones of the city?
```{r, eval=FALSE}
devtools::install_github("itsleeds/geofabrik")
library(geofabrik)
leeds_shop_polygons = get_geofabrik(leeds_centroid, layer = "multipolygons", key = "shop", value = "supermarket")
```
```{r, eval=FALSE, echo=FALSE}
saveRDS(leeds_shop_polygons, "leeds_shop_polygons.Rds")
piggyback::pb_upload("leeds_shop_polygons.Rds")
piggyback::pb_download_url("leeds_shop_polygons.Rds")
saveRDS(leeds_centroid, "leeds_centroid.Rds")
piggyback::pb_upload("leeds_centroid.Rds")
piggyback::pb_download_url("leeds_centroid.Rds")
# leeds_roads = get_geofabrik(name = leeds_centroid)
# leeds_shop_points = get_geofabrik(leeds_centroid, layer = "points", key = "amenity", value = "shop")
```
We have pre-saved the results as follows:
```{r}
leeds_shop_polygons = readRDS(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/leeds_shop_polygons.Rds"))
z = zb_zone(x = leeds_centroid, area = zones_leeds_official, n_circles = 5)
z_supermarkets = aggregate(leeds_shop_polygons["shop"], z, FUN = length)
tm_shape(z_supermarkets) +
tm_polygons("shop", alpha = 0.5, title = "N. Supermarkets")
```
<!--  -->
#### Explore how mode and distance of travel changes depending on city zones
```{r}
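# Sketch of one possible approach (not a definitive implementation): compare
# commute mode shares across the ClockBoard zones of Leeds, using the PCT
# zone data loaded above. The column names `all`, `bicycle` and `car_driver`
# are assumed -- check names(zones_west_yorkshire) and adjust if needed.
zones_mode = zones_west_yorkshire %>%
  sf::st_point_on_surface() %>%
  sf::st_join(zones_leeds_zb["label"]) %>%
  sf::st_drop_geometry() %>%
  group_by(label) %>%
  summarise(pcycle = sum(bicycle) / sum(all),
            pdrive = sum(car_driver) / sum(all))
zones_leeds_mode = left_join(zones_leeds_zb, zones_mode, by = "label")
tm_shape(zones_leeds_mode) +
  tm_polygons("pcycle", title = "Cycling mode share")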
```
Robin to create UK example
#### Demo Dutch cities and commuting
See [demo Dutch cities vignette](https://zonebuilders.github.io/zonebuilder/articles/demo_dutch_cities.html)
| /scratch/gouwar.j/cran-all/cranData/zonebuilder/vignettes/hackathon.Rmd |
---
title: "ClockBoard: a zoning system for urban analysis"
bibliography: references.bib
# # For R package vignette
output: bookdown::html_vignette2
vignette: >
%\VignetteIndexEntry{ClockBoard: a zoning system for urban analysis}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
pkgdown:
as_is: true
set_null_theme: false
# # For paper: Comment the above and uncomment below for LaTeX version
# output:
# bookdown::pdf_book:
# base_format: rticles::josis_article
# author1: Robin Lovelace
# affil1: University of Leeds
# author2: Martijn Tennekes
# affil2: Department, Institution, Country TBC
keywords: "zoning, areal data, zoning systems, modifiable area unit problem"
abstract: |
Zones are the building blocks of urban analysis. Fields ranging from
demographics to transport planning routinely use zones --- spatially
contiguous areal units that break-up continuous space into discrete
chunks --- as the foundation for diverse analysis techniques. Key
methods such as origin-destination analysis and choropleth mapping
rely on zones with appropriate sizes, shapes and coverage. However,
existing zoning systems are sub-optimal in many urban analysis
contexts, for three main reasons: 1) administrative zoning systems are
often based on somewhat arbitrary factors; 2) evidence-based zoning
systems are often highly variable in size and shape, reducing their
utility for inter-city comparison; and 3) the resolution of existing
zoning systems is often too low for certain urban analysis, especially
in low income nations. To tackle these three key issues we developed a
flexible, open and scalable solution: the ClockBoard zoning system.
ClockBoard consists of 12 segments divided by concentric rings of
increasing distance, creating a consistent visual frame of reference
for cities that is reminiscent of a clock and a dartboard. This paper
outlines the design, potential uses and merits of the ClockBoard
zoning system and discusses future avenues for research and
development of new zoning systems based on the experience.
runningtitle: "The ClockBoard Zoning System"
# Explore visual editor options: sentence based line breaks...
# changing annoying arbitrary character based linebreaks:
editor_options:
markdown:
wrap: sentence
# That automagically reformatted the Rmd doc!
# Result: it works!
---
```{r, include = FALSE}
knitr::opts_chunk$set(
eval = FALSE,
echo = FALSE,
collapse = TRUE,
comment = "#>",
message = FALSE
)
library(zonebuilder)
library(tmap)
library(dplyr)
```
```{r, eval=FALSE}
# To uses josis template:
remotes::install_github("robinlovelace/rticles", ref = "josis")
refs = RefManageR::ReadZotero(group = "418217", .params = list(collection = "8S8LR8TK", limit = 100))
RefManageR::WriteBib(refs, "vignettes/references.bib")
# Set-up notes for pdf version
# convert .tex to .md
# system("pandoc -s -r latex paper.tex -o paper.md")
# # copy josis specific files and ignore them
# f = list.files("~/other-repos/rticles/inst/rmarkdown/templates/josis/skeleton", pattern = "josis", full.names = TRUE)
# file.copy(f, "vignettes")
rmarkdown::render(input = "vignettes/paper.Rmd", output_file = "../zonebuilder-paper.pdf")
browseURL("zonebuilder-paper.pdf")
piggyback::pb_upload("zonebuilder-paper.pdf")
```
# Introduction
Zoning systems have long been used for practical purposes.
They have been integral to land ownership, rents and urban policies for centuries, forming the basis of a range of social and economic practices.
Historical examples highlighting the importance of zone layouts include 'tithe maps' determining land ownership and taxes in 18th Century England [@bryant_worcestershire_2007] and legally defined urban land use zones to tame chaotic urban expansion in the rapidly growing US cities of the early 1900s [@baker_zoning_1925].
In the 19th Century, zoning systems gained notoriety for political reasons, with 'gerrymandering' entering public discourse and academic research following Elbridge Gerry's apparent attempt to gain political advantage by creating an electoral district in an odd shape that was said to resemble a salamander (hence the term's name) in 1812 [@orr_persistence_1969].
The gerrymandering problem has since been the topic of countless academic papers.
The gerrymandering problem (itself a manifestation of the modifiable area unit problem) can be described as a mathematical optimization problem: "$n$ units are grouped into $k$ zones such that some cost function is optimized, subject to constraints on the topology of the zones" [@chou_taming_2006]. In fact, this is a concise definition of the broader "zoning problem" that starts from the assumption that zones are to be composed of one or more basic statistical units (BSUs). Although the range of outcomes is a finite combinatorial optimisation problem (which combinations of BSU-zone aggregations satisfy/optimise some pre-determined criteria) the problem is still hard: "there are a tremendously large number of alternative partitions, a similar number of different results, and only a slightly smaller number of different interpretations" [@openshaw_optimal_1977].
The problem that we tackle in this paper is different, however: it is the division of geographic space into zones **starting from a blank slate**, without reference to pre-existing areal units.
The focus of much preceding zoning research on BSU partitioning can be explained by the fact that much geographic data available to academics comes in 'pre-packaged' small areas and because creating zones from nothing is a harder problem.
We disagree with the statement that "existence of individual or non-spatially aggregated data is rare in geography" [@openshaw_optimal_1977], pointing to car crashes, shop locations, species identification data and dozens of other phenomena that can be understood as 'point pattern processes'.
And with advances in computer hardware and software, the 'starting from scratch' approach to zone creation has become more feasible.
A number of approaches have tackled the question of how to best divide up geographical space for analysis and visualisation purposes, with a variety of applications.
Functional zone classification is common in the field of remote sensing and associated sub-fields involved in analysing and classifying raster datasets [@ciglic_evaluating_2019; @hesselbarth_landscapemetrics_2019].
While such pixel-based approaches can yield complex and flexible results (depending on the geographic resolution of the input data), they are still constrained by the building blocks of the pixels, which can be seen as a particular type of areal unit, a uniformly sized and shaped BSU.
In this paper we are interested in the division of *continuous space* into completely new areal systems.
This has been done using contour lines to represent lines of equal height, and the concept's generalisation to lines of equal journey time from locations (isochrones) [@long_modeling_2018], population density (isopleths) [@lin_cartographic_2017] and model parameters which vary across continuous geographical space [@paez_exploring_2006].
The boundaries created by these various 'iso' maps are 'procedurally generated' areal units of the type that this paper focuses on, but their variability and often irregular shapes make them impractical for many types of urban analysis.
Procedural generation, which involves the generation of data through a repeated and sometimes randomised computational process has long been used to represent physical phenomena [@onrust_ecologically_2017]. The approach has been used to generate spatial entities including roads [@galin_procedural_2010], indoor layouts of buildings [@anderson_augmented_2018] and urban layouts [@mustafa_procedural_2020]. Algorithms have also been developed to place linear features on a map, as illustrated by an algorithm that optimizes the placement of overlapping linear features for cartographic visualisation [@teulade-denantes_routes_2015].
However, no previous research has demonstrated the creation of zoning systems specifically for the purposes of urban analysis.
New visualisation techniques are needed to represent new (or newly quantifiable) concepts and emerging datasets (such as OpenStreetMap) in urban analysis.
The visualisation of direction has been driven by new navigational requirements and datasets, with circular compasses and displays common in land and sea navigational systems since the mid 1900s [@honick_pictorial_1967]. Circular visualisation techniques, in the form of rose diagrams, were used in a more recent study to indicate the most common road directions relative to North [@boeing_spatial_2021]. The resulting visualisations are attractive and easy to interpret, but are not geographical, in the sense that they cannot meaningfully be overlaid on mapped data. The approach we present in this paper is more closely analogous to 'grid sample' approaches used in ecological and population research [@hirzel_which_2002]. Historically, environmental researchers have used rectangular (and usually square) grids to divide up space and decide sampling strategies. Limitations associated with this simplistic strategy have been documented since at least the 1960s, with a prominent paper on geographic sampling strategies outlining advantages and disadvantages of simple random, systematic and stratified sampling techniques in 1967 [@holmes_problems_1967]. Starting with data at the level of raster grid cells and BSUs, a related approach is to sample from within available 'pixels' to generate a representative sample [@thomson_gridsample_2017].
Unlike BSU based zoning systems, grid sampling strategies require no prior zones.
Unlike 'procedurally generated' areas, grid-based strategies generate areal units of consistent sizes and shapes.
However, grid-based strategies are limited in their applicability to urban research because they seldom generate geographically contiguous results and do not account for the strong tendency of human settlements to have a (more-or-less clearly demarcated) central location with higher levels of activity.
Pre-existing zoning systems are often based on administrative regions.
Although those zoning systems are usually in line with the hierarchical structure of governmental organisations, and therefore may work well for policy making, there are a couple of downsides to using such zoning systems.
First of all, since a city and its politics change over time, the administrative regions often change accordingly.
This makes it harder to do time series analysis.
Since administrative regions have heterogeneous characteristics, for instance population size, area size and proximity to the city centre, comparing different administrative regions within a city is not straightforward.
Moreover, comparing administrative regions across cities is even more challenging, since the average scale of an administrative region may vary a lot between cities.
Grid tiles are popular in spatial statistics for a number of reasons.
Most importantly, the tiles have a constant area size, which makes comparisons possible.
Moreover, grid tiles do not change over time, unlike administrative regions.
However, one downside is that a grid requires a coordinate reference system (CRS) that enforces (approximately) equal area sizes.
For continents or large countries, a CRS is always a compromise.
Therefore, the areas of the tiles may vary, or the shape of the tiles may be sheared or warped.
Another downside from a statistical point of view is that population densities are not uniform within an urban area, but concentrated around a centre.
As a consequence, high resolution statistics are preferable in the dense areas, i.e. the centre, and lower resolution statistics in other parts of the city.
That is the reason why administrative regions are often smaller in dense areas.
The approach presented in this paper aims to minimise input data requirements, generate consistent zones comparable between widely varying urban systems, and provide geographically contiguous areal units.
The motivations for generating a new zoning system and use cases envisioned include:
- Locating cities.
Automated zoning systems based on a clear centrepoint can support map interpretation by making it immediately clear where the city centre is, and what the scale of the city is.
- Reference system for everyday life.
The zone name contains information about the distance to the center as well as the cardinal direction.
E.g "I live in C12 and work in B3." or "The train station is in the center and our hotel is in B7".
Moreover, the zones indicate whether walking and cycling is a feasibly option regarding the distance.
- Aggregation for descriptive statistics / comparability over cities.
By using the zoning system to aggregate statistics (e.g. on population density, air quality, bicycle use, number of dwellings), cities can easily be compared to each other.
- Modelling urban systems.
The zoning system can be used to model urban mobility.
The paper is structured as follows.
The next section outlines the approach, which requires only 2 inputs: the coordinates of the central place in the urban system under investigation, and the minimum radius from that central point that the zoning system should extend.
Section 3 describes a number of potential applications, ranging from rudimentary navigation and location identification to mobility analysis.
Finally, in Section 4, we discuss limitations of the approach and possible directions of research and development to generate additional zoning systems for urban analysis.
# The ClockBoard zoning system
The aim of the ClockBoard zoning system outlined in this paper is to tackle the issues outlined above, with an approach that is free, open, reproducible and easy to extend.
Specifically, we developed the system considering urban analysis research and visualisation requirements, leading to the following high-level criteria.
Zoning systems for urban analysis should:
- contain intuitively named zones, enabling public communication of research, e.g. with reference to common perceptions of space in terms of distance from the city centre and direction relative to North
- be easy to visualise without too many (100+) or too few (fewer than 10) zones
- include zones of consistent and useful sizes, for example with zone areas increasing with distance from the urban centres to reflect relatively high densities in central locations
- be 'scale agnostic', capable of representing urban forms ranging from extensive cities such as Mexico City to compact cities such as Hong Kong
- be extensible and based on open source software, enabling others to create alternative zoning systems suited to diverse needs
After a process of iteration in which we considered many zoning options (some of which are illustrated in Figure \@ref(fig:options)) we settled on a system that we have called 'ClockBoard' (for reasons that will become apparent) and which has the following characteristics.
The zoning system is based on **concentric rings** --- formally called 'concentric annuli' --- which emphasise central locations and have been used to explore the relationships between the characteristics of 'focal trees' and surrounding trees in ecological research [@wills_persistence_2016], as shown in Figure \@ref(fig:options) (a).
The zoning system is based on **segments** defined by radial lines emanating from the central point of the settlement (or other geographic entity) to be divided into zones, as shown in Figure \@ref(fig:options) (b).
From that point, we experimented with a range of ways of dividing the concentric annuli into different zones by modifying the distances between rings (the annuli borders) and the number of segments per annulus (see Figure \@ref(fig:options) c).
It became apparent that zoning systems based on the two organising principles (and modifiable parameters) of concentric annuli and segments held promise, but selecting appropriate settings for each was key to the development of a zoning system that would meet the criteria outlined above.
<!-- commented out the next sentence as it's kind of obvious -->
<!-- The key parameters of annuli distances and number of segments are discussed below. -->
```{r options, fig.cap="Illustration of ideas explored in the lead-up to the development of the ClockBoard zoning system, highlighting the incremental and iterative evolution of the approach.", out.width="32%", fig.show='hold'}
# z1 = zb_zone(x = london_c(), n_segments = 1)
# m1 = qtm(z1, title = "(A) Concentric Annuli")
# sf::sf_use_s2(use_s2 = FALSE)
z1 = zb_zone(london_c(), n_segments = 1, distance_growth = 0)
z1_areas = sf::st_area(z1)
z1_areas_relative = as.numeric(z1_areas / z1_areas[1])
zb_plot(z1, title = "(A) Concentric Annuli")
zb_plot(zb_segment(london_c(), n_segments = 12), title = "(B) Clock segments")
qtm(zb_zone(london_c(), n_segments = z1_areas_relative, labeling = "clock", distance_growth = 0), title = "(C) Equal area zones", fill = NULL)
```
## Annuli distances
The radius of each annuli in the zoning system can be incremented by a fixed amount, as shown in previous figures.
In cases where high geographic resolution is important near the centre of the study region, such as when designing transport systems into the central zone of a city, increasing the distance between successive radii may be desirable.
We experimented with various ways of incrementing the annuli width and suggest linear increases in width as a sensible default for a simple zoning system.
This linear growth leads to the distance between successive annulus boundaries increasing in line with the steps in the [triangular number sequence](https://en.wikipedia.org/wiki/Triangular_number) [@ross_dicuil_2019], as outlined in Table \@ref(tab:t1).
```{r t1}
txt = "Number of rings,Diameter across (km),Area (sq km)
1,1,2,3.14
2,2,6,28.27
3,3,12,113.10
4,4,20,314.16
5,5,30,706.86
6,6,42,1385.44
7,7,56,2463.01
8,8,72,4071.50
9,9,90,6361.73"
t1 = read.csv(text = txt, check.names = FALSE)
knitr::kable(t1, booktabs = TRUE, caption = "Key attributes of first 9 rings used in the ClockBoard zoning system.")
```
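The relationship between ring widths, radii and areas in Table \@ref(tab:t1) follows directly from the triangular number sequence, as the following minimal sketch (not evaluated) illustrates:

```{r, eval=FALSE}
n = 1:9                  # number of rings
width = n                # ring widths grow linearly: 1, 2, ..., 9 km
radius = cumsum(width)   # outer radii are the triangular numbers: 1, 3, 6, 10, ...
data.frame(rings = n, width, diameter = 2 * radius, area = round(pi * radius^2, 2))
```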
## Number of segments
A variant with 4 segments is sketched below for comparison.
The ClockBoard zoning system has 12 segments, representing a compromise between specificity of zone identification (imagine a system with 256 segments and saying "I'm in zone E173"!) and ease of comprehension:
the 12 segments of a clock face are well understood.
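The trade-off can be inspected visually with the package's own plotting helpers (a sketch, not evaluated here):

```{r, eval=FALSE}
# compare segment counts around the same centre point
zb_plot(zb_segment(london_c(), n_segments = 4), title = "4 segments")
zb_plot(zb_segment(london_c(), n_segments = 12), title = "12 segments")
```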
## City extents
# Applications
## Navigation and location
## Exploring city scale data
Univariate description:
- Population density in London
- Social (e.g. religion) and demographic distributions
## Inter-city statistical comparison
```{r, fig.width=7, message=FALSE, warning=FALSE}
# download preprocessed data (processing script /data-raw/crashes.R)
df = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/ksi_bkm_zone.rds")))
uk = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/uk.rds")))
thames = readRDS(gzcon(url("https://github.com/zonebuilders/zonebuilder/releases/download/0.0.1/thames.rds")))
# filter: set zones with less than 20,000 km of cycling per year to NA
df_filtered = df %>%
mutate(ksi_bkm = ifelse((bkm_yr * 1e09) < 2e04, NA, ksi_bkm))
tmap_mode("plot")
tm_shape(uk) +
tm_fill(col = "white") +
tm_shape(df_filtered, is.master = TRUE) +
tm_polygons("ksi_bkm", breaks = c(0, 1000, 2500, 5000, 7500, 12500), textNA = "Too little cycling", title = "Killed and seriously injured cyclists\nper billion cycled kilometers") +
tm_facets(by = "city", ncol=4) +
tm_shape(uk) +
tm_borders(lwd = 1, col = "black", lty = 3) +
tm_shape(thames) +
tm_lines(lwd = 1, col = "black", lty = 3) +
tm_layout(bg.color = "lightblue")
```
<!-- {#fig:cyclAccidents width="\\textwidth"} -->
## Mobility analysis
# Discussion and conclusions
Pros:
- Most cities have a radial plan around a central area, which is often a historic centre or a central business area. Typically, this centre is not only the geographic centre, but also the busiest area in terms of daytime population. Often the main nodes in the urban transport network are also located in or near the city centre. Note that many cities already consist of concentric rings, separated by a ring road. (See also <https://en.wikipedia.org/wiki/City_centre> which describes the centre as the heart of the city)
Cons:
- Some cities have two or more centres.
Many cities have a central business district or financial district which does not always coincide with the historic city centre.
- In urban areas with nearby cities, it may not always be clear where one city ends and another begins.
Also, small cities may be located within the metropolitan area of a larger city (e.g. the Dutch cities The Hague/Delft).
<!-- World cities todo: create appendix-->
<!-- ============ -->
<!-- {#fig:cities1 -->
<!-- width="\\textwidth"} -->
<!-- {#fig:cities2 width="\\textwidth"} -->
<!-- {#fig:cities3 width="\\textwidth"} -->
<!-- # Old paper -->
<!-- # Introduction -->
<!-- ### Current situation -->
<!-- Statistics, policy making and transport planning are often based on administrative regions. However, there are a couple of downsides to using administrative regions. First of all, since a city and its politics change over time, the administrative regions often change accordingly. This make it harder to do time series analysis. Since the administrative regions have heterogeneous characteristics, for instance population size, area size, proximity to the city centre, comparing different administrative regions within a city is not straightforward. Moreover, comparing administrative regions across cities is even more challenging since average scale of an administrative region may vary a lot across cities. -->
<!-- Another downside from a statistical point of view is that population densities are not uniform within a urban area, but concentrated around a centre. As a consequence, high resolution statistics is preferable in the dense areas, i.e. the centre, and lower resolution statitics in other parts of the city. That is the reason why administrative regions are often smaller in dense areas. -->
<!-- ### Why is it needed/handy? -->
<!-- * Locating cities. By having a ClockBoard zoning system for a city, it is immediate clear where the city centre is, and what the scale of the city is. -->
<!-- * Reference system of everyday live. The zone name contains information about the distance to the center as well as the cardinal direction. E.g "I live in C12 and work in B3." or "The train station is in the center and our hotel is in B7". Moreover, the zones indicate whether walking and cycling is a feasibly option regarding the distance. -->
<!-- * Aggregation for descriptive statistics / comparability over cities. By using the zoning system to aggregate statistics (e.g. on population density, air quality, bicycle use, number of dwellings), cities can easily be compared to each other. -->
<!-- * Modelling urban cities. The zoning system can be used to model urban mobility. -->
<!-- ### Arguments / discussion -->
<!-- Pros: -->
<!-- * Most cities have a radial plan around a central area, which is often a historic centre or a central business area. -->
<!-- Typically, this centre is not only the geographic centre, but also the busiest area in terms of daytime population. Often the main nodes in the urban transport network are also located in or near the city centre. Note that many cities already consist of concentric rings, separated by a ring road. (See also https://en.wikipedia.org/wiki/City_centre which describes the centre as the heart of the city) -->
<!-- Cons: -->
<!-- * Some cities have two or more centres. Many cities have a central business discrict or financial discrict which not always coinsides with the historic city centre. -->
<!-- * In urban areas with nearby cities, it may not always be clear where one cities ends and another begins. Also, small cities may be located within the metropolitan area of a larger city (e.g. the Dutch cities The Hague/Delft) -->
<!-- Comparing two cities can be difficult due to incomparable zoning systems. -->
<!-- Take the example of city A is provided in a detailed zoning system composed of hundreds of small, irregular areas while city B is composed only of 9 irregular zones -->
<!-- TODO: reference to https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0050606 -->
# References
<!-- end of zonebuilder/vignettes/paper.Rmd -->
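# crossproduct() computes the 3D cross product v x w of two numeric 3-vectors
#
# v, w numeric 3-vectors
#
# returns a numeric 3-vector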
crossproduct <- function( v, w )
{
out = c( v[2]*w[3] - v[3]*w[2], -v[1]*w[3] + v[3]*w[1], v[1]*w[2] - v[2]*w[1] )
return( out )
}
# allcrossproducts() computes the 3D cross product for every pair of columns of a 3xN matrix
#
# A a 3xN matrix
#
# returns a 3 x N(N-1)/2 matrix with the column order the same as allpairs()
allcrossproducts <- function( A )
{
ok = is.double(A) && is.matrix(A) && nrow(A)==3
if( ! ok )
return(NULL)
out = .Call( C_allcrossproducts, A )
return( out )
}
# vec positive integer scalar or vector.
# A scalar is interpreted as a length, and equivalent to the vector being 1:n
#
# returns an integer matrix with n*(n-1)/2 rows and 2 columns, the rows being all pairs in standard order
allpairs <- function( vec )
{
n = length(vec)
if( n == 1L )
n = as.integer(vec)
if( n < 2L ) return(NULL)
out = .Call( C_allpairs, n )
if( 1L < length(vec) )
{
# transfer from 1:n to vec[]
# this can take over 1 msec, when n >= 320
dim.saved = dim(out)
out = vec[ out ]
dim(out) = dim.saved
}
return(out)
}
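# example (a sketch): allpairs(3L) returns the pairs (1,2), (1,3), (2,3), one per row,
# i.e. the same order as arrangements::combinations(3,2) used in crossproducts2() below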
# crossprods 3 x N(N-1)/2 matrix with unitized crossproduct pairs in the columns
# hyperplane list with integer vectors, defining non-trivial hyperplanes from the ground set, length M
# crossprodsref 3 x M matrix, with the "reference" crossproducts in the columns, one for each hyperplane
# ground N vector of increasing integers forming the ground set
snapcrossprods <- function( crossprods, hyperplane, crossprodsref, ground )
{
n = length(ground)
if( ncol(crossprods) != n*(n-1)/2 ) return(NULL)
m = length(hyperplane)
if( ncol(crossprodsref) != m ) return(NULL)
idxfromground = idxfromgroundfun( ground )
out = crossprods
for( k in 1:m )
{
# get the "reference" crossproduct
cpref = crossprodsref[ , k ]
imax = which.max( abs(cpref) )
signref = sign( cpref[imax] )
idxraw = idxfromground[ hyperplane[[k]] ]
# copy cpref to all the corresponding columns, while changing the sign when appropriate
idxpair = allpairs( idxraw )
for( i in 1:nrow(idxpair) )
{
j = PAIRINDEX( idxpair[i,1], idxpair[i,2], n )
out[ ,j] = sign( out[imax,j] ) * signref * cpref
}
}
return( out )
}
############### deadwood below ####################################
# crossproducts2() computes the 3D cross product for every pair of columns of a 3xN matrix
#
# A a 3xN matrix
#
# returns a data.frame with 2 columns:
# idx column indexes j1 and j2
# crossprod the cross product of columns j1 and j2
crossproducts2 <- function( A )
{
ok = is.double(A) && is.matrix(A) && nrow(A)==3
if( ! ok )
{
return(NULL)
}
p12 = base::crossprod( A[1, ,drop=F], A[2, ,drop=F] ) #; print( str(p12) )
p13 = base::crossprod( A[1, ,drop=F], A[3, ,drop=F] )
p23 = base::crossprod( A[2, ,drop=F], A[3, ,drop=F] )
d12 = p12 - t(p12)
d13 = p13 - t(p13)
d23 = p23 - t(p23)
n = ncol(A)
if( requireNamespace( 'arrangements', quietly=TRUE ) )
idx = arrangements::combinations(n,2) # faster
else
idx = t( utils::combn(n,2) ) # matrix of pairs. slower
return( t( cbind( d23[idx], -d13[idx], d12[idx] ) ) )
out = data.frame( row.names=1:nrow(idx) )
out$idx = idx
out$crossprod = cbind( d23[idx], -d13[idx], d12[idx] )
return(out)
}
# ---- end of zonohedra/R/crossproduct.R ----
######## a few classic zonohedra from https://www.ics.uci.edu/~eppstein/junkyard/ukraine/ukraine.html ######
# returns a list of 3xN matrices, with class "genlist" prepended
makeClassics <- function()
{
phi = (1 + sqrt(5))/2
s2 = sqrt(2)
s3 = sqrt(3)
out = list()
# cube 3 generators
W = diag(3)
shortname = "C"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "cube"
out[[shortname]] = W
# rhombic dodecahedron 4 generators
W = matrix( c(1,1,1, 1,-1,1, 1,1,-1, 1,-1,-1), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "RD"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombic dodecahedron"
out[[shortname]] = W
# Bilinski dodecahedron 4 generators
W = matrix( c(1,phi,0, phi,0,1, 0,1,phi, -1,phi,0), nrow=3, byrow=FALSE )
shortname = "BD"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "Bilinski dodecahedron"
out[[shortname]] = W
# rhombic icosahedron 5 generators
W = matrix( c(1,phi,0, phi,0,1, 0,1,phi, -1,phi,0, phi,0,-1), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "RI"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombic icosahedron"
out[[shortname]] = W
# rhombo-hexagonal dodecahedron 5 generators
W = matrix( c(0,s3,1, s3,0,1, 0,-s3,1, -s3,0,1, 0,0,2), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "RHD"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombo-hexagonal dodecahedron"
out[[shortname]] = W
# rhombic triacontahedron 6 generators
W = matrix( c(1,phi,0, phi,0,1, 0,1,phi, -1,phi,0, phi,0,-1, 0,-1,phi), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "RT"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombic triacontahedron"
out[[shortname]] = W
# truncated octahedron 6 generators
W = matrix( c(1,1,0, 1,-1,0, 1,0,1, 1,0,-1, 0,1,1, 0,1,-1), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "TO"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "truncated octahedron"
out[[shortname]] = W
# truncated rhombic dodecahedron 7 generators
W = matrix( c(1,1,1, 1,1,-1, 1,-1,1, 1,-1,-1, s3,0,0, 0,s3,0, 0,0,s3), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "TRD"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "truncated rhombic dodecahedron"
out[[shortname]] = W
# truncated cuboctahedron 9 generators
W = matrix( c(1,1,0, 1,-1,0, 1,0,1, 1,0,-1, 0,1,1, 0,1,-1, s2,0,0, 0,s2,0, 0,0,s2), nrow=3, byrow=FALSE )
W = reorderGenerators( W ) #; print( W )
shortname = "TC"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "truncated cuboctahedron"
out[[shortname]] = W
# rhombic enneacontahedron 10 generators
W = c( 1,1,1, 1,-1,1, -1,1,1, -1,-1,1,
0,phi,phi-1, 0,phi,1-phi,
phi-1,0,phi, phi-1,0,-phi,
phi,phi-1,0, phi,1-phi,0
)
W = matrix( W, nrow=3, byrow=FALSE ) #; print(W)
shortname = "RE"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombic enneacontahedron"
out[[shortname]] = W
# rhombic hectotriadiohedron 12 generators
r4 = 1 + s2
W = c( 1, 1, r4,
1, -1, r4,
1, 1, -r4,
1, -1, -r4,
r4, 1, 1,
r4, 1, -1,
-r4, 1, 1,
-r4, 1, -1,
1, r4, 1,
-1, r4, 1,
1, -r4, 1,
-1, -r4, 1
)
W = matrix( W, nrow=3, byrow=FALSE ) #; print(W)
shortname = "RH"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "rhombic hectotriadiohedron"
out[[shortname]] = W
# truncated icosidodecahedron 15 generators
W = c( 1,phi,phi-1, 1,-phi,phi-1,
1,-phi,1-phi, 1,phi,1-phi,
phi,1-phi, 1,phi,1-phi,-1,
phi,phi-1,-1, phi,phi-1,1,
phi-1,1,phi, phi-1,-1,-phi,
phi-1,1,-phi, phi-1,-1,phi,
2,0,0, 0,2,0, 0,0,2 )
W = matrix( W, nrow=3, byrow=FALSE ) #; print(W)
W = reorderGenerators( W ) #; print( W )
shortname = "TI"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "truncated icosidodecahedron"
out[[shortname]] = W
# truncated small rhombicosidodecahedron 21 generators
W = c( 1,0,-phi, 1,0,phi,
0,-phi,1, 0,phi,1,
-phi,1,0, phi,1,0,
1,phi,phi-1, 1,-phi,phi-1,
1,-phi,1-phi, 1,phi,1-phi,
phi,1-phi,1, phi,1-phi,-1,
phi,phi-1,-1, phi,phi-1,1,
phi-1,1,phi, phi-1,-1,-phi,
phi-1,1,-phi, phi-1,-1,phi,
2,0,0, 0,2,0, 0,0,2 )
W = matrix( W, nrow=3, byrow=FALSE )
shortname = "TSR"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "truncated small rhombicosidodecahedron"
out[[shortname]] = W
class( out ) = c( "genlist", class(out) )
return( out )
}
makeColorimetry <- function()
{
out = list()
# xyz at 5nm
path = "../inst/extdata/xyz1931.5nm.txt"
W = as.matrix( read.table( path, sep=' ', header=T ) )
rownames(W) = W[ ,1]
W = t( W[ ,2:4] )
shortname = "xyz1931.5nm"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "xyz at 5nm step (1931)"
out[[shortname]] = W
# xyz at 1nm
path = "../inst/extdata/ciexyz31_1.csv"
W = as.matrix( read.table( path, sep=',', header=T ) )
rownames(W) = W[ ,1]
W = t( W[ ,2:4] )
shortname = "xyz1931.1nm"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "xyz at 1nm step"
out[[shortname]] = W
# lms at 1nm
path = "../inst/extdata/lms2000.1nm.csv"
df = read.table( path, header=T, sep=',' ) #; print( str(df) )
W = as.matrix( df[ , 2:ncol(df) ] ) #; print( str(data) )
W = t(W)
W[ is.na(W) ] = 0
colnames(W) = df[ ,1]
shortname = "lms2000.1nm"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "lms at 1nm step"
out[[shortname]] = W
# xyz at 5nm, modified by Judd and Voss
path = "../inst/extdata/ciexyzjv.csv"
df = read.table( path, header=T, sep=',' ) #; print( str(df) )
W = as.matrix( df[ , 2:ncol(df) ] ) #; print( str(data) )
W = t(W)
W[ is.na(W) ] = 0
colnames(W) = df[ ,1]
shortname = "ciexyzjv.5nm"
attr( W, "shortname" ) = shortname
attr( W, "fullname" ) = "xyz at 5nm step (1978)"
out[[shortname]] = W
class( out ) = c( "genlist", class(out) )
return( out )
}
# matgen 3 x N matrix of generators
reorderGenerators <- function( matgen )
{
require( zonohedra )
zono = zonohedron( matgen )
if( ! is_pointed(zono) ) return( matgen ) # no change
normal = supportingnormal0( zono )
if( any( is.na(normal) ) ) return( NULL ) # should not happen
rot3x3 = goodframe3x3( normal )
genrot = crossprod( rot3x3, matgen ) # same as t(rot3x3) %*% matgen
z = genrot[3, ]
# these must be all positive
if( ! all( 0 < z ) ) return( NULL ) # should not happen
x = genrot[1, ]
y = genrot[2, ]
theta = atan2( y, x )
# sort in cclockwise order
perm = order( theta ) #; print( perm )
out = matgen[ , perm]
return( out )
}
saveDatasets <- function( .path="../data/zonohedra.rda" )
{
savevec = character(0)
classics.genlist = makeClassics()
savevec = c( savevec, "classics.genlist" )
colorimetry.genlist = makeColorimetry()
savevec = c( savevec, "colorimetry.genlist" )
## finally ready to save it
save( list=savevec, file=.path, compress='xz' ) # 'xz' 'gzip' FALSE
return( invisible(TRUE) )
}
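# Example usage (a sketch, intended to be run from the package source tree):
# classics.genlist = makeClassics()
# attr( classics.genlist[["TO"]], "fullname" ) # "truncated octahedron"
# saveDatasets() # writes classics.genlist and colorimetry.genlist to ../data/zonohedra.rda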
# ---- end of zonohedra/R/datasets.R ----
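# Hash-based replacements for duplicated(), unique() and anyDuplicated() on atomic
# matrices, dispatching to C routines (C_dupAtomMatHash etc.) for speed.
# Numeric and complex input is optionally rounded to `signif` significant digits
# before hashing. grpDuplicated() assigns a group id to each row (or column),
# and records the number of distinct groups in the "ngroups" attribute.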
duplicated.matrix = function (x, incomparables = FALSE, MARGIN = 1L, fromLast = FALSE, signif=Inf, ...)
{
if (!is.matrix(x) || !is.atomic(x) || !identical(incomparables, FALSE) || ((nzeroMarg <-MARGIN[1L]!=0L) && MARGIN[1L]!=1L && MARGIN[1L]!=2L) || length(MARGIN)!=1L || (nzeroMarg && dim(x)[-MARGIN]==1L) )
return(base::duplicated.matrix(x, incomparables, MARGIN, fromLast, ...))
if(is.null(signif)) signif = .Call(C_dbl_dig)
if (signif < Inf && (is.numeric(x) || is.complex(x) ) ) x = signif(x, signif)
if (nzeroMarg) {
.Call(C_dupAtomMatHash, x, as.integer(MARGIN), as.logical(fromLast))
}else{
att=attributes(x); dim(x)=c(as.integer(prod(att$dim)), 1L)
res=.Call(C_dupAtomMatHash, x, MARGIN=1L, as.logical(fromLast))
if(any(att$class=='factor')){
att$class= setdiff(att$class, c('ordered','factor','matrix'))
if(length(att$class)==0L) att$class=NULL
att$levels=NULL
}
attributes(res)=att
res
}
}
unique.matrix=function (x, incomparables = FALSE, MARGIN = 1, fromLast = FALSE, signif=Inf, ...)
{
if (!is.matrix(x) || !is.atomic(x) || !identical(incomparables, FALSE) || (MARGIN[1L]!=1L && MARGIN[1L]!=2L) || length(MARGIN)!=1L || dim(x)[-MARGIN]==1L )
return(base::unique.matrix(x, incomparables, MARGIN, fromLast, ...))
if(is.null(signif)) signif = .Call(C_dbl_dig)
if (signif < Inf && (is.numeric(x) || is.complex(x) ) ) x = signif(x, signif)
dups=.Call(C_dupAtomMatHash, x, as.integer(MARGIN), as.logical(fromLast))
if(MARGIN==1L) x[!dups,,drop=FALSE] else x[,!dups,drop=FALSE]
}
anyDuplicated.matrix=function(x, incomparables = FALSE, MARGIN = 1, fromLast = FALSE, signif=Inf, ...)
{
if (!is.matrix(x) || !is.atomic(x) || !identical(incomparables, FALSE) || ((nzeroMarg <-MARGIN[1L]!=0L) && MARGIN[1L]!=1L && MARGIN[1L]!=2L) || length(MARGIN)!=1L || prod(dim(x)[-MARGIN])==1L )
return(base::anyDuplicated.matrix(x, incomparables, MARGIN, fromLast, ...))
if(is.null(signif)) signif = .Call(C_dbl_dig)
if (signif < Inf && (is.numeric(x) || is.complex(x) ) ) x = signif(x, signif)
if (nzeroMarg) {
.Call(C_anyDupAtomMatHash, x, as.integer(MARGIN), as.logical(fromLast))
}else{
dx=dim(x); dim(x)=c(as.integer(prod(dx)), 1L)
.Call(C_anyDupAtomMatHash, x, MARGIN=1L, as.logical(fromLast))
}
}
grpDuplicated <- function( x, ... )
{
UseMethod('grpDuplicated')
}
grpDuplicated.default <- function( x, ... )
{
if( (!is.vector(x) && !is.factor(x)) || !is.atomic(x) )
{
log_level( ERROR, '"grpDuplicated" currently only supports atomic vectors/matrices with "incomparables=FALSE"')
#.NotYetImplemented() # return(base::anyDuplicated.matrix(x, incomparables, MARGIN, fromLast, ...))
return(NULL)
}
if( TRUE )
{
dim(x) = c(1L,length(x))
.Call(C_grpDupAtomMatHash, x, 2L ) # return( ) adds a few microseconds !!
}
else
{
dim(x) = c(length(x), 1L)
this.call = match.call()
this.call[[1L]]=as.name('grpDuplicated.matrix')
this.call$x=x
this.call$MARGIN=1L
eval(this.call)
}
}
grpDuplicated.matrix <- function( x, MARGIN=1, ... )
{
if (!is.matrix(x) || !is.atomic(x) || ((nzeroMarg <- MARGIN[1L]!=0L) && MARGIN[1L]!=1L && MARGIN[1L]!=2L) || length(MARGIN)!=1L ) {
message('"grpDuplicated.matrix" currently only supports atomic vectors/matrices with "incomparables=FALSE"')
.NotYetImplemented() # return(base::anyDuplicated.matrix(x, incomparables, MARGIN, fromLast, ...))
}
#if(is.null(signif)) signif = .Call(C_dbl_dig)
#if (signif < Inf && (is.numeric(x) || is.complex(x) ) ) x = signif(x, signif)
if (nzeroMarg)
{
# this C function automatically adds the "ngroups" attribute
ans = .Call(C_grpDupAtomMatHash, x, as.integer(MARGIN) )
# if(fromLast) ans[]=(attr(ans, 'nlevels'):1L)[ans] # ensure the group ids agree with row/col index of result from "unique"
}
else
{
ans = .Call(C_grpDupAtomMatHash, x, MARGIN=1L )
att = attributes(x); dim(x)=c(as.integer(prod(att$dim)), 1L)
att$ngroups = attr( ans, 'ngroups')
attributes(ans)=att
}
ans
}
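# example (a sketch; the exact group numbering is determined by the C routine):
# x = cbind( c(1,2), c(3,4), c(1,2) )
# g = grpDuplicated( x, MARGIN=2 ) # columns 1 and 3 receive the same group id
# attr( g, "ngroups" ) # 2L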
# ---- end of zonohedra/R/duplicated.matrix.R ----
g.microbenchmark = FALSE # logical value, whether the package microbenchmark is loaded. It must be unlocked.
.onLoad <- function( libname, pkgname )
{
# unlockBinding( "g.microbenchmark", asNamespace('zonohedra') ) # asNamespace(pkgname) here generates a NOTE !
g.microbenchmark <<- requireNamespace( 'microbenchmark', quietly=TRUE ) #; cat( "g.microbenchmark=", g.microbenchmark, '\n' )
if( requireNamespace( 'logger', quietly=FALSE ) )
{
# log_formatter( formatter_mine )
log_formatter( logger::formatter_sprintf, namespace="zonohedra" ) # force sprintf(), even if glue is installed
log_layout( layout_mine, namespace="zonohedra" ) # put fn() between timestamp and the msg
log_appender( appender_mine, namespace="zonohedra" ) # maybe stop on ERROR or FATAL
log_threshold( WARN, namespace="zonohedra" ) # default is INFO
}
}
.onAttach <- function( libname, pkgname )
{
info = library( help='zonohedra' ) #eval(pkgname) ?
info = format( info )
mask = grepl( "^(Version|Built)", info ) #Title
info = gsub( "[ ]+", ' ', info[mask] )
# mess = sprintf( "Attaching %s", pkgname )
mess = paste( c( "Package: zonohedra", "Author: Glenn Davis", info ), collapse='. ' ) #; cat(mess)
packageStartupMessage( mess )
#initOptions()
}
# ---- end of zonohedra/R/events.R ----
g.options <- list( stoponerror = TRUE # must be logical
)
# put fn() between timestamp and the msg
layout_mine <- structure(
function(level, msg, namespace="zonohedra",
.logcall = sys.call(), .topcall = sys.call(-1), .topenv = parent.frame())
{
# cat( "obj_addr()=", obj_addr( .topcall[[1L]] ), '\n' )
# cat( "deparse1 =", deparse1( .topcall[[1L]] ), '\n' )
fn = deparse1( .topcall[[1L]] )
paste0( attr(level, 'level'), ' [', format(Sys.time(), "%Y-%m-%d %H:%M:%S"), '] ', namespace, "::", fn, '(). ', msg )
},
generator = quote(layout_mine())
)
appender_mine <- structure(
function(lines)
{
cat(lines, file = stderr(), sep = '\n' )
# test for STOP
if( any( grepl("^(ERR|FATAL)",lines ) ) )
{
stop( "Stopping, because level is ERROR or FATAL.", call.=FALSE )
}
},
generator = quote(appender_mine())
)
###### deadwood below #####################
if( FALSE )
{
formatter_mine <- structure(
function(fmt, ..., .logcall = sys.call(), .topcall = sys.call(-1), .topenv = parent.frame())
{
out = eval(sprintf(fmt, ...), envir = .topenv)
print( str( .topcall ) )
# paste0( sprintf("%s(). ", , eval(sprintf(fmt, ...), envir = .topenv), sep='' )
out
},
generator = quote(formatter_mine())
)
formatter_mine <- function(fmt, ..., .logcall = sys.call(), .topcall = sys.call(-1), .topenv = parent.frame() )
{
out = eval(sprintf(fmt, ...), envir = .topenv) ; cat(out,'\n')
#cat( deparse1( .topcall[[1L]] ), '\n' ) # Inf recursion
#cat( deparse1( .logcall[[1L]] ), '\n' ) # Inf recursion too
cat( "obj_addr()=", obj_addr( .topcall[[1L]] ), '\n' )
sp = sys.parents()
for( k in sp )
{
cat( "k=", k, " obj_addr()=", obj_addr( sys.call(k) ), '\n' )
}
# paste0( sprintf("%s(). ", , eval(sprintf(fmt, ...), envir = .topenv), sep='' )
out
}
make_formatter_mine <- function()
{
out <- function(fmt, ..., .logcall = sys.call(), .topcall = sys.call(-1), .topenv = parent.frame())
{
out = eval(sprintf(fmt, ...), envir = .topenv) ; cat(out,'\n')
cat( deparse( .topcall[[1]] ), '\n' )
# paste0( sprintf("%s(). ", , eval(sprintf(fmt, ...), envir = .topenv), sep='' )
out
}
return( out )
}
# ---- end of zonohedra/R/logger.R ----
# matroid is an S3 class and a list with these items:
#
# ground a positive integer vector in ascending order giving the ground set
# hyperplane a list of integer vectors - the hyperplanes that define the matroid. All are subsets of the ground set.
# rank an integer - the rank
# loop an integer vector - the loops - each is a point in the ground set. Can be empty.
# multiple a list of integer vectors - each is a group of (non-trivial) multiple points in ground set. Can be empty.
# matrix the real matrix of generators - only if the matroid is constructed from a matrix
# multiplesupp a data.frame with #rows = length(multiple), and these columns:
# contiguous are the points of the group contiguous in the ground set minus loops, in the cyclic sense
# these columns are only present when $matrix is present.
# colidx index of corresponding column in output matrix - this is the index of the simplified generators
# cmax coordinate with largest absolute value, used to compute the following
# mixed logical, which is TRUE iff the group has "mixed directions"
# major the longer vector in the zonoseg spanned by the generators in the group, always non-zero
# minor the shorter vector in the zonoseg; this is non-zero iff the group has "mixed directions"
# The row index in multiplesupp matches the list index of multiple. We need a name for this index.
# crossprods 3 x n(n-1)/2 matrix of all possible crossproducts after unitizing. Only present for simple rank 3.
# If i<j and the i'th and j'th generators are g_i and g_j,
# then the column of crossprods[] corresponding to i and j is filled with g_i X g_j unitized.
# crossprodidx integer LUT from hyperplane index to the column index of crossprods. Only present for simple rank 3.
# Used in getnormal.matroid()
# hyperplaneidx integer LUT from crossprods column index to hyperplane index. Only present for simple rank 3.
# this can be used to take a pair of points and find the unique hyperplane that contains them.
# Used in getmetrics.zonohedron()
# simplified a matroid = the simplification of the original matroid - only if the matroid is not simple already
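#
# A minimal usage sketch (assuming the package is loaded; the generic matroid()
# dispatches to matroid.matrix() below):
# mat = matroid( classics.genlist[["RT"]] ) # rhombic triacontahedron generators
# mat$rank # 3L
# str( mat$hyperplane ) # list of integer vectors, subsets of mat$ground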
# x a numeric matrix with 1,2, or 3 rows
# e0 used when nrow(x) >= 1
# e1 used when nrow(x) >= 2
# e2 used when nrow(x) == 3
#
# ground integer vector, strictly increasing positive integers with length(ground) == ncol(x)
#
matroid.matrix <- function( x, e0=0, e1=1.e-6, e2=1.e-10, ground=NULL, ... )
{
#cat( "matroid.matrix\n" )
#cat( "e0=", e0, "e1=", e1, "e2=", e2, "ground=", ground, '\n' )
time0 = gettime()
ok = is.numeric(x) && (nrow(x) %in% 1:3) && (nrow(x) <= ncol(x))
if( ! ok )
{
log_level( ERROR, "matrix x is invalid." )
return(NULL)
}
if( is.integer(x) ) storage.mode(x) = 'double'
elist = list(e0,e1,e2)
ok = all( sapply(elist,is.numeric) ) && all( sapply(elist,length) == 1L )
if( ! ok )
{
log_level( ERROR, "One of e0,e1,e2 is invalid." )
return(NULL)
}
# process the ground set
if( is.null(ground) )
{
ground = intfromchar( colnames(x) )
if( is.null(ground) )
ground = 1:ncol(x)
}
if( length(ground) != ncol(x) )
{
log_level( ERROR, "ground is invalid, because the length=%d is incorrect. It must be %d.",
length(ground), ncol(x) )
return(NULL)
}
if( ! all( 0 < diff(ground) ) )
{
log_level( ERROR, "ground is invalid, because it is not in strictly increasing order." )
return(NULL)
}
# look for small columns in x; these are the loops
loopmask = apply( x, MARGIN=2, function(vec) { max(abs(vec)) } ) <= e0
loopraw = which( loopmask )
# check for too many loops
ok = length(loopraw) <= ncol(x) - nrow(x)
if( ! ok )
{
log_level( ERROR, "Too many loops: %d.", length(loopraw) )
return(NULL)
}
# cleanup the colnames of x
colnames(x) = as.character( ground )
# convert from matrix column indexes to ground indexes
loop = ground[ loopraw ]
# extract the nonloops, and the matrix of nonloops
gnd.noloops = ground[ ! loopmask ]
x.noloops = x[ , ! loopmask, drop=FALSE ]
#print( x.noloops )
# this group of statements works even when rank is 1
xunit = normalizeColumns( x.noloops ) #; print( xunit )
grp = findColumnGroups( xunit, e1, oriented=FALSE ) #; print(grp)
# nonloop is a list of vectors, and not a vector
nonloop = setlistfromvec( grp, gnd.noloops )
condata = condenseMatrix( x, ground, nonloop ) # condenseMatrix( x.noloops, gnd.noloops, nonloop )
x.simple = condata$matrix
gnd.simple = sapply( nonloop, function(v) { v[1] } )
lenvec = lengths( nonloop ) #sapply( nonloop, length )
multiple = nonloop[ 2 <= lenvec ]
lmdata = list( loop=loop, multiple=multiple )
issimple = length(loop)==0 && length(multiple)==0
if( nrow(x) == 1 )
{
# almost trivial case rank=1
# the loops are the single hyperplane,
# and the non-loops are a single multiple group
#out = matroid.list( list(loop), ground=ground )
out = matroid1( loop, gnd.noloops, ground, x, condata$multiplesupp )
if( ! issimple )
{
out$simplified = matroid1( integer(0), gnd.simple[1], gnd.simple, x.simple )
# record the original loops and multiples as an attribute
# attr(out$simplified,"lmdata") = lmdata
attr(out$simplified$hyperplane,"lmdata") = lmdata
}
return( out )
}
if( length(nonloop) < 2 )
{
log_level( ERROR, "The matrix is rank-deficient." )
return(NULL)
}
time1 = gettime()
timelm = time1 - time0
if( nrow(x) == 2 )
{
# case rank=2 is fairly short
out = matroid2( loop, nonloop, ground, x, condata$multiplesupp )
if( ! issimple )
{
# extract only the 1st point from each hyperplane
hyperfirst = lapply( nonloop, function(v) { v[1] } )
# cat( "======\n" ) ; print( nonloop ) ; print( hyperfirst )
#matrix_simp = condenseMatrix( x, out$ground, nonloop )
out$simplified = matroid2( integer(0), hyperfirst, gnd.simple, x.simple )
# record the original loops and multiples as an attribute
#attr(out$simplified,"lmdata") = lmdata
attr(out$simplified$hyperplane,"lmdata") = lmdata
}
return( out )
}
# the difficult case is nrow(x)=3
# cat( "Computing nontrivial hyperplanes:\n" )
crossprods = allcrossproducts( x.simple )
crossprods = normalizeMatrix( crossprods, 2L ) #; print( str(crossprods) )
# set bysize=TRUE, so that non-trivial hyperplanes are in descending order by size
grp = findColumnGroups( crossprods, e2, oriented=FALSE, bysize=TRUE )
# hypersnt is the number of non-trivial groups
hypersnt = max( grp )
time2 = gettime()
timenthp = time2 - time1
log_level( INFO, "Found %d non-trivial hyperplanes.", hypersnt )
if( 0 < hypersnt )
{
hyperplane = vector( hypersnt, mode='list' )
# find the non-trivial hyperplanes, with more than 2 points
pair = allpairs( ncol(x.simple) )
# make table from nontrivial hyperplane index to column in crossprods
crossprodidx_nontriv = integer(hypersnt)
# make table from grouped column in crossprods to hyperplane index
hyperplaneidx = integer( ncol(crossprods) )
warncount = 0
for( i in 1:hypersnt )
{
mask = grp==i
subpair = pair[ mask, , drop=FALSE] #; print( subpair )
hyperraw = fastunion( as.integer(subpair) )
hyperplane[[i]] = gnd.simple[ hyperraw ] # sort( unique( as.integer(subpair) ) ) ]
m = length( hyperplane[[i]] )
if( nrow(subpair) != (m*(m-1L))/2L )
{
# the vector grp[] is missing some pairs that should be in this hyperplane
# fill out the vector grp[] with *all* m*(m-1)/2 pairs in this hyperplane
pairscomplete = allpairs( hyperraw )
pairidx = .Call( C_pairindex, pairscomplete, ncol(x.simple) )
if( length(pairidx) != (m*(m-1L))/2L )
{
log_level( FATAL, "Internal error. length(pairidx)=%d != %d.",
length(pairidx), m*(m-1L)/2L )
return(NULL)
}
grp[ pairidx ] = i
mask[ pairidx ] = TRUE # retro-correct mask[] too !
warncount = warncount + 1
if( warncount <= 10 )
{
log_level( WARN, "hyperplane %d came from %d pairs, but expected %d pairs.",
i, nrow(subpair), m*(m-1L)/2L )
}
}
# get normal from the first pair in the group
whicheqi = which(mask)
crossprodidx_nontriv[i] = whicheqi[1]
hyperplaneidx[whicheqi] = i
# normalnontriv[i, ] = cprods[ ,j]
}
if( 0 < warncount )
log_level( WARN, "There were %d total warnings about hyperplanes and pairs.", warncount )
#print( hyperplane )
#print( x.simple )
#time_start = gettime()
# pcount = paircount1( hyperplane, gnd.simple )
#time_elapsed = gettime() - time_start
#cat( "paircount1() time_elapsed=", time_elapsed, "sec\n" )
# find list of the trivials, with 2 points
hypertriv = trivialhypers2( hyperplane, gnd.simple )
if( is.character(hypertriv) )
{
# there was an error and hypertriv is the message
hypertriv = c( hypertriv, sprintf( "Try reducing argument e2=%g.", e2 ) )
log_level( WARN, paste(hypertriv,collapse='\n') )
#cat( paste(hypertriv,collapse='\n'), '\n' )
return(NULL)
}
#print( hypertriv )
idxtriv = which(grp==0)
crossprodidx_triv = idxtriv
crossprodidx = c( crossprodidx_nontriv, crossprodidx_triv )
hyperplaneidx[idxtriv] = (1:length(idxtriv)) + hypersnt
# within each non-trivial hyperplane, snap all the cross products to agree exactly, up to sign
# crossprods = snapcrossprods( crossprods, hyperplane, crossprods[ ,crossprodidx_nontriv,drop=F], gnd.simple )
# crossprods is modifed "in-place"
.Call( C_snapcrossprods, crossprods, hyperplane, crossprods[ ,crossprodidx_nontriv,drop=F], gnd.simple )
hyperplane = c( hyperplane, hypertriv ) # concatenate the 2 lists
if( length(hyperplane) != length(crossprodidx) )
{
log_level( FATAL, "Internal error. nrow(normaltriv)=%d != %d=length(hypertriv).",
length(hyperplane), length(crossprodidx) )
return(NULL)
}
# 1st test
ok = 1 <= hyperplaneidx & hyperplaneidx <= length(hyperplane)
if( ! all(ok) )
{
log_level( FATAL, "Internal Error. %d values of hyperplaneidx are invalid.", sum(!ok) )
return(NULL)
}
# 2nd test
seqq = 1:length(hyperplane)
ok = hyperplaneidx[ crossprodidx[seqq] ] == seqq
if( ! all(ok) )
{
log_level( FATAL, "Internal Error. %d values of hyperplaneidx o crossprodidx are invalid.", sum(!ok) )
return(NULL)
}
# The matrix pcount[,] has only 0s and 1s in the upper triangle
# We must add a trivial hyperplane for each 0.
# Count the number of 0s.
#extra = which( pcount==0L & row(pcount)<col(pcount), arr=T )
#count = nrow(extra)
#cat( "Adding", count, "trivial hyperplanes...\n" )
#hyperpair = vector( count, mode='list' )
#for( i in 1:count )
# hyperpair[[i]] = gnd.simple[ extra[i, ] ]
#cat( "Total is now", length(hyperplane), "hyperplanes.\n" )
}
else
{
# *ALL* the hyperplanes are trivial, and have size 2
# the matroid is uniform
# the number of hyperplanes is the number of all the pairs = n*(n-1)/2 n is the number of columns in x.simple
hyperplane = matrix2list( allpairs(gnd.simple), 1L )
crossprodidx = 1:ncol(crossprods)
hyperplaneidx = 1:ncol(crossprods)
#normal = t( cprods )
}
if( FALSE )
{
# transfer each row of matrix hypertriv to item in a list
hyperpair = vector( nrow(hypertriv), mode='list' )
for( i in 1:nrow(hypertriv) )
hyperpair[[i]] = hypertriv[i, ]
}
time3 = gettime()
timethp = time3 - time2
# simplified matroid
colnames(x.simple) = as.character( gnd.simple )
if( ! issimple )
# record the original loops and multiples as an attribute of the hyperplane list
attr(hyperplane,"lmdata") = lmdata
simplified = matroid3( hyperplane, integer(0), list(), gnd.simple, x.simple )
# add all the cross products. n*(n-1)/2 n is the number of columns in x.simple
simplified$crossprods = crossprods
# add the lookup table from hyperplane index to the column of crossprods
# this assigns a normal vector to each hyperplane which effectively chooses
# one facet from the facet-pair of the zonohedron, namely the facet that
# has this normal as the outward pointing normal
simplified$crossprodidx = crossprodidx
simplified$hyperplaneidx = hyperplaneidx
time4 = gettime()
timesimplified = time4 - time3
# unsimplified matroid
if( issimple )
{
# the matroid is *already* simple
out = simplified
}
else
{
# make output by unsimplification
hyper_un = unsimplify( hyperplane, loop, multiple, gnd.simple )
if( is.null(hyper_un) ) return(NULL)
out = matroid3( hyper_un, loop, multiple, ground, x, condata$multiplesupp )
# record the original loops and multiples as an attribute
# attr(simplified,"lmdata") = lmdata
out$simplified = simplified
}
time5 = gettime()
timeunsimplified = time5 - time4
timetotal = time5 - time0
if( 0 )
{
timeother = timetotal -timelm-timenthp-timethp-timesimplified-timeunsimplified
cat( "loops+multiples: ", timelm * 1000, " msec\n" )
cat( "non-trivial hyperplanes: ", timenthp * 1000, " msec\n" )
cat( "trivial hyperplanes: ", timethp * 1000, " msec\n" )
cat( "simplified matroid: ", timesimplified * 1000, " msec\n" )
cat( "unsimplified matroid: ", timeunsimplified * 1000, " msec\n" )
cat( "other: ", timeother * 1000, " msec\n" )
cat( "Total: ", timetotal * 1000, " msec\n" )
}
return(out)
}
# x list of hyperplanes
# ground integer vector of ground set, only used when rank=1 and otherwise ignored
matroid.list <- function( x, ground=NULL, ... )
{
#cat( "matroid.list\n" )
# ensure all are in integer mode and sorted
#x = lapply( x, function(v) { sort.int(as.integer(v)) } )
tmp = base::unlist( x )
if( ! is.integer(tmp) )
{
log_level( ERROR, "x has non-integers." )
return(NULL)
}
# scrub distracting attributes, if any
# attr( x, "lmdata" ) = NULL
n = length(x)
if( n <= 1 )
{
# a rank=1 matroid, this is a special case
# hyperplanes have rank 0, so a hyperplane contains only loops
# and there can be at most 1 hyperplane
if( is.null(ground) )
{
log_level( ERROR, "For a rank=1 matroid, ground cannot be NULL." )
return(NULL)
}
ground = as.integer(ground)
if( ! all( 0 < diff(ground) ) )
{
log_level( ERROR, "ground is invalid, because it is not in strictly increasing order." )
return(NULL)
}
if( n == 0 )
loop = integer(0)
else
loop = x[[1]]
ok = is.integer(loop) && subset1( loop, ground )
if( ! ok )
{
log_level( ERROR, "The loops are invalid. They must be integral and a subset of the ground set." )
return(NULL)
}
nonloop = setdiff( ground, loop )
if( length(nonloop) == 0 )
{
log_level( ERROR, "Every point in the ground set is a loop, which is invalid. The rank==0." )
return(NULL)
}
out = matroid1( loop, nonloop, ground )
if( ! is_simple(out) )
{
out$simplified = matroid1( integer(0), min(nonloop), min(nonloop) )
# record the original loops and multiples as an attribute
attr(out$simplified$hyperplane,"lmdata") = list( loop=loop, multiple=list(nonloop) )
}
return( out )
}
if( ! is.null(ground) )
{
log_level( WARN, "For a rank>1 matroid, argument ground is ignored." )
}
ground = fastunion( x )
out = list()
class( out ) = c( "matroid", class(out) )
# to test for ranks 2 and 3, first simplify
hypersimple = simplify( x, ground )
groundsimple = fastunion( hypersimple )
lenvec = lengths( hypersimple ) # sapply( hypersimple, length ) is much slower
minlen = min( lenvec )
if( minlen == 0 )
{
log_level( ERROR, "The simplified hyperplane list has an empty hyperplane, which is invalid." )
return(NULL)
}
out$ground = ground
out$hyperplane = x #; print( str(out$hyperplane) )
if( minlen == 1 )
{
# this is rank 2
# check the *all* hyperplanes have size 1, and there are no duplicates
ok = all( lenvec == 1L ) && anyDuplicated( unlist(hypersimple) )==0
if( ! ok )
{
log_level( ERROR, "A rank=2 matroid is detected, but the hyperplanes are invalid." )
return(NULL)
}
out$rank = 2L
}
else
{
# check for rank=3
#cat( "checking rank 3\n" ) ; flush.console()
#cmat = paircount1( hypersimple, groundsimple )
# hypersimple must satisfy the paving axioms,
# so hypertriv must be an empty list
hypertriv = trivialhypers2( hypersimple, groundsimple )
if( is.character(hypertriv) )
{
# there was an error and this is the message
# cat( paste(hypertriv,collapse='\n'), '\n' )
log_level( WARN, paste(hypertriv,collapse='\n') )
return(NULL)
}
if( 0 < length(hypertriv) )
{
# ERROR
mess = "The hyperplanes do not satisfy the paving matroid properties for rank=3."
mess = c( mess, " There are %d point pairs that are in no hyperplane." )
mess = c( mess, " One such point pair is %d,%d." )
mess = paste0( mess, sep='\n' )
pair = hypertriv[[1]]
log_level( ERROR, mess, length(hypertriv), pair[1], pair[2] )
return(NULL)
}
out$rank = 3L
}
lmdata = attr( hypersimple, "lmdata" )
if( is.null(lmdata) )
{
out$loop = integer(0)
out$multiple = list()
}
else
{
out$loop = lmdata$loop
out$multiple = lmdata$multiple
}
out$multiplesupp = data.frame( contiguous=is_contiguousgroup( out$loop, out$multiple, out$ground ) )
if( ! is_simple(out) )
{
if( out$rank == 2L )
{
out$simplified = matroid2( integer(0), hypersimple, groundsimple )
}
if( out$rank == 3L )
{
out$simplified = matroid3( hypersimple, integer(0), list(), groundsimple )
}
# record the original loops and multiples as an attribute
attr(out$simplified$hyperplane,"lmdata") = lmdata # list( loop=out$loop, multiple=out$multiple )
}
#attr( hypersimple, "lmdata" ) = NULL # remove distraction
return( out )
}
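# Example (illustrative sketch, not run; the generic matroid() defined below dispatches here for a list):
#   matroid( list(integer(0)), ground=1:3 )   # rank 1; the 3 nonloops form a single multiple group
#   matroid( list(1L,2L,3L) )                 # rank 2; the 3 hyperplanes are singletons
#   matroid( list( c(1L,2L), c(1L,3L), c(1L,4L),
#                  c(2L,3L), c(2L,4L), c(3L,4L) ) )   # rank 3; the uniform matroid U_{3,4}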
# loop integer vector of loops, can be empty
# nonloop integer vector of nonloops, cannot be empty
# ground ground set in ascending order
#
# conditions, which are not checked:
# loop and nonloop are disjoint
#
# returns a matroid of rank 1
matroid1 <- function( loop, nonloop, ground, matrix=NULL, multiplesupp=NULL )
{
out = list()
class( out ) = c( "matroid", class(out) )
out$ground = ground # sort( c( loop, nonloop ) )
out$hyperplane = list(loop)
out$rank = 1L
out$loop = loop
if( length(nonloop) == 1 )
out$multiple = list()
else
out$multiple = list( nonloop )
# make supplementary data.frame with 1 column
msupp = data.frame( contiguous=is_contiguousgroup( loop, out$multiple, ground ) )
if( ! is.null(matrix) )
{
if( ncol(matrix) != length(ground) )
{
log_level( ERROR, "%d != %d.", ncol(matrix), length(ground) )
return(NULL)
}
out$matrix = matrix
if( is.null(multiplesupp) ) multiplesupp = emptymultiplesupp(1)
if( nrow(multiplesupp) != length(out$multiple) )
{
log_level( ERROR, "multiplesupp %d != %d.", nrow(multiplesupp), length(out$multiple) )
return(NULL)
}
# add matrix-related columns
msupp = cbind( msupp, multiplesupp )
}
out$multiplesupp = msupp
return( out )
}
# loop integer vector of loops
# nonloop list of integer vectors of nonloops, which define a partition of the nonloops
# ground integer vector for the ground set, in ascending order
#
# conditions, which are not checked:
# 2 or more parts of the nontrivial partition, i.e. length(nonloop) >= 2
# loops and nonloops are disjoint
#
# returns a matroid of rank 2
matroid2 <- function( loop, nonloop, ground, matrix=NULL, multiplesupp=NULL )
{
out = list()
class( out ) = c( "matroid", class(out) )
out$ground = ground # fastunion( loop, nonloop ) # sort( c( loop, unique(unlist(nonloop)) ) )
if( 0 < length(loop) )
{
# add all loops to each set in the partition to get the hyperplanes
myfun <- function( vec ) { sort.int( c(loop,vec) ) }
out$hyperplane = lapply( nonloop, myfun )
}
else
out$hyperplane = nonloop
out$rank = 2L
out$loop = loop
sizevec = lengths(nonloop) #sapply( nonloop, length )
out$multiple = nonloop[ 2 <= sizevec ]
# make supplementary data.frame with 1 column
msupp = data.frame( contiguous=is_contiguousgroup( loop, out$multiple, ground ) )
if( ! is.null(matrix) )
{
if( ncol(matrix) != length(ground) )
{
log_level( ERROR, "%d != %d.", ncol(matrix), length(ground) )
return(NULL)
}
out$matrix = matrix
if( is.null(multiplesupp) ) multiplesupp = emptymultiplesupp(2)
if( nrow(multiplesupp) != length(out$multiple) )
{
log_level( ERROR, "multiplesupp %d != %d.", nrow(multiplesupp), length(out$multiple) )
return(NULL)
}
# add matrix-related columns
msupp = cbind( msupp, multiplesupp )
}
out$multiplesupp = msupp
return( out )
}
# matroid3()
#
# unlike matroid1() and matroid2(), this one does very little processing.
# It depends on the caller to do that.
#
# hyperplane the hyperplanes for a matroid of rank 3.
# if the matroid is simple these sets form a 2-partition (of the ground set of the simple matroid)
# loop integer vector of loops
# multiple list of disjoint multiple groups
# ground integer vector for the ground set, in ascending order
#
# conditions, which are not checked:
# loops are disjoint from the remaining sets
# the first point in each multiple group is in some hyperplane
#
# returns a matroid of rank 3
#
# note that if loop[] and multiple[] are empty, hyperplane can be used as is
matroid3 <- function( hyperplane, loop, multiple, ground=NULL, matrix=NULL, multiplesupp=NULL )
{
out = list()
class( out ) = c( "matroid", class(out) )
if( is.null(ground) ) ground = fastunion( hyperplane, loop, multiple ) # sort.int( unique( c( loop, unlist(multiple), unlist(hyperplane) ) ) )
out$ground = ground
out$hyperplane = hyperplane
out$rank = 3L
out$loop = loop
out$multiple = multiple
# make supplementary data.frame with 1 column
msupp = data.frame( contiguous=is_contiguousgroup( loop, multiple, ground ) )
if( ! is.null(matrix) )
{
if( ncol(matrix) != length(ground) )
{
log_level( ERROR, "%d != %d.", ncol(matrix), length(ground) )
return(NULL)
}
out$matrix = matrix
if( is.null(multiplesupp) ) multiplesupp = emptymultiplesupp(3)
if( nrow(multiplesupp) != length(multiple) )
{
log_level( ERROR, "multiplesupp %d != %d.", nrow(multiplesupp), length(multiple) )
return(NULL)
}
msupp = cbind( msupp, multiplesupp ) # add matrix-related columns
}
out$multiplesupp = msupp
return( out )
}
getsimplified.matroid <- function( x, ... )
{
if( is.null(x$simplified) )
return(x)
else
return( x$simplified )
}
unsimplify.matroid <- function( x, loop=NULL, multiple=NULL, ... )
{
return( matroid( unsimplify(x$hyperplane,loop,multiple,x$ground) ) )
}
is_uniform.matroid <- function( x )
{
if( x$rank == 1 )
return( length(x$loop)==0 )
if( ! is_simple(x) ) return(FALSE)
return( all( lengths(x$hyperplane) == x$rank-1L ) )
}
is_paving.matroid <- function( x )
{
if( x$rank == 3L )
return( is_simple(x) )
else if( x$rank == 2L )
return( length(x$loop)==0 )
else if( x$rank == 1L )
return( TRUE )
log_level( FATAL, "Internal error. rank=%d", x$rank )
return( NA )
}
is_simple.matroid <- function( x )
{
return( length(x$loop)==0 && length(x$multiple)==0 )
}
getground.matroid <- function( x )
{
return( x$ground )
}
getloop.matroid <- function( x )
{
return( x$loop )
}
getmultiple.matroid <- function( x )
{
return( x$multiple )
}
gethyperplane.matroid <- function( x )
{
return( x$hyperplane )
}
getmatrix.matroid <- function( x )
{
return( x$matrix )
}
# x a simple matroid of rank 3, not checked
# hyperidx an integer m-vector of hyperplane indexes of x
# if NULL then take this to be *ALL* the hyperplanes
#
# returns a 3xm matrix with the "distinguished" normals of the hyperplanes in the columns
getnormal.matroid <- function( x, hyperidx, ... )
{
#if( ! is_simple(x) || x$rank !=3 )
# {
# log.string( FATAL, "Internal error. The matroid is invalid." )
# return(NULL)
# }
#if( any( length(x$crossprodidx) < hyperidx ) )
# {
# cat( "getnormal(). hyperidx=", hyperidx, '\n' )
# return(NULL)
# }
if( is.null(hyperidx) )
out = x$crossprods[ , x$crossprodidx ]
else
out = x$crossprods[ , x$crossprodidx[hyperidx], drop=FALSE ]
return( out )
}
# x a matroid
# idx a single integer, which is in the ground set
#
# returns the index of the multiple[[]] group that contains idx,
# and if there is none, then returns 0L
# uses a brute force search, maybe optimize later
#
# in case of error returns NULL
getmultipleindex.matroid <- function( x, idx )
{
ok = is.integer(idx) && length(idx)==1
if( ! ok )
{
log_level( ERROR, "idx=%s is invalid.", as.character(idx) )
return(NULL)
}
if( !( idx %in% x$ground ) )
{
log_level( ERROR, "idx=%d is invalid.", idx )
return(NULL)
}
for( i in seq_len( length(x$multiple) ) )
{
if( idx %in% x$multiple[[i]] ) return(i)
}
return( 0L )
}
# x a matroid
# subs a vector of integers representing a subset of x$ground, or a list of such vectors
#
# returns an integer vector with length = length of subs
#
# if a set is NOT a subset of ground, the corresponding integer is NA_integer_
rank <- function( x, subs )
{
if( ! inherits( x, "matroid" ) )
{
log_level( ERROR, "x is not a matroid." )
return(NULL)
}
if( ! is.list(subs) )
subs = list( as.integer(subs) )
# verify that all sets in subs are subsets of x$ground
bad = ! .Call( C_issubset, subs, x$ground )
if( any(bad) )
{
log_level( WARN, "%d of %d subsets are not a subset of ground.",
sum(bad), length(bad) )
# cat( mess )
# return( NULL )
}
names.saved = names(subs)
if( ! is_simple(x) )
subs = .Call( C_simplifygeneral, subs, x$ground, x$loop, x$multiple )
if( x$rank == 1L )
{
# a special case
out = lengths(subs)
if( 1L < max(out) )
{
log_level( FATAL, "Internal error. For rank 1 matroid, max(out) = %d > 1.", max(out) )
return( NULL )
}
}
else if( x$rank == 2L )
{
# a special case
out = pmin( lengths(subs), 2L )
# return( out )
}
else if( x$rank == 3L )
{
n = length(subs)
out = integer(n)
# find the non-trivial hyperplanes of 3 or more points
# we do not need to simplify to the non-trivials now,
# because C_anyissuperset uses the fact that the lengths of the hyperplanes are decreasing
# and can optimize it
#hypersnt = x$hyperplane[ 3 <= lengths(x$hyperplane) ] this takes too long
for( i in 1:n )
{
if( bad[i] ) next
set = subs[[i]]
if( length(set) <= 2L )
{
out[i] = length(set)
}
else # if( 0 < length(hypersnt) )
{
# rank(set) is either 2 or 3, depending on whether set is a subset of a hyperplane
test = .Call( C_anyissuperset, x$hyperplane, set, TRUE ) # .Call( C_issuperset, hypersnt, set )
if( test ) # any(test) )
out[i] = 2L
else
out[i] = 3L
}
#else
# # length(set) >= 3 but the length of all hyperplanes is < 3
# out[i] = 3L
}
}
else
{
log_level( ERROR, "rank(x)=%g != 3.", x$rank )
return( NULL )
}
# mark sets that are not subsets of ground with NA
out[ bad ] = NA_integer_
names(out) = names.saved
return( out )
}
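# Example (illustrative, not run), using the uniform matroid U_{3,4} built from all 2-point hyperplanes:
#   m = matroid( list( c(1L,2L), c(1L,3L), c(1L,4L), c(2L,3L), c(2L,4L), c(3L,4L) ) )
#   rank( m, list( 1L, c(1L,2L), 1:3, c(1L,5L) ) )   # expected: 1 2 3 NA, since 5 is not in the ground set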
is_independent <- function( x, subs )
{
if( ! is.list(subs) )
subs = list( as.integer(subs) )
out = (rank(x,subs) == lengths(subs))
names(out) = names(subs)
return( out )
}
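# Example (illustrative, not run), continuing the U_{3,4} example above:
#   is_independent( m, list( c(1L,2L), 1:4 ) )   # expected: TRUE FALSE, since rank(1:4)=3 < 4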
is_loop.matroid <- function( x, point )
{
#names.saved = names(point)
#point = as.integer(point)
out = is.finite( match( point, x$loop ) )
# make points not in the ground set NA
out[ is.na( match(point,x$ground) ) ] = NA
names(out) = names(point) #names.saved
return(out)
}
# lst a list of integer vectors
charsummary <- function( lst )
{
if( length(lst) == 0 ) return( '{}' )
out = ''
if( length(lst) <= 8 )
{
for( vec in lst )
out = c( out, sprintf( "{%s}", paste(vec,collapse=' ') ) )
out = paste( out, collapse=' ' )
if( nchar(out) <= 80 )
return(out)
}
out = ''
sizevec = unlist( lapply( lst, length ) )
sizeunq = sort( unique(sizevec) )
for( size in sizeunq )
out = c( out, sprintf( " [%d-point: %d]", size, sum( sizevec==size ) ) )
out = paste( out, collapse='' )
return( out )
}
print.matroid <- function( x, ... )
{
cat( "ground set: ", length(x$ground), " points {", paste(x$ground,collapse=' '), "}\n", sep='' )
lmdata = attr(x$hyperplane,"lmdata") # attr(x,"lmdata")
if( ! is.null(lmdata) )
{
# this matroid is simple and derived from an "original" matroid
for( vec in lmdata$multiple )
{
mess = paste( vec, collapse=' ' )
mess = sprintf( " Point %d corresponds to the multiple group {%s} in the original matroid.\n",
vec[1], mess )
cat( mess )
}
}
cat( "hyperplanes: ", length(x$hyperplane), " ", charsummary(x$hyperplane), '\n', sep='' )
cat( "rank: ", x$rank, '\n', sep='' )
cat( "loops: ", length(x$loop), " {", paste(x$loop,collapse=' '), "}", '\n', sep='' )
cat( "multiple groups: ", length(x$multiple), " ", charsummary(x$multiple), '\n', sep='' )
cat( "uniform: ", is_uniform(x), '\n', sep='' )
cat( "paving: ", is_paving(x), '\n', sep='' )
cat( "simple: ", is_simple(x), '\n', sep='' )
cat( "contiguous: ", all(x$multiplesupp$contiguous), '\n', sep='' )
if( ! is.null(x$matrix) )
{
mess = sprintf( "This matroid is constructed from a %dx%d real matrix.\n", nrow(x$matrix), ncol(x$matrix) )
cat( mess )
if( ncol(x$matrix) <= 10 )
print( x$matrix )
}
if( ! is.null(x$simplified) )
{
# print the simplified matroid, and indent 4 spaces
cat( '\n' )
cat( "The summary of the simplified matroid is:\n" )
mess = paste( " ", capture.output( print(x$simplified) ), '\n', sep='' )
cat( mess )
}
return( invisible(TRUE) )
}
# x a list of subsets of a ground set, the hyperplanes of a matroid
# ground integer vector; if NULL, computed as the union of the sets in x
#
# finds all loops and groups of multiples
# returns a new list of the same length with:
# *) all loops removed
# *) all multiples removed, except for the first point in each group
simplify.list <- function( x, ground=NULL, ... )
{
if( is.null(ground) ) ground = fastunion(x)
lmdata = loopsandmultiples( x, ground ) # ; print( str(lmdata) )
out = .Call( C_simplify, x, ground, lmdata$loop, lmdata$multiple )
# record the original loops and multiples as an attribute
attr( out, "lmdata" ) = lmdata
return( out )
}
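# Example (illustrative, not run; the removal itself is done in compiled code):
#   x = list( c(1L,2L,3L), c(3L,4L), c(3L,5L) )
#   xs = simplify( x )
#   # point 3 is in every hyperplane, so it is a loop;
#   # points 1 and 2 have the same incidence pattern, so {1,2} is a multiple group.
#   # xs should be list( 1L, 4L, 5L ), with attr(xs,"lmdata") recording the loop and multiple group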
# x a list of subsets of a ground set, the hyperplanes of a matroid
# ground integer vector; if NULL, computed as the union of the sets in x
#
# finds all loops and groups of multiples
# returns a new list of the same length with:
# *) all loops removed
# *) all multiples removed, except for the first point in each group
simplify_old.list <- function( x, ground=NULL )
{
# attr( x, "ground" ) = ground
lmdata = loopsandmultiples( x, ground ) # ; print( str(lmdata) )
remove = lmdata$loop
# for each group of multiples, remove all except the first point
for( idx in lmdata$multiple )
remove = c( remove, idx[-1] )
if( length(remove) == 0 ) return(x) # no change
# for each hyperplane, remove every point in the vector remove
if( 1 )
{
for( i in seq_len(length(x)) )
{
hp = x[[i]]
idx = match( remove, hp, nomatch=0 )
if( any( 0 < idx ) ) x[[i]] = hp[ -idx ]
}
}
else
{
# this is actually SLOWER - TODO: write a C version
myfun <- function( hp )
{
idx = match( remove, hp, nomatch=0 )
if( any( 0 < idx ) )
out = hp[ -idx ]
else
out = hp
return(out)
}
x = lapply( x, myfun )
}
# record the original loops and multiples as an attribute
attr( x,"lmdata") = lmdata
return( x )
}
# x a list of integer vectors, representing subsets of a ground set
# loop an integer vector, with all points disjoint from x
# multiple a list of multiple groups; each group must intersect the ground set in exactly 1 point
# ground union of all points in x, in ascending order
unsimplify.list <- function( x, loop=NULL, multiple=NULL, ground=NULL, ... )
{
lmdata = attr(x,"lmdata")
if( ! is.null(lmdata) )
{
if( is.null(loop) ) loop = lmdata$loop
if( is.null(multiple) ) multiple = lmdata$multiple
}
else
{
if( is.null(loop) ) loop = integer(0)
if( is.null(multiple) ) multiple = list()
}
if( length(loop)==0 && length(multiple)==0 )
{
# nothing to do
attr(x,"lmdata") = NULL # ensure that "lmdata" is truly NULL
return( x )
}
if( is.null(ground) ) ground = fastunion(x)
out = .Call( C_unsimplify, x, ground, loop, multiple )
return( out )
}
########### helper functions ############
# hyperplane a list of integer vectors, defining subsets of a ground set
# Undocumented: it may also have an attribute "ground" = the ground set
# ground integer vector; if NULL, computed as the union of the hyperplanes
# returns a list with items:
# loop an integer vector of indexes of loops
# multiple a list of integer vectors, each of which is a group of multiples
#
# loop a point is a loop iff it appears in every hyperplane
# multiple two points are in the same multiple group iff they appear in exactly the same hyperplanes
loopsandmultiples <- function( hyperplane, ground=NULL )
{
if( ! is.list(hyperplane) )
{
log_level( ERROR, "Argument hyperplane is not a list." )
return(NULL)
}
out = list()
m = length(hyperplane)
if( m == 0 )
{
# no loops or multiples
out$loop = integer(0) # integer vector, for type consistency with the which() result in the general case
out$multiple = list()
return(out)
}
tmp = base::unlist( hyperplane ) #; cat( "tmp=", tmp, '\n' )
if( ! is.integer(tmp) )
{
log_level( ERROR, "Argument hyperplane has non-integers." )
return(NULL)
}
# ground = attr( hyperplane, "ground" )
if( is.null(ground) )
{
#tmp = unique(tmp)
#ground = sort( hyperplane )
ground = fastunion( hyperplane )
}
else
{
if( ! is.integer(ground) )
{
log_level( ERROR, "ground is non-integer." )
return(NULL)
}
# verify that ground is increasing
if( ! all( 0 < diff(ground) ) )
{
log_level( ERROR, "ground is not strictly increasing." )
return(NULL)
}
# verify that tmp is a subset of ground
if( ! subset1(tmp,ground) )
{
log_level( ERROR, "One of the hyperplanes is not a subset of ground." )
return(NULL)
}
# imax = max( tmp, ground )
}
gmax = ground[ length(ground) ] # largest possible index
maskg = logical( gmax )
maskg[ground] = TRUE # to be used below
# create the counters
if( 1 )
{
res = incidencedata( hyperplane, ground )
if( is.null(res) ) return(NULL)
# incident[] an integer vector. incident[i] = # of hyperplanes that contain point i
# hash[] a real vector depending on the incidence pattern of the point, hash[i] can be very large, so use real
incident = res$incident #; print( incident )
hash = res$hash
}
else
{
# first version too slow
incident = integer( gmax ) # incident[i] = # of hyperplanes that contain point i
hash = double( gmax ) # hash function of point i, hash[i] can be very large, so use double
for( j in 1:m )
{
hp = hyperplane[[ j ]] # hp is the set of points in hyperplane j
incident[hp] = incident[hp] + 1L
hash[hp] = hash[hp] + j^2 # a large signature, so collisions not likely
}
#cat( "hash=", hash, '\n' )
}
out$loop = which( incident == m )
out$multiple = list() # grow this list one at a time - slow and not good
# the first grouping only uses the hash function, and so it is only crude and approximate
grp = grpDuplicated( matrix(hash,1,length(hash)), MARGIN=2 )
if( all( grp==0 ) )
{
# no multiples
return( out )
}
#cat( "grp=", grp, '\n' )
# loops cannot be multiples, so zero them
grp[ out$loop ] = 0L
# points not in the ground set cannot be multiples, so zero them
grp[ ! maskg ] = 0L
#cat( "after zeroing loops and points outside ground set, grp=", grp, '\n' )
grplist = grplistfromvec( grp ) #; cat( "grplist:\n" ) ; print( grplist )
pcount = 0 # of parallel groups
for( idx in grplist )
{
#cat( "idx=", idx, '\n' )
# removing loops and points not in ground set may generate invalid groups
if( length(idx) <= 1 ) next # not a valid group
# make incidence matrix for columns taken from idx
# this is a subset of the full incidence matrix, and so saves a lot of memory
if( 1 )
mat = incidencematrix( hyperplane, ground, idx )
else
{
mat = matrix( FALSE, m, length(idx) )
for( j in 1:m ) { mat[j, ] = idx %in% hyperplane[[ j ]] }
}
# now compute the true multiple groups, using the full vector instead of a hash function
grpsub = grpDuplicated( mat, MARGIN=2 ) #; cat( "grpsub=", grpsub, '\n' )
grpsublist = grplistfromvec( grpsub )
for( idxsub in grpsublist )
{
#cat( "idxsub=", idxsub, '\n' )
# removing loops and points not in ground set may generate invalid groups
if( length(idxsub) <= 1 ) next # not a valid group
pcount = pcount + 1
#cat( "idxsub=", idxsub, " pcount=", pcount, '\n' )
out$multiple[[pcount]] = idx[ idxsub ]
}
}
return( out )
}
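# Example (illustrative, not run), same hyperplanes as in the simplify() example above:
#   loopsandmultiples( list( c(1L,2L,3L), c(3L,4L), c(3L,5L) ) )
#   # expected: loop=3L (point 3 is in all 3 hyperplanes) and multiple=list(c(1L,2L))
#   # (points 1 and 2 are incident to exactly the same hyperplanes)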
# x matroid
# W invertible matrix - 2x2 or 3x3
lintransform.matroid <- function( x, W )
{
if( is.null(x$matrix) )
{
log_level( WARN, "matroid is not vectorial, so returning the matroid unchanged." )
return(x) # not generated from a matrix, so nothing can be done
}
if( length(W) == 1 )
W = W * diag( x$rank )
# check that W is OK
ok = is.matrix(W) && all( dim(W) == c(x$rank,x$rank) )
if( ! ok )
{
log_level( ERROR, "matrix W is invalid." )
return(NULL)
}
Winv = try( solve(W), silent=TRUE )
if( class(Winv)[1] == "try-error" )
{
log_level( ERROR, "matrix W is not invertible." )
return(NULL)
}
# just copy from x to out, and then make selective changes !
out = x
out$matrix = W %*% x$matrix
if( 0 < nrow(out$multiplesupp) )
{
out$multiplesupp$major = x$multiplesupp$major %*% t(W)
out$multiplesupp$minor = x$multiplesupp$minor %*% t(W)
}
if( ! is.null(x$crossprods) )
{
# transform crossprods, using Winv
crossprods = t(Winv) %*% x$crossprods
out$crossprods = normalizeMatrix( crossprods, 2L )
}
if( ! is.null(x$simplified) )
{
out$simplified$matrix = W %*% x$simplified$matrix
if( ! is.null(x$simplified$crossprods) )
{
# transform crossprods, using Winv
crossprods = t(Winv) %*% x$simplified$crossprods
out$simplified$crossprods = normalizeMatrix( crossprods, 2L )
}
}
return( out )
}
# loop integer vector of loops
# multiple list of disjoint multiple groups
# ground integer vector for the ground set, in ascending order
#
# returns a logical vector the same length as multiple[[]]
is_contiguousgroup <- function( loop, multiple, ground )
{
m = length(multiple)
out = logical(m)
if( m == 0 ) return( out )
# subtract loops from ground set
if( 0 < length(loop) )
{
idx = match( loop, ground )
gnd.noloops = ground[ -idx ]
}
else
gnd.noloops = ground
for( i in 1:m )
out[i] = is_contiguous( multiple[[i]], gnd.noloops )
return( out )
}
# x a simple matroid of rank 3. Not checked, because that takes too long.
# hypersub integer m-vector of indexes of hyperplanes (in the simple matroid)
# gen a generator/point in the hyperplane (in the ground set of the simple matroid)
# normal mx3 matrix of normal vectors to the m hyperplanes given by hypersub
#
# returns a 3xm matrix of m facet diameters
getdiametermatrix <- function( x, hypersub, pcube, gen, normal )
{
.Call( C_diametermatrix, x$hyperplane, hypersub, pcube, gen, x$ground, normal, x$matrix, x$crossprods )
}
getbeltdata <- function( x, hypersub, pcube, gen, normal )
{
.Call( C_beltdata, x$hyperplane, hypersub, pcube, gen, x$ground, normal, x$matrix, x$crossprods )
}
# returns vector with column indexes of all groups with mixed directions
getmixed.matroid <- function( x )
{
if( is.null(x$matrix) ) return(NULL) # matroid did not come from a matrix
if( nrow(x$multiplesupp)==0 ) return(integer(0)) # no multiple groups
mixed = x$multiplesupp$mixed # logical vector
return( x$multiplesupp$colidx[mixed] )
}
########### UseMethod() functions ############
matroid <- function( x, ... )
{
UseMethod('matroid')
}
simplify <- function( x, ... )
{
UseMethod('simplify')
}
getsimplified <- function( x, ... )
{
UseMethod('getsimplified')
}
unsimplify <- function( x, ... )
{
UseMethod('unsimplify')
}
is_simple <- function( x )
{
UseMethod('is_simple')
}
is_uniform <- function( x )
{
UseMethod('is_uniform')
}
is_paving <- function( x )
{
UseMethod('is_paving')
}
getground <- function( x )
{
UseMethod('getground')
}
gethyperplane <- function( x )
{
UseMethod('gethyperplane')
}
#rank <- function( x, subs )
#{
# UseMethod('rank')
#}
#is_independent <- function( x, subs )
#{
# UseMethod('is_independent')
#}
is_loop <- function( x, point )
{
UseMethod('is_loop')
}
getmultipleindex <- function( x, idx )
{
UseMethod('getmultipleindex')
}
getmixed <- function( x )
{
UseMethod('getmixed')
}
getloop <- function( x )
{
UseMethod('getloop')
}
getmultiple <- function( x )
{
UseMethod('getmultiple')
}
################## deadwood below #######################################
# x a simple matroid of rank 3. Not checked, because that takes too long.
# hyperidx index of a hyperplane (in the simple matroid)
# gen a generator/point in the hyperplane (in the ground set of the simple matroid)
#
# parameters 2 and 3 define a zonogon facet, and a pair of antipodal edges of that zonogon
#
# returns the vector from the midpoint of one edge to the midpoint of the antipodal edge
getdiameter <- function( x, hyperidx, gen )
{
#if( ! is_simple(x) || x$rank !=3 )
# {
# log.string( FATAL, "Internal error. The matroid is invalid." )
# return(NULL)
# }
# get all ground set points of the hyperplane
hyper = x$hyperplane[[hyperidx]]
# convert from ground set to raw index
generatoridx = match( hyper, x$ground )
if( any( is.na(generatoridx) ) )
{
log_level( FATAL, "Internal error. Hyperplane %g is not a subset of the ground set.", hyperidx )
return(NULL)
}
k = match( gen, hyper )
if( is.na(k) )
{
log_level( FATAL, "Internal error. Generator %g is not in hyperplane %g.", gen, hyperidx )
return(NULL)
}
# genidx = generatoridx[k]
# reorder generatoridx so that genidx comes first
generatoridx = c( generatoridx[k], generatoridx[-k] )
normal = x$crossprods[ , x$crossprodidx[hyperidx] ] #; print(normal)
out = .Call( C_diametervector, generatoridx, normal, x$matrix, x$crossprods )
return(out)
}
| /scratch/gouwar.j/cran-all/cranData/zonohedra/R/matroid.R |
# A a matrix [possibly with NAs or NaNs ?]
# eps difference tolerance, used to 'collapse' one column at a time
# oriented if FALSE, then 2 columns that differ only in sign are considered the same.
# bysize sort the groups in decreasing order by size; requires extra work
# returns an integer vector group with length(group) = ncol(A)
# 0 means this column is in a trivial singleton group (most common).
# a column with an NA is always in its own singleton group
findColumnGroups <- function( A, eps, oriented, bysize=FALSE )
{
if( ! oriented )
{
A = conditionalAntipodal( A, eps/2, MARGIN=2 )
if( is.null(A) ) return(NULL)
}
# collapse each coordinate, usually 3 of them
Acollapsed = array( NA_real_, dim=dim(A) )
for( i in 1:nrow(A) )
{
Acollapsed[i, ] = collapseGroups1D( A[i, ], eps=eps )
}
#out = list()
#out$groupidx = grpDuplicated(Acollapsed,MARGIN=2)
out = grpDuplicated( Acollapsed, MARGIN=2 )
if( bysize && ! is.null(out) )
out = relabelGrpIndexes( out )
return(out)
}
# group a non-negative integer vector, where positive integers indicate membership in a group
#
# returns a relabeled vector in which the groups are numbered in descending order by size
relabelGrpIndexes <- function( group )
{
n = max( group )
if( n <=1 ) return(group) # all 0s or only 1 group, so no change
member = vector( n, mode='list' )
for( i in 1:n ) member[[i]] = which( group==i )
lenvec = lengths(member)
perm = order( lenvec, decreasing=TRUE )
out = group
for( i in 1:n ) out[ member[[ perm[i] ]] ] = i
return( out )
}
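# Example (plain R, verifiable by hand):
#   relabelGrpIndexes( c(0L,1L,1L,2L,2L,2L) ) returns c(0L,2L,2L,1L,1L,1L),
#   since group 2 has 3 members and group 1 has only 2, so their labels are swapped.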
# vec vector of doubles
# eps small non-negative number
# A _group_ is a maximal set of elements in vec
# with adjacent differences all <= eps
# the function modifies vec[] by replacing each value in a group
# by the mean of that group.
# Exception: if the group contains a single integer (possibly with repeats),
# then each value in the group is replaced by that integer.
#
# returns: the modified vec
collapseGroups1D <- function( vec, eps )
{
ok = is.numeric(vec) && ! any(is.na(vec))
if( ! ok )
{
log_level( ERROR, "Argument vec is invalid." )
return(NULL)
}
ok = is.numeric(eps) && length(eps)==1 && 0<=eps
if( ! ok )
{
log_level( ERROR, "Argument eps is invalid." )
return(NULL)
}
if( length(vec)<=1 || eps==0 ) return(vec) # no change
# sort vec in increasing order
perm = order(vec)
out = vec[perm]
# change vector out[] "in place"
ok = .Call( C_collapseGroups1D_R, out, eps )
if( ! ok ) return(NULL)
# restore original order
out[perm] = out
return(out)
}
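# Example (illustrative; the grouping itself is done in compiled code):
#   collapseGroups1D( c(0.3, 2 + 1.e-14, 0.3 + 1.e-14, 2), eps=1.e-12 )
#   # the two values near 0.3 collapse to their mean, while the two values near 2
#   # collapse to the integer 2 itself, by the integer exception described above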
# A a numeric matrix
# eps small positive number
# MARGIN 1 (vectors are the rows) or 2 (vectors are the columns)
# for each vector, search for the first number whose absolute value > eps
# If that value is negative then apply antipodal, and otherwise the identity.
# So in the returned matrix, in each vector the first "significant" value is positive
#
conditionalAntipodal <- function( A, eps, MARGIN )
{
ok = is.double(A) && is.matrix(A)
if( ! ok )
{
return(NULL)
}
ok = is.double(eps) && length(eps)==1
if( ! ok )
{
return(NULL)
}
MARGIN = as.integer(MARGIN)
ok = length(MARGIN)==1 && MARGIN %in% 1L:2L
if( ! ok )
{
return(NULL)
}
# make a deep (non-shallow) copy of A, because C_conditionalAntipodal() modifies in-place
out = duplicate(A)
# change matrix out[] "in place"
ok = .Call( C_conditionalAntipodal, out, eps, MARGIN )
if( ! ok ) return(NULL)
return( out )
}
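# Example (illustrative; the sign flips are done in compiled code):
#   conditionalAntipodal( cbind( c(0,-1,2), c(0,1,-2) ), eps=0.5, MARGIN=2 )
#   # in column 1 the first entry with absolute value > 0.5 is -1, so that column is negated;
#   # column 2 is unchanged. Both columns should come back as c(0,1,-2).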
duplicate <- function(x)
{
.Call(C_duplicateR, x)
}
obj_addr <- function(x)
{
.Call(C_obj_addr,x)
}
############ deadwood below ##################
# too slow
conditionalAntipodal1 <- function( A, eps )
{
myfun <- function( vec )
{
idx = which( eps < abs(vec) )
if( length(idx)==0 || 0<vec[idx[1]] )
return( vec )
else
return( -vec )
}
return( base::apply( A, MARGIN=2, myfun ) )
}
# too slow
conditionalAntipodal2 <- function( A, eps )
{
A = t(A)
# extract the first non-zero entry in each row
first = apply( A, 1, function(r) { r[ which(eps<abs(r))[1] ] } )
# first[ ! is.finite(first) ] = 0 # change NAs to 0
A = t( sign(first) * A )
return(A)
}
| /scratch/gouwar.j/cran-all/cranData/zonohedra/R/nearequal.R |
# getmetrics2trans( x, angles=TRUE, more=TRUE, tol=5.e-12 )
#
# x a zonohedron
# angles add dihedral angles of all edges
# more add more pgramdf columns
# tol tolerance for "edge-on" facets, as viewed from the center.
# And also for the deficient shift.
#
# returns list with:
#
# generators # of generators, in the simplified matroid
#
# pgramdf a data frame on the pgrams, with N*(N-1)/2 rows,
# computed initially by allpgramcenters2trans(), but then possibly modified.
# It has these columns:
# idxpair integer matrix with 2 columns i and j with 1 <= i < j <= n
# gndpair idxpair converted to the ground set, another integer matrix with 2 columns
# center real matrix with 3 columns containing the corresponding facet center,
# for the centered surface. If linkingnum is negative this is reversed.
# cross unitized cross product of generators, never reversed
# beta coefficient of the plane equation of the facet
# If linkingnum is negative this is reversed.
# If the surface is starshaped, all beta are positive.
#
# if the 2-trans surface is starshaped, then these additional columns are added
# hyperplaneidx index of the hyperplane that contains a congruent copy of this facet in the zonohedron
# centermax center of the pgram facet, relative to the center of the zonohedron
# betamax plane constant for this pgram facet
# deficit betamax - beta. When coincident, it might not be exactly 0.
# shift distance between the 2 facet centers - centermax and center.
# When coincident, it should be 0, but may be very small because of truncation.
# deficient equal to shift >= tol. a logical.
# area area of the pgram facet
# linkingnumber integer linking number with respect to the center, NA if undefined
# signcount integer 4-vector with -,0,+ and total sign counts. The total is N*(N-1)/2
# signsurf sign of linking number of center with the surface, +1, -1, or 0, or NA
# starshaped surface is starshaped, at the center, logical and often NA
# injective surface is injective, logical and often NA
# if angles==TRUE, then anglesDH is added:
# anglesDH data frame with dihedral angle data. N*(N-1) rows and these columns:
# pivot integer index of the generator where dihedral angle pivots, the pivot of the "hinge" edge
# wing1 index of the generator forming wing 1 of the "hinge"
# wing2 index of the generator forming wing 2 of the "hinge"
# level the # of 1s in the level where the edge ends
# angle the external dihedral angle, where positive is convex and negative is concave
# edgemid midpoint of the edge, in the *centered* polyhedron
# if the 2-trans surface is starshaped, then these additional items are added to the output
#
# areadeficient sum of the areas of the deficient pgrams. For both halves of the surface.
# volumedeficient sum of the deficient volume, between surface and zonohedron. For both halves of the surface and zono.
getmetrics2trans <- function( x, angles=TRUE, more=TRUE, tol=5.e-12 )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
pgramdf = allpgramcenters2trans( x )
if( is.null(pgramdf) ) return(NULL)
matsimple = getsimplified( x$matroid )
#centermat = t(pgramdf$center)
#if( ncol(centermat) != ncol(matsimple$crossprods) )
# {
# log.string( ERROR, "ncol(centermat)=%d != %d=ncol(matsimple$crossprods).",
# ncol(centermat) != ncol(matsimple$crossprods) )
# return(NULL)
# }
#dotvec = .colSums( centermat * matsimple$crossprods, nrow(centermat), ncol(centermat) )
# get the linking number with respect to the center
linkingnum = linkingnumber( x, pgramdf ) # , , c(0,0,0) )
# alternate still needs work
#linkingnum = linkingnumber2( x ) # , , c(0,0,0) )
betavec = pgramdf$beta
countneg = sum( betavec < -tol )
countpos = sum( tol < betavec )
countzero = length(betavec) - countneg - countpos
if( is.finite(linkingnum) && abs(linkingnum) == 1 )
{
# the usual case
if( 0<countneg && 0<countpos )
# mixed signs
starshaped = FALSE
else if( countzero == 0 )
# all negative or all positive
starshaped = TRUE
else
# some dot products are "zero", degenerate and not mixed
starshaped = NA # logical
}
else
# if the linking number is not +1 or -1, the surface cannot be starshaped
starshaped = FALSE
ground = getground( matsimple )
if( FALSE && is.finite(starshaped) && ! starshaped )
{
if( countneg <= countpos )
mask = betavec < 0
else
mask = 0 < betavec
gndpair = ground[pgramdf$idxpair]
dim(gndpair) = c( length(gndpair)/2, 2 )
df = cbind( pgramdf, gndpair, betavec )
print( df[mask, ] )
}
# signsurf is the linking number of the 2-transition polyhedral surface and 0
# it is defined whenever 0 is not in the surface, even when the surface has self-intersections and the map is not injective
# but we do not really have time to determine the sign accurately,
# so choose the dominant facet sign and it will usually be correct
# crossprod lookup is outward when signsurf=1 and inward when signsurf=-1
signsurf = sign( linkingnum )
if( is.finite(signsurf) && signsurf < 0 )
{
# use antipodal pgrams instead
# so we can compare beta with the corresponding betamax from the zonohedron,
# and center with centermax from the zonohedron.
# we only do this when starshaped is TRUE, see below
# the pgram normal vector stays the same,
# and when starshaped, it changes from inward pointing to outward pointing
pgramdf$center = -(pgramdf$center)
pgramdf$beta = -(pgramdf$beta)
}
if( FALSE && is.finite(starshaped) && starshaped )
{
# verify that all beta > 0
masknonpos = pgramdf$beta <= tol
if( any(masknonpos) )
{
log_level( ERROR, "internal error. %d of %d beta coeffs are non-positive for strictly starshaped polyhedron.
tol=%g.", sum(masknonpos), length(masknonpos), tol )
return(NULL)
}
}
injective = NA # logical
if( is.finite(linkingnum) && abs(linkingnum)!=1 )
injective = FALSE
else if( is.finite(starshaped) && starshaped )
injective = TRUE
out = list()
out$generators = ncol( getmatrix(matsimple) )
out$pgramdf = pgramdf
out$linkingnumber = linkingnum
out$signcount = c( negative=countneg, zero=countzero, positive=countpos, total=length(betavec) )
out$signsurf = signsurf
out$starshaped = starshaped
out$injective = injective
if( angles && is.finite(signsurf) && signsurf != 0 )
{
# get all the edge dihedral angles
res = edgeangles2trans( x, signsurf )
if( is.null(res) ) return(NULL)
out$anglesDH = res
}
if( more && is.finite(starshaped) && starshaped )
{
# add more columns to out$pgramdf
# beta for the zonohedron, where normal product is maximized. The normal is outward pointing.
betamax = x$facet$beta[ matsimple$hyperplaneidx ]
pgrams = nrow(pgramdf)
if( length(betamax) != pgrams )
{
log_level( ERROR, "internal error. length(betamax)=%d != %d=pgrams.", length(betamax), pgrams )
return(FALSE)
}
# since the surface is starshaped, all out$pgramdf$beta > 0
deficit = betamax - out$pgramdf$beta # always non-negative
out$pgramdf$hyperplaneidx = matsimple$hyperplaneidx
out$pgramdf$betamax = betamax
out$pgramdf$deficit = deficit
# x$facet$center[ , ] is the center of the zonogon facet.
# The next line is only correct for the trivial facets.
# For a non-trivial facet, centermax is set to the center of the zonogon facet,
# which is not the pgram center for any tiling, in general, and incorrect.
# This is fixed in the for() loop below.
centermax = x$facet$center[ matsimple$hyperplaneidx, , drop=FALSE]
# sign modification
# centermax = x$facet$sign[ matsimple$hyperplaneidx ] * centermax # recycling rule used here
.Call( C_timesEqual, centermax, as.double(x$facet$sign[ matsimple$hyperplaneidx ]), 2L ) # multiply in place
# for the non-trivial zonogon facets, centermax needs special treatment
for( k in seq_len( length(x$zonogon) ) )
{
zono = x$zonogon[[k]]
# subgnd has M ints, where M > 2
subgnd = getground(zono$matroid)
# the the length of idx is M*(M-1)/2. the integers are in 1 : N*(N-1)/2
idx = translateallpairs( subgnd, ground )
masksmall = deficit[idx] <= tol
if( any( masksmall ) )
{
# for the maximizing pgrams, with very SMALL deficit,
# use the 2-transition pgrams
# later, the shift will be computed as 0, and deficient will be FALSE
idxsub = idx[ masksmall ]
# assign only those centers for which deficit is SMALL
centermax[ idxsub, ] = out$pgramdf$center[ idxsub, ]
}
masklarge = ! masksmall
if( any( masklarge ) )
{
# for the maximizing pgrams, with LARGE deficit
# use the tiling pgrams, for the standard tiling of the zonogon facet
center3D = gettilecenters3D( x, k )
# correct the signs
# center3D = x$signtile[[k]] * center3D # recycling rule used here
.Call( C_timesEqual, center3D, as.double( x$signtile[[k]] ), 2L ) # multiply in place
idxsub = idx[ masklarge ]
# assign only those centers for which deficit is LARGE
centermax[ idxsub, ] = center3D[ masklarge, ]
}
}
out$pgramdf$centermax = centermax
out$pgramdf$shift = sqrt( .rowSums( (out$pgramdf$centermax - out$pgramdf$center)^2 , length(betamax), 3 ) )
deficient = tol <= out$pgramdf$shift # & out$pgramdf$deficit != 0
out$pgramdf$deficient = deficient
if( FALSE )
{
# print some tracing data
for( k in seq_len( length(x$zonogon) ) )
{
cat( "------------------- facet ", k, " -----------------\n" )
zono = x$zonogon[[k]]
subgnd = getground(zono$matroid)
idx = translateallpairs( subgnd, ground )
print( out$pgramdf[idx, ] )
}
}
# compute area of all the pgrams
crossprodsraw = allcrossproducts( getmatrix(matsimple) )
out$pgramdf$area = sqrt( .colSums( crossprodsraw^2, nrow(crossprodsraw), ncol(crossprodsraw) ) )
out$areadeficient = 2*sum( out$pgramdf$area[deficient] )
out$volumedeficient = (2/3) * sum( out$pgramdf$area[deficient] * out$pgramdf$deficit[deficient] )
out$volume = (2/3) * sum( out$pgramdf$area * betavec )
if( FALSE )
{
mask = tol <= out$pgramdf$shift
cat( "range of {shift < tol} =", range( out$pgramdf$shift[ ! mask ] ), " (tol=", tol, ')\n' )
cat( "range of {tol < shift} =", range( out$pgramdf$shift[ mask ] ), '\n' )
}
}
return( out )
}
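# Example (illustrative sketch, not run; assumes a zonohedron x built elsewhere,
# e.g. with the package constructor zonohedron()):
#   metrics = getmetrics2trans( x )
#   metrics$linkingnumber   # +1 or -1 when the surface winds once around the center
#   metrics$starshaped      # TRUE when all the facet plane constants beta have the same sign
#   if( isTRUE(metrics$starshaped) )  metrics$volumedeficient  # volume between surface and zonohedron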
# inside2trans()
#
# x a zonohedron object
# p Mx3 matrix, with the M query points in the rowSums
# value a dataframe with columns
# p the given Mx3 input matrix
# inside TRUE means the linking number with the 2-transition surface is non-zero
# linkingnumber the linking number of the point w.r.t. the surface
# distance distance from the point to the surface;
# negative or 0 means inside, and positive means in the exterior.
# NOTE: if positive then the distance is only approximate.
# timecalc time to do the calculation, in sec
inside2trans <- function( x, p ) # tol=5.e-12
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It is not a zonohedron." )
return(NULL)
}
p = prepareNxM( p, 3 )
if( is.null(p) ) return(NULL)
pgramdf = allpgramcenters2trans( x )
if( is.null(pgramdf) ) return(NULL)
# subtract x$center from all the given points
#point_centered = duplicate( p )
#res = .Call( C_plusEqual, point_centered, -x$center, 1L ) # changes point_centered in place
#if( is.null(res) ) return(NULL)
point_centered = .Call( C_sumMatVec, p, -x$center, 1L )
matsimp = getsimplified( x$matroid )
matgen = getmatrix(matsimp)
m = nrow(p)
inside = rep( NA, m ) # logical
linknum = rep( NA_integer_, m )
distance = rep( NA_real_, m )
timecalc = rep( NA_real_, m )
for( k in 1:m )
{
time_start = gettime()
distance[k] = .Call( C_dist2surface, matgen, pgramdf$idxpair, pgramdf$center, pgramdf$cross, point_centered[k, ] )
if( distance[k] != 0 )
linknum[k] = linkingnumber( x, pgramdf, point_centered[k, ] )
# else if distance[k]==0 we just leave linknum[k] as it is, which is NA_integer_
inside[k] = (linknum[k] != 0L)
timecalc[k] = gettime() - time_start
}
rnames = rownames(p)
if( is.null(rnames) || anyDuplicated(rnames) ) rnames = 1:m
out = data.frame( row.names=rnames )
out$p = p
out$distance = distance
out$linkingnumber = linknum
out$inside = inside
out$timecalc = timecalc
return( out )
}
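# Example (illustrative sketch, not run; x a zonohedron as above):
#   df = inside2trans( x, rbind( getcenter(x), c(0,0,0) ) )
#   # the center should be inside, with a non-zero linking number;
#   # the black point (0,0,0) lies on the surface, so its distance is 0 and its linking number is NA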
# x a zonohedron object
# type 'e' for edges, 'f' for facets, 'p' for points (centers of facets)
# ecol edge color
# econc if TRUE then concave edges are overdrawn thick and red
# fcol color used for the coincident facets
# falpha opacity used for the coincident facets
# level integer vector of pgram levels to draw, each in 0:(numgen-2); NULL means draw all levels
# normals if TRUE then facet normals are drawn
# both if TRUE draw both halves
# bgcol background color
# add if TRUE then add to an existing plot
plot2trans <- function( x, type='ef', ecol='black', econc=FALSE,
fcol='yellow', falpha=0.5, level=NULL,
normals=FALSE, both=TRUE, bgcol="gray40", add=FALSE, ... )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
{
log_level( ERROR, "Package 'rgl' cannot be loaded. It is required for plotting the zonohedron." )
return(FALSE)
}
matsimp = getsimplified( x$matroid )
matgen = getmatrix(matsimp)
numgen = ncol(matgen)
if( ! is.null(level) )
{
# check validity of level
ok = all( level %in% (0:(numgen-2L)) )
if( ! ok )
{
log_level( ERROR, "argument level is invalid. All values must be integers in [0,%d].", numgen-2 )
return(FALSE)
}
}
if( add )
{
if( rgl::cur3d() == 0 )
{
log_level( ERROR, "Cannot add surface to plot, because there is no rgl window open." )
return(FALSE)
}
}
else
{
# start 3D drawing
rgl::bg3d( color=bgcol )
white = 2 * x$center
#cube = rgl::scale3d( rgl::cube3d(col="white"), center[1], center[2], center[3] )
#cube = rgl::translate3d( cube, center[1], center[2], center[3] )
rgl::points3d( 0, 0, 0, col='black', size=10, point_antialias=TRUE )
rgl::points3d( white[1], white[2], white[3], col='white', size=10, point_antialias=TRUE )
rgl::points3d( x$center[1], x$center[2], x$center[3], col='gray50', size=10, point_antialias=TRUE )
# exact diagonal
rgl::lines3d( c(0,white[1]), c(0,white[2]), c(0,white[3]), col=c('black','white'), lwd=3, lit=FALSE )
}
gndgen = getground(matsimp)
pgramdf = allpgramcenters2trans( x )
#edgesok = TRUE # ! is.null(metrics$anglesDH)
#if( grepl( 'e', type ) && ! edgesok )
# log.string( WARN, "Cannot draw edges because the dihedral angles are not available." )
doedges = grepl( 'e', type )
if( doedges && econc ) #&& edgesok )
{
metrics = getmetrics2trans( x, tol=1.e-12 )
if( is.null(metrics) )
return(FALSE)
anglesDH = metrics$anglesDH
#colvec = ifelse( 0 <= anglesDH$angle, ecol, 'red' )
#lwdvec = ifelse( 0 <= anglesDH$angle, 1L, 3L )
pivotmat = t( matgen[ , anglesDH$pivot ] )
point0 = anglesDH$edgemid - 0.5*pivotmat
point1 = anglesDH$edgemid + 0.5*pivotmat
xyz = rbind( point0, point1 )
m = nrow(anglesDH)
perm = 1:(2*m)
dim(perm) = c(m,2L)
perm = t(perm)
dim(perm) = NULL
# print( perm )
xyz = xyz[ perm, ]
xyzdisp = .Call( C_sumMatVec, xyz, x$center, 1L )
#rgl::segments3d( xyzdisp, col=ecol, lwd=1 )
conmask = (anglesDH$angle < 0) #; print( conmask )
if( econc && any(conmask) )
{
mask2 = rep(conmask,2)
dim(mask2) = c(m,2L)
mask2 = t(mask2)
dim(mask2) = NULL
rgl::segments3d( xyzdisp[mask2, ], col='red', lwd=5 )
}
if( both )
{
xyzdisp = .Call( C_sumMatVec, -xyz, x$center, 1L )
#rgl::segments3d( xyzdisp, col=ecol, lwd=1 )
if( econc && any(conmask) )
rgl::segments3d( xyzdisp[mask2, ], col='red', lwd=5 )
}
}
if( grepl( 'f', type ) )
{
# draw filled quads
pgrams = nrow(pgramdf)
step = 4
quadmat = matrix( 0, nrow=step*pgrams, ncol=3 )
for( i in 1:pgrams )
{
center = pgramdf$center[i, ]
edge = matgen[ , pgramdf$idxpair[i, ] ] # 3x2 matrix
k = step*(i-1)
quadmat[k+1, ] = center - 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[k+2, ] = center - 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+3, ] = center + 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+4, ] = center + 0.5 * edge[ , 1] - 0.5*edge[ , 2]
}
if( ! is.null(level) )
{
levelvec = pgramdf$idxpair[ ,2] - pgramdf$idxpair[ ,1] - 1L
# repeat each value step (4) times
levelvec = matrix( levelvec, nrow=step, ncol=length(levelvec), byrow=TRUE )
dim(levelvec) = NULL # back to vector
levelmask = levelvec %in% level
levelmaskanti = (numgen-2L - levelvec) %in% level
}
if( is.null(level) )
xyz = .Call( C_sumMatVec, quadmat, x$center, 1L )
else
xyz = .Call( C_sumMatVec, quadmat[levelmask, ], x$center, 1L )
rgl::quads3d( xyz, col=fcol, alpha=falpha, lit=TRUE ) # quad filled
if( doedges )
rgl::quads3d( xyz, col=ecol, lwd=1, front='lines', back='lines', lit=FALSE ) # quad edges
if( both )
{
if( is.null(level) )
xyz = .Call( C_sumMatVec, -quadmat, x$center, 1L )
else
xyz = .Call( C_sumMatVec, -quadmat[levelmaskanti, ], x$center, 1L )
rgl::quads3d( xyz, col=fcol, alpha=falpha, lit=TRUE ) # quad filled
if( doedges )
rgl::quads3d( xyz, col=ecol, lwd=1, front='lines', back='lines', lit=FALSE ) # quad edges
}
}
if( grepl( 'p', type ) )
{
# draw centers
xyz = .Call( C_sumMatVec, pgramdf$center, x$center, 1L )
rgl::points3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col='black', size=3, point_antialias=TRUE )
if( both )
{
xyz = .Call( C_sumMatVec, -pgramdf$center, x$center, 1L )
rgl::points3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col='black', size=3, point_antialias=TRUE )
}
}
if( normals )
{
xyz = .Call( C_sumMatVec, pgramdf$center, x$center, 1L )
for( i in 1:nrow(xyz) )
rgl::arrow3d( xyz[i, ], xyz[i, ] + pgramdf$cross[i, ], type="lines", col="black" )
}
return( invisible(TRUE) )
}
# arguments:
#
# N dimension of the cube, must be a positive integer
# crange range for the count of +1/2s for the edges, does not affect the vertex
# type 'both' means both Type1 (BP) and Type2 (BS)
# can also be 'BP' or 'BS', to select just one type
#
# returns list with components:
# N the input N
# vertex (N*(N-1)+2)x2 integer matrix with code for the vertex
# the 1st int is the # of ones, and the 2nd is the starting position
# edge (2N*(N-2) + 2N) x 2 integer matrix with starting and ending index (in vertex) of the edges
# this number applies only when crange=c(0L,N)
#
trans2subcomplex <- function( N, crange=c(0L,N), type='both' )
{
ok = length(N)==1 && 0<N
if( ! ok )
{
log_level( ERROR, "N is invalid." )
return(NULL)
}
ok = is.numeric(crange) && length(crange)==2 && 0<=crange[1] && crange[1]<crange[2] && crange[2]<=N
if( ! ok )
{
log_level( ERROR, "crange is invalid." )
return(NULL)
}
vertex = .Call( C_trans2vertex, N )
colnames(vertex) = c( "count", "start" )
out = list()
out$N = N
out$vertex = vertex
out$edge = .Call( C_trans2edge, N, crange )
if( type != 'both' )
{
rsums = rowSums(vertex)
if( toupper(type) == 'BP' )
vvalid = rsums <= N+1
else if( toupper(type) == 'BS' )
vvalid = N+1 <= rsums | vertex[ ,2]==1 # add the vertices whose 1s start at position 1
else
{
log_level( ERROR, "type=%s in invalid.", type )
return(NULL)
}
vvalid = vvalid | is.na(rsums) # is.na() is for the poles
evalid = vvalid[ out$edge[ ,1] ] & vvalid[ out$edge[ ,2] ]
out$edge = out$edge[ evalid, ]
}
# print( out$edge )
return(out)
}
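# Example (illustrative, not run), using the counts documented above:
#   sc = trans2subcomplex( 4 )
#   # nrow(sc$vertex) should be 4*3 + 2 = 14, and with the default crange
#   # nrow(sc$edge) should be 2*4*(4-2) + 2*4 = 24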
# parameters:
# N the dimension of the cube, must be a positive integer
# count integer M-vector with the number of 1s in the vertex
# start integer M-vector with the starting index of the run of 1s, 1-based
#
# returns an MxN matrix where the i'th row is the desired vertex of the N-cube, [-1/2,1/2]^N
vertexfromcode <- function( N, count, start )
{
M = length(count)
if( length(start) != M ) return(NULL)
out = .Call( C_vertexfromcode, N, count, start )
return(out)
}
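# Example (illustrative, not run; the computation is done in compiled code):
#   vertexfromcode( 4, 2L, 2L )
#   # expected: a 1x4 matrix with the single vertex c(-1/2, 1/2, 1/2, -1/2),
#   # i.e. a run of two +1/2s starting at position 2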
# zono a zonohedron, whose simplified matroid is generated by a 3 x N matrix of N generators,
# defining a 2-transition surface in R^3
#
# returns a data frame with N*(N-1)/2 rows and these columns
# idxpair integer matrix with 2 columns i and j with 1 <= i < j <= n. 1-based
# gndpair idxpair converted to the ground set, another integer matrix with 2 columns
# center real matrix with 3 columns containing the corresponding pgram center,
# within the centered zonohedron. These are computed efficiently using a cumulative matrix sum technique.
# cross a unit normal for the pgram, equal to the normalized cross-product of the 2 generators,
# and directly copied from the crossprods member of the matroid
# beta constant of the plane equation of the pgram. These can be + or - or 0.
#
# the row order is the same as the column order returned by allcrossproducts()
allpgramcenters2trans <- function( zono ) #, centered=TRUE )
{
matsimple = getsimplified(zono$matroid)
matgen = getmatrix(matsimple)
#ok = is.numeric(matgen) && is.matrix(matgen)
#if( ! ok ) return(NULL)
n = ncol(matgen)
matcum = .Call( C_cumsumMatrix, matgen, 2L )
idxpair = .Call( C_allpairs, n )
colnames(idxpair) = c('i','j')
# pgrams = nrow(idxpair) # N(N-1)/2
# center is loaded with the *uncentered* coords
center = .Call( C_allpgramcenters2trans, matgen, matcum )
if( TRUE )
{
# translate from original to the *centered* zonohedron
centerzono = matcum[ ,n] / 2
#center = .Call( C_sumMatVec, center, -centerzono, 1L )
.Call( C_plusEqual, center, -centerzono, 1L ) # change center[,] in place. Might be a tiny bit faster
# print( str(center) )
}
out = data.frame( row.names=1:nrow(idxpair) )
out$idxpair = idxpair
ground = getground( matsimple )
gndpair = ground[idxpair]
dim(gndpair) = dim(idxpair)
out$gndpair = gndpair
out$center = center
# in the next line, matsimple$crossprods is 3 x N(N-1)/2
out$cross = t(matsimple$crossprods)
out$beta = .rowSums( center * out$cross, nrow(center), ncol(center) )
return( out )
}
# zono the zonohedron
# pgramdf pgram data frame, as returned from allpgramcenters2trans(zono)
# point point from which to take the linking number, in centered zono coordinates
# the default is the center of symmetry
#
# returns the integer linking number.
# if point[] is a vertex of the surface, it returns NA_integer_
linkingnumber <- function( zono, pgramdf=NULL, point=c(0,0,0) )
{
matsimp = getsimplified( zono$matroid )
matgen = getmatrix(matsimp)
if( is.null(pgramdf) )
{
pgramdf = allpgramcenters2trans( zono )
if( is.null(pgramdf) ) return(NULL)
}
out = .Call( C_linkingnumber, matgen, pgramdf$idxpair, pgramdf$center, point )
return( out )
}
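# Example (illustrative sketch, not run; x a zonohedron as above):
#   linkingnumber( x )                         # at the center of symmetry; +1 or -1 in the usual case
#   linkingnumber( x, point=getcenter(x) )     # the white pole is a vertex of the surface, so NA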
linkingnumber2 <- function( zono, point=c(0,0,0) )
{
matsimp = getsimplified( zono$matroid )
matgen = getmatrix(matsimp)
matcum = cbind( 0, .Call( C_cumsumMatrix, matgen, 2L ) )
out = .Call( C_linkingnumber2, matcum, point )
return( out )
}
# k1 and k2 distinct integers in 1:n, not in any specific order
# crossprods 3 x N*(N-1)/2 matrix of precomputed normalized cross products
crossprodlookup <- function( k1, k2, n, crossprods )
{
s = sign( k2 - k1 )
if( s < 0 )
{
# swap so that k1 < k2
temp=k1 ; k1=k2 ; k2=temp
}
cp = s * crossprods[ , (k1-1)*n - ((k1)*(k1+1))/2 + k2 ] #; cat( "cp=", cp )
return( cp )
}
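# For k1 < k2, the column index (k1-1)*n - k1*(k1+1)/2 + k2 is the position of the pair (k1,k2)
# in the usual all-pairs order (1,2),(1,3),...,(1,n),(2,3),...
# Example: with n=5, the pair (2,4) maps to column (2-1)*5 - 2*3/2 + 4 = 6,
# and indeed (2,4) is the 6th pair in that order.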
# k0 index of the pivot edge
# k1, sign1 index and sign of wing #1
# k2, sign2 index and sign of wing #2
# matgen 3 x N matrix of edge generators
# crossprods 3 x N*(N-1)/2 matrix of precomputed normalized cross products
hingeangle <- function( k0, k1, sign1, k2, sign2, matgen, crossprods )
{
n = ncol(matgen)
signwings = sign1 * sign2
cp1 = signwings * crossprodlookup( k1, k0, n, crossprods ) # s * crossprods[ , PAIRINDEX( k1, k0, n ) ]
cp2 = crossprodlookup( k0, k2, n, crossprods ) #s * crossprods[ , PAIRINDEX( k0, k2, n ) ]
theta = angleBetween( cp1, cp2, unitized=TRUE ) #; cat( "theta=", theta, '\n' )
cp = signwings * crossprodlookup( k1, k2, n, crossprods ) # s * crossprods[ , PAIRINDEX( k1, k2, n ) ] ; cat( "cp=", cp )
s = sign( sum( matgen[ ,k0] * cp ) ) # ; cat( " gen=", matgen[ ,k], " s=", s, '\n' )
return( s * theta )
}
# zono a zonohedron
# signsurf if k1<k2 then the crossprod lookup is outward when signsurf=1 and inward when signsurf=-1
# returns data.frame with N*(N-1) rows and these columns
# pivot integer index of the generator where dihedral angle pivots, the pivot of the "hinge"
# wing1 index of the generator forming wing 1 of the "hinge"
# wing2 index of the generator forming wing 2 of the "hinge"
# level the # of 1s in the level where the edge ends
# angle the external dihedral angle, where positive is convex and negative is concave
# edgemid midpoint of the edge, in the *centered* polyhedron
edgeangles2trans <- function( zono, signsurf )
{
matsimp = getsimplified( zono$matroid )
matgen = getmatrix( matsimp )
crossprods = matsimp$crossprods
ok = is.numeric(matgen) && is.matrix(matgen) && nrow(matgen)==3
if( ! ok ) return(NULL)
gensum = .Call( C_cumsumMatrix, matgen, 2L )
n = ncol(matgen)
knext = c( 2L:n, 1L )
kprev = c( n, 1L:(n-1L) )
hinges = n*(n-1L) # later n
pivot = rep( NA_integer_, hinges )
wing1 = matrix( NA_integer_, hinges, 2 ) ; colnames(wing1) = c( "index", "sign" )
wing2 = matrix( NA_integer_, hinges, 2 ) ; colnames(wing2) = colnames(wing1)
level = rep( NA_integer_, hinges )
angle = rep( NA_real_, hinges )
edgemid = matrix( NA_real_, hinges, 3 )
# group #1 is for the bottom level 0, for black and white points
for( k in 1:n )
{
pivot[k] = k
k1 = kprev[k]
k2 = knext[k]
wing1[k, ] = c(k1,+1L)
wing2[k, ] = c(k2,+1L)
level[k] = 0L
angle[k] = hingeangle( k, k1, +1, k2, +1, matgen, crossprods )
edgemid[k, ] = 0.5 * matgen[ ,k]
}
# group #2 is for the higher levels, away from the black point
kwrap = rep( 1:n, 2 )
white = gensum[ ,n] #; cat( "white=", white, '\n' )
count = n
for( shift in 1:(n-2) )
{
for( k in 1:n )
{
k1 = kwrap[ k+shift ]
k2 = kwrap[ k1+1 ]
count = count+1L
pivot[count] = k
wing1[count, ] = c(k1,-1L)
wing2[count, ] = c(k2,+1L)
#cat( "k=", k, " k2=", k2, '\n' )
mid = 0.5 * matgen[ ,k]
level[count] = shift
if( k+shift <= n )
{
mid = mid + (gensum[ ,k1] - gensum[ ,k])
}
else
{
# wrapped around
mid = mid + (gensum[ ,k-1L] - gensum[ ,k1])
mid = white - mid
}
angle[count] = hingeangle( k, k1, -1, k2, +1, matgen, crossprods )
#cat( "mid=", mid, '\n' )
edgemid[count, ] = mid
}
}
out = data.frame( row.names=1:hinges )
out$pivot = pivot
out$wing1 = wing1
out$wing2 = wing2
out$level = level
out$angle = signsurf * angle
out$edgemid = .Call( C_sumMatVec, edgemid, -white/2, 1L ) # translate to the *centered* polyhedron
return( out )
}
# x a zonohedron
# hpidx index of a non-trivial hyperplane in x
#
# returns TRUE or FALSE
is_2transfacetNT <- function( x, hpidx )
{
matsimple = getsimplified(x$matroid)
numgen = length( matsimple$hyperplane[[hpidx]] )
if( numgen <= 2 )
{
log_level( WARN, "internal error. hpidx=%d is a trivial %d-point hyperplane.", hpidx, numgen )
return( FALSE )
}
zgon = x$zonogon[[hpidx]]
if( ! is_salient(zgon) ) return(FALSE)
# check that the generators are monotone ordered by angle
mat = getmatrix( getsimplified(zgon$matroid) )
theta = atan2( mat[2, ], mat[1, ] )
# rotate so facet0 is at theta=0
theta = theta - theta[ zgon$facet0[1] ] #; print( theta )
# wrap to range[-pi,pi]
theta = ((theta+pi) %% (2*pi)) - pi #; print( theta )
perm = order( theta )
n = length(perm)
monotone = all(perm==1:n) || all(perm==n:1)
if( ! monotone ) return(FALSE)
# check that the generators of the zonogon are contiguous in those of the zonohedron, with wrap-around (cyclic)
ground = getground(matsimple) # strictly increasing
subground = getground( getsimplified(zgon$matroid) )
if( ! is_contiguous( subground, ground ) ) return( FALSE )
return( TRUE )
}
is_contiguous <- function( subground, ground, cyclic=TRUE )
{
idx = match( subground, ground ) #; print(idx)
if( any( is.na(idx) ) )
{
log_level( WARN, "internal error. ground set of non-trivial facet is not a subset of the zonohedron ground set." )
return( FALSE )
}
diffidx = diff( idx )
count1 = sum( diffidx == 1 )
# m = length(ground)
n = length(subground)
if( cyclic )
out = (count1 == n-1) || ( (count1 == n-2) && (sum(diffidx==(1-length(ground))) == 1) )
else
out = (count1 == n-1)
return( out )
}
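# Example (plain R, verifiable by hand):
#   is_contiguous( c(5L,6L,1L), 1:6 )                  # TRUE, the run {5,6,1} wraps around cyclically
#   is_contiguous( c(5L,6L,1L), 1:6, cyclic=FALSE )    # FALSE, without the wrap-around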
# idxpair pair of distinct integer indexes, between 1 and n. NOT verified
# alpha pair of points in [0,1]. NOT verified
# n integer n >= 3. NOT verified
#
# returns 2-transition point in n-cube with the given data
pcubefromdata <- function( idxpair, alpha, n )
{
out = numeric( n )
out[idxpair] = alpha
if( idxpair[1] < idxpair[2] )
{
# Type I
if( idxpair[1]+1 <= idxpair[2]-1 ) out[ (idxpair[1]+1):(idxpair[2]-1) ] = 1
}
else
{
# Type II
if( idxpair[1]+1 <= n ) out[ (idxpair[1]+1):n ] = 1
if( 1 <= idxpair[2]-1 ) out[ 1:(idxpair[2]-1) ] = 1
}
return( out )
}
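# Examples (plain R, verifiable by hand):
#   pcubefromdata( c(2L,5L), c(0.25,0.75), 6 )   # Type I:  c(0, 0.25, 1, 1, 0.75, 0)
#   pcubefromdata( c(5L,2L), c(0.25,0.75), 6 )   # Type II: c(1, 0.75, 0, 0, 0.25, 1)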
# x a zonohedron object
# base basepoint of all the rays, a 3-vector
# direction M x 3 matrix with non-zero directions in the rows
# invert return 2-transition point in the cube
# plot add 3D plot
# tol tolerance for being strictly starshaped, and for intersection with a pole
#
# value a dataframe with columns
# base given basepoint of all the rays (all the same)
# direction given directions of the rays
# idxpair the 2 indexes of the generators of the pgram that the ray intersects
# alpha the 2 coordinates of the intersection point within the pgram
# tmax ray parameter of intersection with pgram
# point point of intersection of the ray and the pgram
# iters the number of pgrams searched, until the desired one was found
# timetrace time to do the raytrace, in seconds
raytrace2trans <- function( x, base, direction, invert=FALSE, plot=FALSE, tol=1.e-12, ... )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
ok = is.numeric(base) && length(base)==3 && all( is.finite(base) )
if( ! ok )
{
log_level( ERROR, "base is invalid. It must be a numeric vector of length 3, and all entries finite." )
return(NULL)
}
center = getcenter(x)
base_centered = base - center
base_at_pole = all(base_centered == -center) || all(base_centered == center)
if( base_at_pole )
{
log_level( ERROR, "The base=(%g,%g,%g) of the rays is at a pole, which cannot be processed at this time.",
base[1], base[2], base[3] )
return(NULL)
}
direction = prepareNxM( direction, 3 )
if( is.null(direction) ) return(NULL)
if( any( x$matroid$multiplesupp$mixed ) )
{
log_level( ERROR, "In the zonohedron generators, one of the multiple groups has mixed directions. The 2-transition surface cannot be processed in this version." )
return(NULL)
}
symmetric = all( base_centered == 0 )
pgramdf = allpgramcenters2trans(x)
if( is.null(pgramdf) ) return(NULL)
linknum = linkingnumber( x, pgramdf, base_centered )
if( is.na(linknum) )
{
log_level( ERROR, "The linking number is undefined for point=(%g,%g,%g).",
base[1], base[2], base[3] )
return(NULL)
}
if( abs(linknum) != 1 )
{
log_level( ERROR, "The linking number at basepoint=(%g,%g,%g) is %d, which is not +1 or -1. The ray intersection is never unique.",
base[1], base[2], base[3], linknum )
return(NULL)
}
matsimple = getsimplified( x$matroid )
if( ! all( x$matroid$multiplesupp$contiguous ) )
{
log_level( WARN, "The 2-transition surface is not starshaped at any point, because one of the multiple groups is not contiguous. The ray intersection may not be unique." )
}
else
{
if( linknum == -1 )
{
# use antipodal facets instead
# so we can compare beta with the corresponding betamax from the zonohedron,
# we only do this when starshaped is TRUE, see below
#cat( "linknum =", linknum, " so reversing beta.\n" )
# pgramdf$center = -(pgramdf$center)
pgramdf$beta = -(pgramdf$beta)
}
betamin = min( pgramdf$beta )
if( betamin < tol )
{
log_level( WARN, "The 2-transition surface is not strictly starshaped at any point, because min(beta) = %g < %g. The ray intersection may not be unique.",
betamin, tol )
# return(NULL)
}
else
{
basedotnormal = as.double( base_centered %*% matsimple$crossprods ) #; print( str(basedotnormal) )
ok = all( abs(basedotnormal) < pgramdf$beta - tol )
if( ! ok )
{
log_level( WARN, "The 2-transition surface is not strictly starshaped at point=(%g,%g,%g). The ray intersection may not be unique.",
base[1], base[2], base[3] )
#return(NULL)
}
}
}
matgen = getmatrix( matsimple )
m = ncol(matgen)
# extend pgramdf with antipodal data
#pgramdf = pgramdf_plus( pgramdf, base_centered )
#print( str(pgramdf) )
# extend pairs in usual i<j order, with new pairs where i>j
# the number of rows is m*(m-1)
idxpair_plus = rbind( pgramdf$idxpair, pgramdf$idxpair[ ,2:1] )
rays = nrow(direction)
matcum = cbind( 0, .Call( C_cumsumMatrix, matgen, 2L ) ) # this has m+1 columns
# print( matcum )
tmax = rep(NA_real_,rays)
idxpair = matrix(NA_integer_,rays,2)
alpha = matrix(NA_real_,rays,2)
point = matrix(NA_real_,rays,3)
iters = rep( NA_integer_, rays )
transitions = rep( NA_integer_, rays )
timetrace = rep(NA_real_,rays)
if( invert ) pcube = matrix( NA_real_, rays, m )
for( k in 1:rays )
{
# cat( "k =", k, '\n' ) ; flush.console()
time_start = gettime()
#print( matgen[ ,cw_init[1], drop=F ] )
# get current direction
dir = direction[k, ]
if( all(dir==0) ) next # ray is undefined
dat = raytrace2trans_single( base, dir, center, matgen, matcum, pgramdf, idxpair_plus, tol=tol )
timetrace[k] = gettime() - time_start
# print( str(dat) )
if( ! is.null(dat) )
{
idxpair[k, ] = dat$idxpair
alpha[k, ] = dat$alpha
tmax[k] = dat$tmax
point[k, ] = dat$point
iters[k] = dat$iters
transitions[k] = 2L
if( invert ) pcube[k, ] = pcubefromdata( dat$idxpair, dat$alpha, m )
}
#print( (dat$XYZ - base) / rtdata$direction[k, ] )
}
rnames = rownames(direction)
if( is.null(rnames) || anyDuplicated(rnames) ) rnames = 1:rays
# convert idxpair to gndpair, for output
gndpair = getground( matsimple )[ idxpair ]
dim(gndpair) = dim(idxpair)
out = data.frame( row.names=rnames )
out$base = matrix( base, rays, 3, byrow=TRUE ) # replicate base to all rows
out$direction = direction
out$gndpair = gndpair
#out$idxpair = idxpair
out$alpha = alpha
out$tmax = tmax
out$point = point
#out$transitions = transitions
out$iters = iters
out$timetrace = timetrace
if( invert ) out$pcube = invertcubepoints( x, pcube )
if( plot )
{
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
log_level( WARN, "Package 'rgl' is required for plotting. Please install it." )
else if( rgl::cur3d() == 0 )
log_level( WARN, "Cannot add raytrace to plot, because there is no rgl window open." )
else
{
xyz = matrix( base, nrow=rays, ncol=3, byrow=TRUE )
xyz = rbind( xyz, point )
perm = 1:(2*rays)
dim(perm) = c(rays,2L)
perm = t(perm)
dim(perm) = NULL
# print( perm )
xyz = xyz[ perm, ]
col = 'red'
rgl::segments3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col=col )
rgl::points3d( base[1], base[2], base[3], col=col, size=5, point_antialias=TRUE )
rgl::points3d( point[ ,1], point[ ,2], point[ ,3], col=col, size=5, point_antialias=TRUE )
out = invisible(out)
}
}
return(out)
}
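# usage sketch, not run. It assumes the package's zonohedron() constructor and the
# built-in colorimetry.genlist data; the direction is an arbitrary illustrative value.
if( FALSE )
{
zono = zonohedron( colorimetry.genlist[[2]] )
# trace a ray from the center of the zonohedron, and invert to a 2-transition cube point
raytrace2trans( zono, base=getcenter(zono), direction=c(0.2,0.3,0.4), invert=TRUE )
}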
# base base of ray, uncentered
# dir direction of ray
# center center of symmetry
# matgen 3 x M matrix of generators
# matcum 3 x M+1 matrix = cumsum of matgen
# pgramdf data frame with idxpair, center, and other variables. not extended with antipodal data
# idxpair_plus integer matrix with 2 columns, extended with antipodal data
# tol tolerance for poletest()
# returns a list with these items:
# idxpair indexes of the generators of the pgram, or both NA if the ray intersects a pole
# alpha 2 coords in [0,1] for point inside the pgram
# iters # of iterations, or 0 when the ray intersects a pgram that intersects a pole
# tmax parameter of ray when it intersects the surface, or NA in case of failure
# point intersection of ray with the surface, or NA in case of failure
raytrace2trans_single <- function( base, dir, center, matgen, matcum, pgramdf, idxpair_plus, tol=1.e-12 )
{
m = ncol(matgen)
base_centered = base - center
# from current dir, compute a 2x3 projection matrix
frame3x3 = goodframe3x3( dir )
matproj = t( frame3x3[ , 1:2 ] )
success = FALSE
iters = 0L
#cat( "------------ dir =", dir, " -------------", '\n' )
######## Phase 1 - test for intersection at or near a pole
# dat = poletest( base_centered, dir, center, matgen, matproj, tol=tol )
dat = poletest_v2( base_centered, dir, center, matproj, tol=tol )
if( ! is.null(dat) )
{
# test for a good intersection at or near a pole
alpha = dat$alpha
if( is.na( dat$idxpair[1] ) )
{
# intersection at a pole
dat$XYZ = (1 + dat$signpole) * center # so either 0 or 2*center
success = TRUE
}
else
{
# test for intersection with a pgram that intersects a pole
ok = all( 0 <= alpha ) && all( alpha <= 1 )
if( ok )
{
# compute XYZ taking advantage of all the 0s
mat3x2 = matgen[ , dat$idxpair ]
if( dat$signpole == -1 )
# near 0
dat$XYZ = as.double( mat3x2 %*% alpha )
else
# near white
dat$XYZ = 2*center - as.double( mat3x2 %*% (1-alpha) ) # complement
# final check on orientation
# (dat$XYZ - base) must be parallel to dir
if( 0 < sum( (dat$XYZ - base)*dir ) )
success = TRUE
}
}
if( success )
{
out = list()
out$idxpair = dat$idxpair
out$alpha = alpha
out$iters = 0L
# redo to match documentation
out$tmax = sum( (dat$XYZ - base)*dir ) / sum( dir^2 )
out$point = base + out$tmax * dir
return( out )
}
}
if( FALSE )
{
# use BRUTE FORCE search in 3D
# only useful for timing comparison
res = findpgram3D( base, dir, center, matgen, pgramdf )
if( ! is.null(res) )
{
# print( str(res) ) ; flush.console()
out = list()
out$idxpair = res$idxpair
out$alpha = res$alpha
out$iters = iters + res$idx
# an alternate way to get XYZ
# no antipodal data, so res$idx <= nrow(pgramdf)
centerpg = pgramdf$center[ res$idx, ]
XYZ = center + centerpg + as.double( matgen[ , res$idxpair ] %*% (res$alpha - 1/2) )
# typical way to get XYZ
# XYZ = XYZfrom2trans( res$idxpair, res$alpha, matgen, matcum ) # ; print( XYZ )
# redo to match documentation
out$tmax = sum( (XYZ - base)*dir ) / sum( dir^2 ) # sqrt( sum( (XYZ - base)^2 ) / sum( dir^2 ) )
out$point = base + out$tmax * dir
return( out )
}
}
# a near-pole intersection did not work
# rotate face centers to align direction with z-axis
temp = pgramdf$center %*% frame3x3
# extend with antipodal centers, the number of rows is now m*(m-1)
#time_bind = gettime()
#centerrot = rbind( temp, -temp )
centerrot = .Call( C_extend_antipodal, temp )
#cat( "bind time =", gettime() - time_bind, '\n' )
baserot = as.double( base_centered %*% frame3x3 )
# cat( "baserot = ", baserot, '\n' )
if( FALSE )
{
# find a good initial pgram2 for iteration
time_initial = gettime()
if( FALSE )
{
qpoint = baserot[1:2]
matdiff = pgram2df$center - matrix( qpoint, nrow(pgram2df), 2, byrow=TRUE )
test = .rowSums( matdiff^2, nrow(matdiff), 2L )
# but force skipping of unit vectors in the wrong halfspace
test[ pgram2df$z < 0 ] = Inf
imin = which.min(test)
#cat( "1st imin =", imin, '\n' )
}
# ignore facets whose centers have negative z coords
baserot[3] = max( baserot[3], 0 )
# call optimized C function to get the closest center to qpoint
imin = .Call( C_optimalcenter, centerrot, baserot ) + 1L # 0-based to 1-based
if( length(imin)==0 )
{
# C_optimalcenter() failed !
# all the centers are below baserot[3], which is unusual
# there must be a few large faces, e.g. a cube
# choose the center with largest z
imin = which.max( centerrot[ ,3] )
#cat( "center with largest z: imin =", imin, " initial idxpair0 =", idxpair_plus[imin, ], '\n' )
}
#cat( "imin =", imin, '\n' )
idxpair0 = idxpair_plus[imin, ]
#cat( "idxpair0 =", idxpair0, " centermin =", centerrot[imin, ], '\n' )
#cat( "Initial time =", gettime() - time_initial, '\n' )
##### use custom iteration on 2D pgrams, to find the one that contains the query point
matgen2 = matproj %*% matgen
res = findpgram2D( centerrot, baserot, matgen2, idxpair0 ) # idxpair0
if( ! is.null(res) )
{
out = list()
out$idxpair = res$idxpair
out$alpha = res$alpha
out$iters = iters + res$iters
XYZ = XYZfrom2trans( res$idxpair, res$alpha, matgen, matcum ) # ; print( XYZ )
#XYZ = XYZfrom2trans_slow( res$idxpair, res$alpha, matgen ) ; print( XYZ )
# redo to match documentation
out$tmax = sum( (XYZ - base)*dir ) / sum( dir^2 ) # sqrt( sum( (XYZ - base)^2 ) / sum( dir^2 ) )
out$point = base + out$tmax * dir
return( out )
}
}
# res = findpgram3D( base, dir, center, matgen, pgramdf )
if( TRUE )
{
##### use brute force search over all suitable pgrams,
# but using C function for great speed
# ignore facets whose centers have negative z coords
# baserot[3] = max( baserot[3], 0 )
genrot = crossprod( frame3x3, matgen ) # same as t(frame3x3) %*% matgen
res = findpgram2D_v3( centerrot, baserot, idxpair_plus, genrot )
if( ! is.null(res) )
{
out = list()
out$idxpair = res$idxpair
out$alpha = res$alpha
out$iters = iters + res$idx
XYZ = XYZfrom2trans( res$idxpair, res$alpha, matgen, matcum ) # ; print( XYZ )
#XYZ = XYZfrom2trans_slow( res$idxpair, res$alpha, matgen ) ; print( XYZ )
# redo to match documentation
out$tmax = sum( (XYZ - base)*dir ) / sum( dir^2 ) # sqrt( sum( (XYZ - base)^2 ) / sum( dir^2 ) )
out$point = base + out$tmax * dir
return( out )
}
}
# res = findpgram3D( base, dir, center, matgen, pgramdf )
return( NULL )
}
XYZfrom2trans <- function( idxpair, alpha, matgen, matcum )
{
m = ncol(matgen)
slo = (idxpair[1] - alpha[1]) %% m
shi = (idxpair[2]-1 + alpha[2]) %% m
ilo = as.integer( slo + 1 )
ihi = as.integer( shi + 1 )
XYZlo = matcum[ , ilo ] + (slo-floor(slo))*matgen[ , ilo ]
XYZhi = matcum[ , ihi ] + (shi-floor(shi))*matgen[ , ihi ]
XYZ = as.double( XYZhi - XYZlo )
if( ihi < ilo )
# this is a bandstop, Type II
XYZ = XYZ + matcum[ ,m+1]
return( XYZ )
}
XYZfrom2trans_slow <- function( idxpair, alpha, matgen )
{
pcube = pcubefromdata( idxpair, alpha, ncol(matgen) )
out = as.double( matgen %*% pcube )
return( out )
}
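# consistency sketch, not run: the cumulative-sum version should agree with the slow
# version that expands the full cube point. matgen here is hypothetical, and matcum
# is built exactly as in raytrace2trans() above.
if( FALSE )
{
set.seed(0)
matgen = matrix( runif(18), nrow=3, ncol=6 )
matcum = cbind( 0, .Call( C_cumsumMatrix, matgen, 2L ) )
XYZfrom2trans( c(2,5), c(0.25,0.75), matgen, matcum )
XYZfrom2trans_slow( c(2,5), c(0.25,0.75), matgen ) # should be equal
}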
# base basepoint of the ray, centered
# dir direction of the ray
# pole the "positive" pole, with all coeffs +1/2, the "negative" pole is -pole, centered
# matgen 3 x M matrix of generators
# matproj 2 x 3 projection matrix to plane normal to dir
# tol tolerance for the ray going through a pole
#
# first tests for ray passing within tol of a pole
# next finds which one of the M sectors at that pole the ray intersects
# the 2 alpha coefficients of that intersection point are non-negative
poletest <- function( base, dir, pole, matgen, matproj, tol=0 )
{
time_start = gettime()
#cat( "base =", base, " dir =", dir, " +pole =", pole, '\n' )
pd = sum( pole*dir )
bd = sum( base*dir )
if( abs(pd) <= bd )
{
# both pole and -pole are on the negative side of the projection plane
#cat( "both poles on - side of plane.", '\n' )
return(NULL)
}
# determine signpole -- select either +pole or -pole
if( pd <= bd )
# pole is on the negative side, so -pole is on the positive side
signpole = -1L
else if( -pd <= bd )
# -pole is on the negative side, so +pole is on the positive side
signpole = +1L
else
{
# both -pole and +pole are on the positive side
# choose the closest pole to base, after projection
poleproj = as.double( matproj %*% pole )
baseproj = as.double( matproj %*% base )
signpole = sign( sum(poleproj*baseproj) )
if( signpole == 0 ) signpole = -1
#cat( "both poles on + side of plane. signpos =", signpole, '\n' )
}
#dirpole = sum( dir * pole )
#dirbase = sum( dir * base )
#if( dirpole < dirbase ) return(NULL) # pole is not on + side of the hyperplane
# get the query point
# in this case we choose a translation so the pole is at 0
q = as.double( matproj %*% (base - signpole*pole) )
normq = sum( abs(q) ) # ;
#cat( "signpole =", signpole, " q =", q, "normq =", normq, " tol =", tol, '\n' )
if( normq <= tol )
{
# ray passes right through one of the poles
out = list()
out$signpole = signpole
out$idxpair = c(NA_integer_,NA_integer_)
out$alpha = rep( (signpole+1)/2, 2 ) # -1 -> 0 and +1 -> 1
return( out )
}
# project the generators onto the plane
# at the positive pole (signpole==1) we want to *reverse* direction
gen2 = (-signpole * matproj) %*% matgen # gen2 is 2 x M
# find the norm of all these 2D generators
normgen = .colSums( abs(gen2), nrow(gen2), ncol(gen2) )
if( any( normgen == 0 ) )
{
log_level( FATAL, "Internal error. %d of the %d generators project to 0.",
sum(normgen==0), length(normgen) )
return( NULL )
}
#cat( "normq = ", normq, " max(normgen) =", max(normgen), '\n' )
if( 2 * max(normgen) < normq )
# q is too large
return(NULL)
thetavec = atan2( gen2[2, ], gen2[1, ] )
#cat( "thetavec =", thetavec, '\n' )
thetadiff = diff(thetavec)
#cat( "thetadiff. neg:", sum(thetadiff<0), " pos:", sum(0<thetadiff), " zero:", sum(thetadiff==0), '\n' )
theta = atan2( q[2], q[1] )
# find the generator closest to q in angle
imin = which.min( abs(theta - thetavec) )
#cat( "theta = ", theta, " imin =", imin, " thetavec[imin] =", thetavec[imin], '\n' )
m = length(thetavec)
iprev = ((imin-2L) %% m ) + 1L
inext = ( imin %% m ) + 1L
if( on_arc( theta, c(thetavec[iprev],thetavec[imin]) ) )
iother = iprev
else if( on_arc( theta, c(thetavec[inext],thetavec[imin]) ) )
iother = inext
else
{
log_level( FATAL, "Internal error. Cannot find angular interval containing theta=%g.", theta )
return(NULL)
}
idxpair = c(imin,iother)
# get the order right
if( 0 < signpole )
{
# ray nearly intersects positive pole, so cube point is mostly 1s
# idxpair should be decreasing, except when 1,m
ok = idxpair[2] < idxpair[1]
if( all( idxpair %in% c(1,m) ) ) ok = ! ok
if( ! ok ) idxpair = idxpair[ 2:1 ] # swap
}
else
{
# ray nearly intersects negative pole, so cube point is mostly 0s
# idxpair should be increasing, except when m,1
ok = idxpair[1] < idxpair[2]
if( all( idxpair %in% c(m,1) ) ) ok = ! ok
if( ! ok ) idxpair = idxpair[ 2:1 ] # swap
}
log_level( TRACE, "Found interval idxpair=%d,%d. signpole=%g\n", idxpair[1], idxpair[2], signpole )
mat2x2 = gen2[ , idxpair ] #; print( mat2x2 )
alpha = as.double( solve( mat2x2, q ) )
ok = all( 0 <= alpha )
#cat( "alpha =", alpha, " ok =", ok, '\n' )
if( ! ok )
{
log_level( FATAL, "Internal error. alpha=%g,%g and one is negative.", alpha[1], alpha[2] )
return( NULL )
}
out = list()
out$signpole = signpole
out$idxpair = idxpair
out$alpha = alpha
if( signpole == 1 ) out$alpha = 1 - out$alpha
#cat( "poletest(). time =", gettime() - time_start, '\n' )
return( out )
}
# base basepoint of the ray, centered
# dir direction of the ray
# pole the "positive" pole, with all coeffs +1/2, the "negative" pole is -pole, centered
# matproj 2 x 3 projection matrix to plane normal to dir
# tol tolerance for the ray going through a pole
#
# This is v. 2. It only tests for ray passing within tol of a pole
poletest_v2 <- function( base, dir, pole, matproj, tol=0 )
{
time_start = gettime()
#cat( "base =", base, " dir =", dir, " +pole =", pole, '\n' )
pd = sum( pole*dir )
bd = sum( base*dir )
if( abs(pd) <= bd )
{
# both pole and -pole are on the negative side of the projection plane
#cat( "both poles on - side of plane.", '\n' )
return(NULL)
}
# determine signpole -- select either +pole or -pole
if( pd <= bd )
# pole is on the negative side, so -pole is on the positive side
signpole = -1L
else if( -pd <= bd )
# -pole is on the negative side, so +pole is on the positive side
signpole = +1L
else
{
# both -pole and +pole are on the positive side
# choose the closest pole to base, after projection
poleproj = as.double( matproj %*% pole )
baseproj = as.double( matproj %*% base )
signpole = sign( sum(poleproj*baseproj) )
if( signpole == 0 ) signpole = -1
#cat( "both poles on + side of plane. signpos =", signpole, '\n' )
}
# get the query point
# in this case we choose a translation so the pole is at 0
q = as.double( matproj %*% (base - signpole*pole) )
normq = sum( abs(q) ) # ;
#cat( "signpole =", signpole, " q =", q, "normq =", normq, " tol =", tol, '\n' )
if( normq <= tol )
{
# ray passes right through one of the poles
out = list()
out$signpole = signpole
out$idxpair = c(NA_integer_,NA_integer_)
out$alpha = rep( (signpole+1)/2, 2 ) # -1 -> 0 and +1 -> 1
return( out )
}
# failed to intersect the pole
return( NULL )
}
pgramdf_plus <- function( pgramdf, base_centered=c(0,0,0) )
{
# extend pgramdf with antipodal data
out = data.frame( row.names=1:nrow(pgramdf) )
out$idxpair = pgramdf$idxpair[ , 2:1] # swap
out$gndpair = pgramdf$gndpair[ , 2:1] # swap
out$center = -(pgramdf$center)
out$beta = -(pgramdf$beta)
out = rbind( pgramdf, out )
if( FALSE )
{
# subtract base_centered from all pgram centers
# these points on the unit sphere will be used later
#xyz = duplicate( out$center )
#res = .Call( C_plusEqual, xyz, -base_centered, 1L ) # changes xyz in place
#if( is.null(res) ) return(NULL)
xyz = .Call( C_sumMatVec, out$center, -base_centered, 1L )
# and then unitize
ok = .Call( C_normalizeMatrix, xyz, 1L ) # changes xyz in place
if( ! ok ) return(NULL)
# add xyz as a new column, to be used later
out$unit = xyz
}
return( out )
}
# given a tiling by pgrams of a part of the plane, and a query point
# find the pgram that contains the query point
#
# centerrot N*(N-1) x 3 matrix with pgram centers,
# only the first 2 columns are used, the 3rd z-coordinate might be used in future testing
# baserot 3-vector with query point, 3rd z-coord not used
# the goal is to find the pgram that contains this point
# matgen2 2 x N matrix of generators
# idxpair0 integer pair to start the search
findpgram2D <- function( centerrot, baserot, matgen2, idxpair0 )
{
time_start = gettime()
n = ncol(matgen2)
if( nrow(centerrot) != n*(n-1) )
{
log_level( ERROR, "nrow(centerrot) = %g != %g = n*(n-1). n=%g", nrow(centerrot), n*(n-1), n )
return(NULL)
}
qpoint = baserot[1:2] #; cat( "qpoint = ", qpoint, '\n' )
# use variable i and j to abbreviate 2 ints in idxpair0
i = idxpair0[1]
j = idxpair0[2]
maxiters = as.integer( max( 0.75*n, 50 ) )
success = FALSE
# make increment and decrement vectors
idx_inc = c(2:n,1)
idx_dec = c(n,1:(n-1))
for( iter in 1:maxiters )
{
mat2x2 = matgen2[ , c(i,j) ]
k = PAIRINDEX_plus( i, j, n )
b = qpoint - centerrot[k,1:2]
alpha = as.double( solve( mat2x2, b ) )
absalpha = abs(alpha)
#cat( "===============", " iter ", iter, " =================\n" )
#cat( "i =", i, " j =", j, " alpha =", alpha, " center=", centerrot[k,1:2], " d2 =", sum(b^2), " z_delta =", centerrot[k,3]-baserot[3], '\n' )
#cat( " det =", det2x2(mat2x2), '\n' )
if( all( absalpha <= 1/2 ) )
{
# qpoint is inside the pgram ! We got it !
success = TRUE
break
}
# move to the next pgram2
if( which.max(absalpha) == 1 )
{
# change i
if( 0 < alpha[1] )
{
i = idx_dec[i] # decrement i
if( i == j ) j = idx_dec[j] # decrement j too !
}
else
{
i = idx_inc[i] # increment i
if( i == j ) j = idx_inc[j] # increment j too !
}
}
else
{
# change j
if( 0 < alpha[2] )
{
j = idx_inc[j] # increment j
if( j == i ) i = idx_inc[i] # increment i too !
}
else
{
j = idx_dec[j] # decrement j
if( j == i ) i = idx_dec[i] # decrement i too !
}
}
}
if( ! success )
{
log_level( ERROR, "Reached maximum iterations: %d.", maxiters )
return(NULL)
}
out = list()
out$idxpair = c(i,j)
out$alpha = alpha + 1/2 # from centered to uncentered
out$iters = iter
#print( out )
#cat( "findpgram2D(). timesearch =", gettime() - time_start, '\n' )
return( out )
}
if( FALSE )
{
# given a tiling by pgrams of a part of the plane, and a query point
# find the pgram that contains the query point
#
# centerrot N*(N-1) x 3 matrix with pgram centers,
# only the first 2 columns are used, the 3rd z-coordinate might be used in future testing
# baserot 3-vector with query point, 3rd z-coord not used
# the goal is to find the pgram that contains this point
# matgen2 2 x N matrix of generators
# idxpair0 integer pair to start the search
findpgram2D_v2 <- function( centerrot, baserot, matgen2, idxpair0, tol=5.e-9 )
{
time_start = gettime()
n = ncol(matgen2)
if( nrow(centerrot) != n*(n-1) )
{
log_level( ERROR, "nrow(centerrot) = %g != %g = n*(n-1). n=%g", nrow(centerrot), n*(n-1), n )
return(NULL)
}
qpoint = baserot[1:2]
# at each point in the iteration, the state is determined by 2 things:
# the current pgram2, given by i and j
# ppoint, which is a point inside the current pgram, usually a boundary point but initially the center
# when a boundary point, it is shared with the previous pgram
# use variable i and j to abbreviate 2 ints in idxpair0
i = idxpair0[1]
j = idxpair0[2]
k = PAIRINDEX_plus( i, j, n )
ppoint = centerrot[k,1:2]
maxiters = as.integer( max( 0.5*n, 50 ) )
success = FALSE
# make increment and decrement vectors
idx_inc = c(2:n,1)
idx_dec = c(n,1:(n-1))
for( iter in 1:maxiters )
{
mat2x2 = matgen2[ , c(i,j) ]
mat2x2_inv = solve( mat2x2 )
k = PAIRINDEX_plus( i, j, n )
# test whether qpoint is inside pgram (i,j)
vec = qpoint - centerrot[k,1:2]
alpha = as.double( mat2x2_inv %*% vec ) # solve( mat2x2, vec )
cat( "===============", " iter ", iter, " =================\n" )
dist = sqrt( sum((qpoint - ppoint)^2) )
cat( "i =", i, " j =", j, " alpha =", alpha, " center=", centerrot[k,1:2], " dist =", dist, " unitize(qpoint-ppoint) =", (qpoint-ppoint)/dist, '\n' )
absalpha = abs(alpha)
if( all( absalpha <= 1/2 ) )
{
# qpoint is inside the pgram ! We got it !
success = TRUE
break
}
# move to the next pgram2
# transform point in pgram and direction (qpoint - ppoint) to the unit square
b = mat2x2_inv %*% (ppoint - centerrot[k,1:2]) # b is in square [-1/2,1/2]^2
v = mat2x2_inv %*% (qpoint - ppoint) # v points into the interior of the square
# snap to +-1/2 if within tol
snap = abs(abs(b) - 1/2) <= tol
b[snap] = round( b[snap] + 1/2 ) - 1/2
cat( "b before =", b, " v =", v, " ppoint =", ppoint, '\n' )
b = squareadvance( b, v ) # new b is on boundary of square
ppoint = as.double( mat2x2 %*% b ) + centerrot[k,1:2]
cat( "b after =", b, " ppoint =", ppoint, '\n' )
#return(NULL)
if( abs(b[1]) == 0.5 )
{
# left or right edge, change i
if( b[1] == 0.5 )
{
i = idx_dec[i] # decrement i
if( i == j ) j = idx_dec[j] # decrement j too !
}
else
{
i = idx_inc[i] # increment i
if( i == j ) j = idx_inc[j] # increment j too !
}
}
else
{
# bottom or top edge, change j
if( b[2] == 0.5 )
{
j = idx_inc[j] # increment j
if( j == i ) i = idx_inc[i] # increment i too !
}
else
{
j = idx_dec[j] # decrement j
if( j == i ) i = idx_dec[i] # decrement i too !
}
}
}
if( ! success )
{
log_level( ERROR, "Reached maximum iterations: %d.", maxiters )
return(NULL)
}
out = list()
out$idxpair = c(i,j)
out$alpha = alpha + 1/2 # from centered to uncentered
out$iters = iter
timesearch = gettime() - time_start
cat( "findpgram2D_v2(). timesearch =", timesearch, '\n' )
return( out )
}
}
# in this version, use brute-force search to find the containing pgram
#
# centerrot N*(N-1) x 3 matrix with pgram centers,
# the 3rd z-coordinate is used to skip pgrams that are too far "below" the basepoint
# baserot 3-vector with query point, 3rd z-coord is used too
# the goal is to find the pgram that contains this point
# idxpair_plus N*(N-1) x 2 integer matrix of 1-based generator indexes, including antipodal data
# genrot 3 x N matrix of rotated generators
findpgram2D_v3 <- function( centerrot, baserot, idxpair_plus, genrot )
{
#print( centerrot )
#cat( "baserot =", baserot, '\n' )
res = .Call( C_findpgram2D, centerrot, baserot, idxpair_plus, genrot )
if( res[[1]] < 0 ) return(NULL)
k = res[[1]] + 1L # 0-based to 1-based
alpha = res[[2]]
out = list()
out$idx = k
out$idxpair = idxpair_plus[k, ]
out$alpha = alpha + 1/2 # from centered to uncentered
# out$iters = k
return( out )
}
# base base of ray, uncentered
# dir direction of ray
# center center of symmetry
# matgen 3 x M matrix of generators
# pgramdf data frame with idxpair, center, and other variables. not extended with antipodal data
# returns a list with these items:
# idx row index (in pgramdf, extended with antipodal data) of the pgram that the ray intersects
# idxpair indexes of the generators of that pgram
# alpha 2 coords in [0,1] for the intersection point inside the pgram
# or NULL if the ray intersects no pgram
findpgram3D <- function( base, dir, center, matgen, pgramdf )
{
# extend with antipodal data
pgramdf = pgramdf_plus( pgramdf )
base_centered = base - center
success = FALSE
for( k in 1:nrow(pgramdf) )
{
idxpair = pgramdf$idxpair[k, ]
mat = cbind( matgen[ , idxpair ], -dir )
y = solve( mat, base_centered - pgramdf$center[k, ] ) #; print(y)
alpha = y[1:2]
tau = y[3]
if( all( abs(alpha) <= 0.5 ) && 0 < tau )
{
success = TRUE
break
}
}
if( ! success ) return(NULL)
out = list()
out$idx = k
out$idxpair = idxpair
out$alpha = alpha + 1/2 # centered to uncentered
# print( out )
return( out )
}
# this is for use with pgramdf_plus()
# this is only valid if 1 <= i,j <= n. If i==j it returns NA_integer_
PAIRINDEX_plus <- function( i, j, n )
{
if( i < j )
out = (i-1L)*n - ((i)*(i+1L)) %/% 2L + j
else if( j < i )
out = (j-1L)*n - ((j)*(j+1L)) %/% 2L + i + (n*(n-1L)) %/% 2L
else
out = NA_integer_
return( out )
}
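# verification sketch, not run: for i<j the formula reproduces the usual pair order,
# and the pairs with j<i follow in the antipodal block of n*(n-1)/2 rows.
if( FALSE )
{
n = 4
pairs = rbind( c(1,2), c(1,3), c(1,4), c(2,3), c(2,4), c(3,4) )
for( k in 1:nrow(pairs) )
stopifnot( PAIRINDEX_plus( pairs[k,1], pairs[k,2], n ) == k )
PAIRINDEX_plus( 2, 1, n ) # 7, the first row of the antipodal block
}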
if( FALSE )
{
# b point in the square [-1/2,1/2]^2
# usually it is on the boundary, or the center c(0,0) - not verified
# v non-zero vector pointing into the interior of the square - not verified
# tol tolerance for snapping output to the boundary
#
# returns point where the ray b + t*v intersects the boundary
# returns NULL in case of problem
squareadvance <- function( b, v, tol=5.e-8 )
{
# check b
#absb = abs(b)
#ok = 1 <= sum( absb==1/2 ) && all( absb <= 1/2 )
#if( ! ok ) return(NULL)
tvec = c( (-0.5 - b)/v, (0.5 - b)/v )
# change any non-positive or NaN entries to Inf
tvec[ tvec <= 0 | is.nan(tvec) ] = Inf
# find the minimum entry
tmin = min( tvec )
if( ! is.finite(tmin) ) return(NULL)
out = b + tmin*v
# snap to +-1/2 if within tol
snap = abs(abs(out) - 1/2) <= tol
out[snap] = round( out[snap] + 1/2 ) - 1/2
# check output
#ok = all( abs(out) <= 1/2 )
#if( ! ok ) return(NULL)
cat( "squareadvance(). tmin =", tmin, " out =", out, '\n' )
return( out )
}
}
# theta query angle
# thetaend 2 angles forming the endpoints of an arc. The shorter arc is the one intended.
#
# returns TRUE if theta is on the arc
on_arc <- function( theta, thetaend )
{
# rotate both endpoints to put theta at 0,
# and both endpoints in [-pi,pi)
thetaend = ( (thetaend - theta + pi) %% (2*pi) ) - pi
# if( any(thetaend == 0) ) return(TRUE) # theta is an endpoint
if( pi <= abs(thetaend[2] - thetaend[1]) )
# 0 is in the wrong arc
return(FALSE)
# check whether the endpoints are on opposite sides of 0
return( prod(thetaend) <= 0 )
}
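# illustration, not run:
if( FALSE )
{
on_arc( 0.1, c(0, pi/2) ) # TRUE, 0.1 lies between the endpoints
on_arc( 3.0, c(0, pi/2) ) # FALSE, outside the shorter arc
on_arc( pi, c(3, -3) ) # TRUE, the shorter arc crosses the branch cut at pi
}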
# mask a logical mask
#
# returns TRUE iff the set of TRUE values in mask[] is contiguous in a cyclic sense
is_contiguousmask <- function( mask )
{
subs = which( mask )
if( all( diff(subs) == 1L ) )
return(TRUE)
# try the complements
subs = which( ! mask )
return( all( diff(subs) == 1L ) )
}
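# illustration, not run:
if( FALSE )
{
is_contiguousmask( c(TRUE,TRUE,FALSE,FALSE) ) # TRUE
is_contiguousmask( c(TRUE,FALSE,TRUE,FALSE) ) # FALSE
is_contiguousmask( c(TRUE,FALSE,FALSE,TRUE) ) # TRUE, contiguous in the cyclic sense
}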
det2x2 <- function( mat2x2 )
{
return( mat2x2[1,1]*mat2x2[2,2] - mat2x2[1,2]*mat2x2[2,1] )
}
if( FALSE )
{
# idxpair count x 2 integer matrix of pgrams to plot
plotpgrams2D <- function( x, base, direction, idxpair, winrad=c(0.001,0.001) )
{
center = getcenter(x)
base_centered = base - center
# from current direction, compute a 2x3 projection matrix
frame3x3 = goodframe3x3( direction )
ok = is.matrix(idxpair) && is.integer(idxpair) && ncol(idxpair)==2
pgramdf = allpgramcenters2trans(x)
if( is.null(pgramdf) ) return(NULL)
matsimple = getsimplified( x$matroid )
matgen = getmatrix( matsimple )
m = ncol(matgen)
matproj = t( frame3x3[ , 1:2 ] )
matgen2 = matproj %*% matgen
temp = pgramdf$center %*% frame3x3
centerrot = .Call( C_extend_antipodal, temp )
baserot = as.double( base_centered %*% frame3x3 )
cat( "baserot = ", baserot, '\n' )
pgrams = nrow(idxpair)
# allocate 3D array for all the vertices
vertex = array( 0, c(pgrams,4,2) )
for( i in 1:pgrams )
{
idx = idxpair[i, ]
k = PAIRINDEX_plus( idx[1], idx[2], m )
center = centerrot[k,1:2]
# get the 2 generators of the pgram
gen = matgen2[ , idx ]
vertex[i,1, ] = center - 0.5*gen[ ,1] - 0.5*gen[ ,2]
vertex[i,2, ] = center - 0.5*gen[ ,1] + 0.5*gen[ ,2]
vertex[i,3, ] = center + 0.5*gen[ ,1] + 0.5*gen[ ,2]
vertex[i,4, ] = center + 0.5*gen[ ,1] - 0.5*gen[ ,2]
}
if( FALSE )
{
# pgrams are typically very thin, so apply whitening
vertex_xy = cbind( as.double(vertex[ , ,1]), as.double(vertex[ , ,2]) ) ;
print( vertex_xy )
# center vertex_xy
vertex_xy = vertex_xy - matrix( colMeans(vertex_xy), nrow(vertex_xy), ncol(vertex_xy), byrow=TRUE )
print( vertex_xy )
res = base::svd( vertex_xy, nu=0, nv=2 )
if( ! all( 0 < res$d ) )
{
log_level( ERROR, "Internal error. vertex matrix is invalid." )
return(NULL)
}
# method == "ZCA" )
# calculate the "whitening", or "sphering" matrix, which is MxM
W = res$v %*% diag( 1/res$d ) %*% t(res$v) ; print( W )
vertex = vertex_xy %*% t(W)
dim(vertex) = c(pgrams,4,2)
}
xlim = range( vertex[ , ,1] )
ylim = range( vertex[ , ,2] )
xlim = baserot[1] + c(-1,1) * winrad[1]
ylim = baserot[2] + c(-1,1) * winrad[2]
plot( xlim, ylim, type='n', las=1, asp=1, lab=c(10,10,7), xlab='x', ylab='y' )
grid( lty=1 )
abline( h=0, v=0 )
for( i in 1:pgrams )
{
col = ifelse( i==pgrams, 'lightyellow', NA )
polygon( vertex[i, ,1], vertex[i, ,2], col=col )
idx = idxpair[i, ]
k = PAIRINDEX_plus( idx[1], idx[2], m )
points( centerrot[k,1], centerrot[k,2], pch=20 )
}
points( baserot[1], baserot[2] )
return( invisible(TRUE) )
}
}
# section2trans() computes the intersection of the 2-transition surface and plane(s)
#
# x a zonohedron object
# normal a non-zero numeric vector of length 3, the normal of all the planes
# beta a vector of plane-constants. The equation of plane k is: <x,normal> = beta[k]
#
# value a list of data.frames with length = length(beta).
section2trans <- function( x, normal, beta, invert=FALSE, plot=FALSE, tol=1.e-12, ... )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
ok = is.numeric(normal) && length(normal)==3 && all( is.finite(normal) ) && any( normal!=0 )
if( ! ok )
{
log_level( ERROR, "normal is invalid. It must be a non-zero numeric vector of length 3, and all entries finite." )
return(NULL)
}
ok = is.numeric(beta) && 0<length(beta) && all( is.finite(beta) )
if( ! ok )
{
log_level( ERROR, "beta is invalid. It must be a numeric vector of positive length, and all entries finite." )
return(NULL)
}
cnames = names(normal) # save these
dim(normal) = NULL
matsimple = getsimplified(x$matroid)
matgen = getmatrix( matsimple )
gndgen = getground( matsimple )
numgen = ncol( matgen ) # so matgen is 3 x numgen
# make increment and decrement vectors
ij_inc = c(2:numgen,1L)
ij_dec = c(numgen,1:(numgen-1))
# for each generator, compute radius of the projection of the generator onto the line generated by normal
normalgen = as.numeric( normal %*% matgen )
radiusgen = 0.5 * abs(normalgen) # so length(radiusgen) = numgen
pgramdf = allpgramcenters2trans(x) #; print( str(pgramdf) )
if( is.null(pgramdf) ) return(NULL)
# the radius of a pgram is simply the sum of the radii of the generators
myfun <- function( pair ) { sum( radiusgen[ pair ] ) }
radiuspgram = apply( pgramdf$idxpair, 1L, myfun ) #; print( str(radiuspgram) )
# dot products of the pgram centers and the normal vector
cn = as.numeric( pgramdf$center %*% normal )
# append both center and cn with the antipodal data
center = rbind( pgramdf$center, -pgramdf$center )
cn = c( cn, -cn )
idxpair = rbind( pgramdf$idxpair, pgramdf$idxpair[ ,2:1] )
numpgrams = length( cn ) # includes antipodal data
# compute range of normal over each pgram
cnneg = cn - radiuspgram # the recycling rule is used here
cnpos = cn + radiuspgram # the recycling rule is used here
# translate beta to the centered zonohedron
cent.norm = sum( x$center * normal )
betacentered = as.numeric(beta) - cent.norm #; print(betacentered)
# load the coefficients that traverse a parallelogram first by generator 1 and then generator 2.
# we call this the "standard" order
vertexcoeff = matrix( c( -0.5,-0.5, 0.5,-0.5, 0.5,0.5, -0.5,0.5), 4, 2, byrow=TRUE )
# scratch vector that holds dot product of the parallelogram vertices with the normal
value = numeric( 4 )
next4 = c( 2:4, 1L )
# the intersection of the plane and the polyhedral surface is a polygon
# make matrix to hold all vertices of the polygons
vertex = matrix( NA_real_, nrow=nrow(center), ncol=3 )
adjacent = integer( numpgrams )
out = vector( length(beta), mode='list' )
names(out) = sprintf( "normal=%g,%g,%g. beta=%g", normal[1], normal[2], normal[3], beta )
for( k in 1:length(beta) )
{
beta_k = betacentered[k]
# find indexes of all pgrams whose *interiors* intersect this plane
# NOTE: If a pgram intersects the plane only in a vertex or edge, it will not be found !
# perhaps fix this later ?
maskinter = cnneg < beta_k & beta_k < cnpos
indexvec = which( maskinter ) #; print( indexvec )
if( length(indexvec) < 3 )
{
# plane does not intersect the zonohedron, there is no section
# or the section is a degenerate single point or an edge
# out[[k]] = list( beta=beta[k], section=matrix( 0, 0, 3 ) )
out[[k]] = data.frame( row.names=integer(0) )
out[[k]]$point = matrix(0,0,3)
next
}
vertex[ , ] = NA_real_ # clear vertex
adjacent[ ] = NA_integer_ # clear adjacent
if( invert ) pcube = matrix( NA_real_, length(indexvec), numgen )
# length(indexvec) is the # of pgrams that the plane intersects
for( ii in 1:length(indexvec) )
{
# idx is a row index into center[] and idxpair and vertex[]
idx = indexvec[ii]
# find point where the plane intersects an edge of the pgram
pair = idxpair[ idx, ] #sort( idxpair[ idx, ] )
i = pair[1]
j = pair[2]
# compute the values at the vertices in the 'standard' order
#for( ii in 1:4 )
# value[ii] = vertexcoeff[ii,1] * normalgen[i] + vertexcoeff[ii,2] * normalgen[j] + cn[idx] - beta_k
value = as.double( vertexcoeff %*% normalgen[pair] ) + cn[idx] - beta_k
# traverse value[] and check that there is exactly 1 transition from neg to non-neg
#itrans = integer(0)
#for( ii in 1:4 )
# {
# if( value[ii]<0 && 0<=value[ next4[ii] ] )
# itrans = c(itrans,ii) #; count = count+1L
# }
itrans = which( value<0 & 0<=value[next4] )
if( length(itrans) != 1 )
{
log_level( ERROR, "Internal Error. pgram idx=%d. value[] has %d transitions, but expected 1.", idx, length(itrans) )
next
}
# we have found the right edge of the pgram
# find the intersection of the edge and the plane
i1 = itrans
i2 = next4[i1]
v1 = as.numeric( matgen[ ,pair] %*% vertexcoeff[i1, ] )
v2 = as.numeric( matgen[ ,pair] %*% vertexcoeff[i2, ] )
lambda = value[i2] / (value[i2] - value[i1])
vertex[idx, ] = center[idx, ] + lambda*v1 + (1-lambda)*v2
# find the pgram on the other side of this edge
# this adjacent pgram must also be in indexvec
if( itrans == 1 )
{
j = ij_dec[j] # decrement j
if( j == i ) i = ij_dec[i] # decrement i too !
alpha = c(1-lambda,0)
}
else if( itrans == 2 )
{
i = ij_dec[i] # decrement i
if( i == j ) j = ij_dec[j] # decrement j too !
alpha = c(1,1-lambda)
}
else if( itrans == 3 )
{
j = ij_inc[j] # increment j
if( j == i ) i = ij_inc[i] # increment i too !
alpha = c(lambda,1)
}
else if( itrans == 4 )
{
i = ij_inc[i] # increment i
if( i == j ) j = ij_inc[j] # increment j too !
alpha = c(0,lambda)
}
adjacent[idx] = PAIRINDEX_plus( i, j, numgen )
if( invert ) pcube[ii, ] = pcubefromdata( pair, alpha, numgen )
#cat( "maskinter[ adjacent[idx] ] =", maskinter[ adjacent[idx] ], " value[i2] =", value[i2], '\n' )
if( abs(value[i2])<=tol && ! maskinter[ adjacent[idx] ] )
{
# degenerate case, try to fix it
#cat( "fixing itrans =", itrans, '\n' )
if( itrans == 1 )
{
if( i == pair[1] ) i = ij_dec[i] # decrement i too !
}
else if( itrans == 3 )
{
if( i == pair[1] ) i = ij_inc[i] # increment i too !
}
# this still might be an invalid pgram; if so it will be caught later
adjacent[idx] = PAIRINDEX_plus( i, j, numgen )
}
}
#print( indexvec )
#print( adjacent )
# put the non-trivial rows of vertex[,] in proper order,
# using the adjacent[] index vector
success = TRUE
indexordered = rep( NA_integer_, length(indexvec) )
# since the *interiors* of all these pgrams intersect the plane,
# we can start this iteration at any one of them
indexordered[1] = indexvec[1]
for( i in 2:length(indexordered) )
{
indexordered[i] = adjacent[ indexordered[i-1] ]
if( is.na(indexordered[i]) )
{
log_level( WARN, "Internal Error. bad adjacent logic. i=%d.", i )
success = FALSE
break
}
if( indexordered[i] == indexordered[1] )
{
# back to the starting pgram, premature stop, back off !
# indexordered[i] = NA_integer_
i = i - 1L
break
}
}
if( FALSE )
{
print( indexvec )
print( vertex )
print( adjacent )
print( indexordered )
}
if( i < length(indexordered) )
{
log_level( WARN, "The plane for beta=%g intersects the surface in more than 1 polygon.", beta[k] ) #; but only 1 polygon is being returned
#log.string( ERROR, "The plane for beta=%g intersects the surface in more than 1 polygon.", beta[k] )
success = FALSE
#indexordered = indexordered[1:i] # trim away the excess
}
if( ! success )
{
# assign the empty section
# out[[k]] = list( beta=beta[k], section=matrix( 0, 0, 3 ) )
out[[k]] = data.frame( row.names=integer(0) )
out[[k]]$point = matrix(0,0,3)
next
}
# extract the polygon
poly = vertex[indexordered, ]
# the poly coordinates are centered, so add back the zono center
res = .Call( C_plusEqual, poly, x$center, 1L ) # changes poly in place
if( is.null(res) ) return(NULL)
df = data.frame( row.names=1:nrow(poly) )
df$point = poly
gndpair = gndgen[ idxpair[ indexordered, ] ]
dim(gndpair) = c( length(indexordered), 2 )
df$gndpair = gndpair
#gndpairadj = gndgen[ idxpair[ adjacent[indexordered], ] ]
#dim(gndpairadj) = c( length(indexordered), 2 )
#df$gndpairadj = gndpairadj
if( invert )
{
perm = order( indexordered )
perm[perm] = 1:length(perm) # invert perm
df$pcube = invertcubepoints( x, pcube[ perm, ] )
#print( indexordered )
#print( perm )
}
out[[k]] = df
if( FALSE && invert )
{
# test the inversion
#print( df$pcube )
matorig = getmatrix( x$matroid )
delta = df$pcube %*% t(matorig) - df$point
# cat( "range(delta)=", range(delta), '\n' )
delta = rowSums( abs(delta) )
#print( t(matorig) )
if( any( tol < delta, na.rm=TRUE ) )
{
#print( delta )
#log.string( WARN, "Inversion test failed. max(delta)=%g > %g=tol",
# max(delta,na.rm=TRUE), tol )
}
out[[k]]$delta = delta
}
}
if( plot )
{
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
{
log_level( WARN, "Package 'rgl' is required for plotting. Please install it." )
}
else if( rgl::cur3d() == 0 )
{
log_level( WARN, "Cannot add section to plot, because there is no rgl window open." )
}
else
{
for( k in 1:length(beta) )
{
xyz = out[[k]]$point
rgl::polygon3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], fill=FALSE, col='red' )
rgl::points3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col='red', size=13, point_antialias=TRUE )
}
}
}
return( out )
}
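# usage sketch, not run. It assumes the package's zonohedron() constructor and the
# built-in colorimetry.genlist data; normal and beta are arbitrary illustrative values.
if( FALSE )
{
zono = zonohedron( colorimetry.genlist[[2]] )
# intersect the 2-transition surface with 2 parallel planes
sec = section2trans( zono, normal=c(1,0,0), beta=c(10,20), invert=TRUE )
str( sec ) # a list of 2 data frames, one polygon per plane
}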
transitionsdf <- function( x, trans2=TRUE )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
metrics = getmetrics2trans( x, angles=FALSE )
if( is.null(metrics) ) return(NULL)
ok = is.finite(metrics$starshaped) && metrics$starshaped
if( ! ok )
{
log_level( "The zonohedron x is invalid. The 2-transitions surface is not strictly starshaped." )
return(NULL)
}
#print( str( metrics$pgramdf ) )
deficient = metrics$pgramdf$deficient
#deficientcount = sum(deficient)
#dfacets = 2 * deficientcount
#cat( "deficient parallelograms: ", dfacets, " [fraction=", dfacets/facets, ']\n' )
#cat( "deficient area: ", metrics$areadeficient, " [fraction=", metrics$areadeficient/res$area, ']\n' )
#cat( "deficient volume: ", metrics$volumedeficient, " [fraction=", metrics$volumedeficient/res$volume, ']\n' )
#thickness = metrics$volumedeficient / metrics$areadeficient
#cat( "deficient thickness (mean): ", thickness, '\n' )
if( any(deficient) )
{
# compute transitions
pgramdef = metrics$pgramdf[ deficient, ]
dat = boundarypgramdata( x, pgramdef$gndpair )
dat$gndpair = pgramdef$gndpair
dat$area = pgramdef$area
dat$deficit = pgramdef$deficit
# remove unneeded columns
dat$hyperplaneidx = NULL
dat$center = NULL
}
else
dat = NULL
#print( str(dat) )
notdef = ! deficient
if( trans2 && any(notdef) )
{
# append more rows to dat
# append the rows to dat that are *not* deficient and therefore have 2 transitions
dat2 = data.frame( row.names=1:sum(notdef) )
dat2$gndpair = metrics$pgramdf$gndpair[ notdef, ]
dat2$transitions = 2L
dat2$area = metrics$pgramdf$area[ notdef ]
dat2$deficit = metrics$pgramdf$deficit[ notdef ]
#print( str(dat2) )
dat = rbind( dat, dat2 )
}
if( is.null(dat) )
{
log_level( ERROR, "No rows to return." )
return(NULL)
}
# breakdown the deficient parallelograms by transitions
tunique = sort( unique( dat$transitions ) ) #; print( tunique )
m = length(tunique)
pgcount = integer(m)
area.r = matrix( 0, nrow=m, ncol=2 )
colnames(area.r) = c( 'min', 'max' )
deficit.r = matrix( 0, nrow=m, ncol=2 )
colnames(deficit.r) = c( 'min', 'max' )
area.s = numeric( m )
example = character(m)
for( k in 1:m )
{
datsub = dat[ dat$transitions == tunique[k], ]
pgcount[k] = 2L*nrow( datsub )
area.r[k, ] = range( datsub$area )
area.s[k] = 2*sum( datsub$area )
deficit.r[k, ] = range( datsub$deficit )
i = which.max( datsub$area )
gndpair = datsub$gndpair[i, ]
example[k] = sprintf( "{%d,%d}", gndpair[1], gndpair[2] )
}
out = data.frame( row.names = c(1:m,"Totals") )
out$transitions = c(tunique,NA)
out$parallelograms = c( pgcount, sum(pgcount) )
out$area = rbind( area.r, c(NA,NA) )
out$area.sum = c(area.s,sum(area.s))
out$deficit = rbind( deficit.r, c(NA,NA) )
out$example = c( example, '' )
return( out )
}
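# usage sketch, not run. It assumes the package's zonohedron() constructor and
# the built-in colorimetry.genlist data.
if( FALSE )
{
zono = zonohedron( colorimetry.genlist[[2]] )
transitionsdf( zono ) # parallelogram counts and areas, broken down by transition count
}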
plothighertrans <- function( x, abalpha=1, defcol='green', defalpha=0, ecol=NA,
connections=FALSE, bgcol="gray40", both=TRUE, ... )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
{
log_level( ERROR, "Package 'rgl' cannot be loaded. It is required for plotting the zonohedron." )
return(FALSE)
}
if( abalpha<=0 && defalpha<=0 )
{
log_level( WARN, "abalpha=%g and defalpha=%g is invalid.", abalpha, defalpha )
return( FALSE )
}
metrics = getmetrics2trans( x, angles=FALSE, tol=1.e-12 )
if( is.null(metrics) )
return(FALSE)
facesok = ! is.null(metrics$pgramdf$deficient) #; cat( 'facesok ', facesok, '\n' )
if( ! facesok )
{
log_level( WARN, "Cannot draw faces because the parallelogram data are not available." )
return( FALSE )
}
center = x$center
white = 2 * center
# start 3D drawing
rgl::bg3d( color=bgcol )
#cube = rgl::scale3d( rgl::cube3d(col="white"), center[1], center[2], center[3] )
#cube = rgl::translate3d( cube, center[1], center[2], center[3] )
rgl::points3d( 0, 0, 0, col='black', size=10, point_antialias=TRUE )
rgl::points3d( white[1], white[2], white[3], col='white', size=10, point_antialias=TRUE )
rgl::points3d( center[1], center[2], center[3], col='gray50', size=10, point_antialias=TRUE )
# exact diagonal
rgl::lines3d( c(0,white[1]), c(0,white[2]), c(0,white[3]), col=c('black','white'), lwd=3, lit=FALSE )
matsimp = getsimplified( x$matroid )
matgen = getmatrix(matsimp)
numgen = ncol(matgen)
#gndgen = getground(matsimp)
pgramdf = metrics$pgramdf
deficient = pgramdf$deficient
deficientcount = sum( deficient )
if( deficientcount == 0 )
{
log_level( WARN, "Cannot draw because none of the 2-transition facets are deficient." )
return( FALSE )
}
drawedges = ! is.na(ecol)
# extract only the subset of deficient rows
pgramdf = pgramdf[ deficient, ] #; print( pgramdf )
# pgramdf$alphavec = alphavec
step = 4
quadmat = matrix( 0, nrow=step*deficientcount, ncol=3 )
if( 0 < abalpha )
{
# draw filled abundant pgrams
# use the same colorkey as Scott Burns
colorkey = c( "black", "white", "black", "#8c0000", "black", "#ffff19", "black", "#0082c8", "black", "#dcbeff", "green" )
# get bounddf in order to get the # of transitions
bounddf = boundarypgramdata( x, pgramdf$gndpair )
colvec = character( step*deficientcount )
#alphavec = numeric( step*deficientcount )
for( i in 1:deficientcount )
{
center = pgramdf$centermax[i, ]
edge = matgen[ , pgramdf$idxpair[i, ] ] # 3x2 matrix
k = step*(i-1)
quadmat[k+1, ] = center - 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[k+2, ] = center - 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+3, ] = center + 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+4, ] = center + 0.5 * edge[ , 1] - 0.5*edge[ , 2]
tcount = min( bounddf$transitions[i], length(colorkey) )
colvec[ (k+1):(k+4) ] = colorkey[ tcount ] # pgramdf$colvec[i]
#alphavec[ (k+1):(k+4) ] = pgramdf$alphavec[i]
}
xyz = .Call( C_sumMatVec, quadmat, x$center, 1L )
rgl::quads3d( xyz, col=colvec, alpha=abalpha, lit=FALSE ) # quad filled
if( drawedges )
rgl::quads3d( xyz, col=ecol, lwd=2, front='lines', back='lines', lit=FALSE ) # quad edges
if( both )
{
xyz = .Call( C_sumMatVec, -quadmat, x$center, 1L )
rgl::quads3d( xyz, col=colvec, alpha=abalpha, lit=FALSE )
if( drawedges )
rgl::quads3d( xyz, col=ecol, lwd=2, front='lines', back='lines', lit=FALSE ) # quad edges
}
# rgl::legend3d( "top", c("2D Points", "3D Points"), cex=0.75, pch = c(1, 16) )
# rgl::title3d('main', 'sub', 'xlab', 'ylab', 'zlab')
}
if( 0 < defalpha )
{
# draw filled deficient pgrams
for( i in 1:deficientcount )
{
center = pgramdf$center[i, ]
edge = matgen[ , pgramdf$idxpair[i, ] ] # 3x2 matrix
k = step*(i-1)
quadmat[k+1, ] = center - 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[k+2, ] = center - 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+3, ] = center + 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[k+4, ] = center + 0.5 * edge[ , 1] - 0.5*edge[ , 2]
}
xyz = .Call( C_sumMatVec, quadmat, x$center, 1L )
rgl::quads3d( xyz, col=defcol, alpha=defalpha, lit=FALSE ) # quad filled
if( drawedges )
rgl::quads3d( xyz, col=ecol, lwd=2, front='lines', back='lines', lit=FALSE ) # quad edges
if( both )
{
xyz = .Call( C_sumMatVec, -quadmat, x$center, 1L )
rgl::quads3d( xyz, col=defcol, alpha=defalpha, lit=FALSE )
if( drawedges )
rgl::quads3d( xyz, col=ecol, lwd=2, front='lines', back='lines', lit=FALSE ) # quad edges
}
}
# for each pair of pgrams in the zonohedron and the 2-transition polyhedron,
# draw a segment connecting their 2 centers
# these are matching deficient and abundant parallelograms
if( connections )
{
# draw segments between the deficient pgrams in the 2-transition surface,
# and the corresponding pgrams in the zonohedron boundary
#pgramdf = metrics$pgramdf
#centerdef = pgramdf$center[deficient, ] #metrics$signsurf * metrics$
#centermax = pgramdf$centermax[ deficient, ]
centermat = rbind( pgramdf$center, pgramdf$centermax )
mat = matrix( 1:nrow(centermat), nrow=deficientcount, ncol=2 )
idx = as.integer( t(mat) )
centermat = centermat[ idx, ] #; print( centermat )
xyz = .Call( C_sumMatVec, centermat, x$center, 1L )
rgl::segments3d( xyz, col='black' )
rgl::points3d( xyz, col=c('yellow','black'), size=5, point_antialias=TRUE )
if( both ) # both
{
xyz = .Call( C_sumMatVec, -centermat, x$center, 1L )
rgl::segments3d( xyz, col='black' )
rgl::points3d( xyz, col=c('yellow','black'), size=5, point_antialias=TRUE )
}
}
return( invisible(TRUE) )
}
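# usage sketch, not run. It assumes the package's zonohedron() constructor, the
# built-in colorimetry.genlist data, and that rgl can open a window.
if( FALSE )
{
zono = zonohedron( colorimetry.genlist[[2]] )
plothighertrans( zono, abalpha=0.9 ) # abundant pgrams colored by transition count
}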
############################## deadwood below ######################################
# arguments:
#
# N dimension of the cube, must be a positive integer
# crange range for the count of +1/2s for the edges, does not affect the vertex
#
# returns list with components:
# N the input N
# vertex (N*(N-1)+2)x2 integer matrix with code for the vertex
# the 1st int is the # of ones, and the 2nd is the starting position
# edge (2N*(N-2) + 2N) x 2 integer matrix with starting and ending index (in vertex) of the edges
# this number applies only when crange=c(0L,N)
#
trans2subcomplex_old <- function( N, crange=c(0L,N) )
{
N = as.integer(N)
ok = length(N)==1 && 0<N
if( ! ok )
{
log_level( ERROR, "N is invalid." )
return(NULL)
}
ok = is.numeric(crange) && length(crange)==2 && 0<=crange[1] && crange[1]<crange[2] && crange[2]<=N
if( ! ok )
{
log_level( ERROR, "crange is invalid." )
return(NULL)
}
vertex = matrix( 0L, nrow=N*(N-1)+2, ncol=2 )
colnames(vertex) = c( "count", "start" )
vertex[ 1, ] = c( 0L, NA_integer_ ) # south pole
k = 2L
for( i in seq_len(N-1) )
{
vertex[ k:(k+N-1L), ] = cbind( i, 1L:N )
k = k + N
}
vertex[ nrow(vertex), ] = c( N, NA_integer_ ) # north pole
out = list()
out$N = N
out$vertex = vertex
if( N == 1 )
{
# trivial case, only 1 edge, and only 2 vertices, the 1-cube
out$edge = matrix( 1:2, nrow=1 )
return(out)
}
midrange = pmin( pmax(crange,1L), N-1L )
edges = 0L
if( crange[1] == 0 ) edges = edges + N
seque = midrange[1] + seq_len( max( diff(midrange), 0 ) ) - 1L #; print(seque)
edges = edges + 2*N*length(seque)
if( crange[2] == N ) edges = edges + N
edge = matrix( 0L, nrow=edges, ncol=2 )
i = 1L
if( crange[1] == 0 )
{
# add the "cap" at the south pole
for( start in 1:N )
{
edge[i, ] = c( vtxidxfromcode(N,0L,NA), vtxidxfromcode(N,1L,start) )
i = i+1L
}
}
start_prev = c(N,1L:(N-1L)) # lookup table
for( count in seque )
{
for( start in 1:N )
{
# find index of current vertex
idx = vtxidxfromcode(N,count,start)
# add 1 on the left
sprev = start_prev[start]
edge[i, ] = c( idx, vtxidxfromcode(N,count+1L,sprev) )
i = i+1L
# add 1 on the right
edge[i, ] = c( idx, vtxidxfromcode(N,count+1L,start) )
i = i+1L
}
}
if( crange[2] == N )
{
# add the "cap" at the north pole
for( start in 1:N )
{
edge[ i, ] = c( vtxidxfromcode(N,N-1L,start), vtxidxfromcode(N,N,NA) )
i = i+1L
}
}
out$edge = edge
return(out)
}
# parameters:
# N the dimension of the cube, must be a positive integer
# count the number of 1s in the vertex
# start the starting index of the run of 1s, 1-based
#
# returns the row index in vertex[], as returned by trans2subcomplex()
vtxidxfromcode <- function( N, count, start )
{
if( count == 0 ) return( 1L )
if( count == N ) return( N*(N-1L) + 2L )
return( N*(count-1L) + start + 1L )
}
# parameters:
# N the dimension of the cube, must be a positive integer
# count integer M-vector with the number of 1s in the vertex
# start integer M-vector with the starting index of the run of 1s, 1-based
#
# returns an MxN matrix where the i'th row is the desired vertex of the N-cube, [-1/2,1/2]^N
vertexfromcode_old <- function( N, count, start )
{
M = length(count)
if( length(start) != M ) return(NULL)
# make wrap-around lookup table
wrap2 = c( 1:N, 1:N )
out = matrix( -1/2, nrow=M, ncol=N )
for( i in 1:M )
{
if( count[i] == 0 ) next # south pole, all -1/2 already
if( count[i] == N ) {
# north pole, all 1/2
out[ i, ] = 1/2
next
}
jvec = wrap2[ start[i]:(start[i] + count[i]-1L) ]
out[ i, jvec ] = 1/2
}
return(out)
}
# matgen M x N matrix of N generators, defining a 2-transition subcomplex in R^M
# centered center the output coordinates
#
# returns data.frame with N*(N-1)/2 rows and these columns
# idxpair integer matrix with 2 columns i and j with 1 <= i < j <= n
# center real matrix with 3 columns containing the corresponding facet center
#
# the row order is the same as the column order returned by allcrossproducts()
allfacetcenters2trans_oldold <- function( matgen, centered=TRUE )
{
ok = is.numeric(matgen) && is.matrix(matgen)
if( ! ok ) return(NULL)
n = ncol(matgen)
m = nrow(matgen)
gensum = .Call( C_cumsumMatrix, matgen, 2L )
idxpair = .Call( C_allpairs, n )
colnames(idxpair) = c('i','j')
facets = nrow(idxpair) #= ( n*(n-1L) ) / 2
# center is loaded with the *uncentered* coords
center = matrix( 0, facets, m )
for( k in 1:facets )
{
i = idxpair[k,1]
j = idxpair[k,2]
v = ( matgen[ ,i] + matgen[ ,j] ) / 2 #+ (gensum[ , j-1L ] - gensum[ , i])
if( i < j-1L )
v = v + (gensum[ , j-1L ] - gensum[ , i])
center[k, ] = v
}
if( centered )
{
centerpoly = gensum[ ,n] / 2
center = .Call( C_sumMatVec, center, -centerpoly, 1L ) # translate to the *centered* polyhedron
}
out = data.frame( row.names=1:facets )
out$idxpair = idxpair
out$center = center
return( out )
}
allfacetcenters2trans_old <- function( matgen, centered=TRUE )
{
ok = is.numeric(matgen) && is.matrix(matgen)
if( ! ok ) return(NULL)
n = ncol(matgen)
m = nrow(matgen)
facets = ( n*(n-1L) ) / 2
gensum = .Call( C_cumsumMatrix, matgen, 2L )
idxpair = matrix( 0L, facets, 2 )
colnames(idxpair) = c('i','j')
# center is loaded with the *uncentered* coords
center = matrix( 0, facets, m )
k = 1L
for( i in 1:(n-1) )
{
for( j in (i+1):n )
{
idxpair[k, ] = c(i,j)
v = ( matgen[ ,i] + matgen[ ,j] ) / 2
if( i < j-1L )
v = v + gensum[ , j-1L ] - gensum[ , i ]
center[k, ] = v
k = k + 1L
}
}
if( centered )
{
centerzono = gensum[ ,n] / 2
center = .Call( C_sumMatVec, center, -centerzono, 1L ) # translate to the *centered* zonohedron
}
out = data.frame( row.names=1:facets )
out$idxpair = idxpair
out$center = center
return( out )
}
########### argument processing ##############
#
# A a non-empty numeric NxM matrix, or something that can be converted to be one
#
# Nmin the minimum allowed number of rows
#
# returns such a matrix, or NULL in case of error
prepareNxM <- function( A, M, Nmin=1 )
{
ok = is.numeric(A) && M*Nmin<=length(A) && (length(dim(A))<=2) # && (0<M)
ok = ok && ifelse( is.matrix(A), ncol(A)==M, ((length(A) %% M)==0) )
if( ! ok )
{
#print( "prepareNx3" )
#print( sys.frames() )
mess = substr( paste0(as.character(A),collapse=','), 1, 10 )
#arglist = list( ERROR, "A must be a non-empty numeric Nx3 matrix (with N>=%d). A='%s...'", mess )
#do.call( log.string, arglist, envir=parent.frame(n=3) )
#myfun = log.string
#environment(myfun) = parent.frame(3)
Aname = deparse(substitute(A))
# notice hack with 2L to make log.string() print name of parent function
#log.string( c(ERROR,2L), "Argument '%s' must be a non-empty numeric Nx%d matrix (with N>=%d). %s='%s...'",
# Aname, M, Nmin, Aname, mess )
log_level( ERROR, "Argument '%s' must be a non-empty numeric Nx%d matrix (with N>=%d). %s='%s...'",
Aname, M, Nmin, Aname, mess, .topcall=sys.call(-2L) )
return(NULL)
}
if( ! is.matrix(A) )
A = matrix( A, ncol=M, byrow=TRUE )
return( A )
}
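# illustration, not run: a plain numeric vector whose length is a multiple of M
# is reshaped to an NxM matrix, filled by rows.
if( FALSE )
{
prepareNxM( 1:6, 3 ) # 2x3 matrix with rows (1,2,3) and (4,5,6)
}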
# A an integer vector, representing a set of integers
# B an integer vector, representing a set of integers
#
# returns TRUE or FALSE, depending on whether A is a subset of B
#
# subset1() is fastest, because there is no error checking
subset1 <- function( A, B )
{
all( is.finite( match(A,B) ) )
}
subset2 <- function( A, B )
{
length( setdiff(A,B) ) == 0
}
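# illustration, not run:
if( FALSE )
{
subset1( c(2,4), c(1,2,3,4) ) # TRUE
subset1( c(2,5), c(1,2,3,4) ) # FALSE, 5 is not in B
}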
normalizeColumns <- function( mat )
{
normvec = sqrt( .colSums( mat^2, nrow(mat), ncol(mat) ) )
return( t( t(mat) / normvec ) )
}
# A a numeric matrix
# MARGIN 1 (vectors are the rows) or 2 (vectors are the columns)
# for each vector, divide by L2 norm to get a unit vector
#
normalizeMatrix <- function( A, MARGIN )
{
ok = is.double(A) && is.matrix(A)
if( ! ok )
{
return(NULL)
}
MARGIN = as.integer(MARGIN)
ok = length(MARGIN)==1 && MARGIN %in% 1L:2L
if( ! ok )
{
return(NULL)
}
# make a deep (non-shallow) copy of A, because C_normalizeMatrix() modifies in-place
out = duplicate(A)
# change matrix out[] "in place"
ok = .Call( C_normalizeMatrix, out, MARGIN )
if( ! ok ) return(NULL)
return( out )
}
# ground positive integer vector in strictly increasing order. Not checked.
#
# returns lookup table from these integers to the order in the sequence
#
# out[idx] is equivalent to match( idx, ground ) but faster
idxfromgroundfun <- function( ground )
{
# make lookup table from ground to raw column index
numgen = length(ground)
out = integer( ground[numgen] )
out[ ground ] = 1:numgen
return( out )
}
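# a small demo of the lookup table; the ground set is invented for illustration
if( FALSE )
    {
    ground = c(2L,5L,9L)
    lookup = idxfromgroundfun( ground )
    lookup[ c(9L,2L) ]              # 3 1
    match( c(9L,2L), ground )       # the same result, but slower
    }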
shortlabel <- function( ivec )
{
if( length(ivec) <= 3 )
out = paste(ivec,collapse='+')
else
{
ran = range(ivec)
out = sprintf( "%g+...+%g", ran[1], ran[2] )
}
return(out)
}
# x a numeric matrix
# e0 used when nrow(x) >= 1
# e1 used when nrow(x) >= 2
# ground integer vector labeling the columns of x
#
# the columns of x are taken as generators of a matroid
#
# returns a matrix with no "loops" and no "multiple groups"
simplify.matrix <- function( x, e0=0, e1=1.e-6, ground=NULL, ... )
{
ok = is.numeric(x) && is.matrix(x)
if( ! ok )
{
log_level( ERROR, "matrix x is invalid." )
return(NULL)
}
if( is.integer(x) ) storage.mode(x) = 'double'
elist = list(e0,e1)
ok = all( sapply(elist,is.numeric) ) && all( sapply(elist,length) == 1L )
if( ! ok )
{
log_level( ERROR, "One of e0,e1 is invalid." )
return(NULL)
}
if( is.null(ground) )
ground = 1L:ncol(x)
if( length(ground) != ncol(x) )
{
log_level( ERROR, "ground is invalid, because the length is incorrect." )
return(NULL)
}
if( ! all( 0 < diff(ground) ) )
{
log_level( ERROR, "ground is invalid, because it is not in strictly increasing order." )
return(NULL)
}
# look for small columns in x; these are the loops
loopmask = apply( x, MARGIN=2, function(vec) { max(abs(vec)) } ) <= e0
loopraw = which( loopmask )
colnames(x) = as.character( ground )
# convert from matrix column indexes to ground indexes
gnd.noloops = ground[ ! loopmask ]
# extract the matrix of nonloops
x.noloops = x[ , ! loopmask, drop=FALSE ]
if( ncol(x.noloops) == 0 ) return(x.noloops) # special case
xunit = normalizeColumns( x.noloops ) #; print( xunit )
grp = findColumnGroups( xunit, e1, oriented=FALSE ) #; print(grp)
if( all(grp==0) ) return(x.noloops) # special case
# nonloop is a list of vectors, and not a vector
nonloop = setlistfromvec( grp, gnd.noloops )
condata = condenseMatrix( x, ground, nonloop )
if( is.null(condata) ) return(NULL)
return( condata$matrix )
}
# A a numeric matrix
# ground integer vector labeling the columns of A
# conspec a list of integer vectors, all subsets of ground.
# the corresponding columns of A are co-directional, with tolerance e1
#
# for each vector in conspec take the dominant direction of the corresponding columns,
# and copy that direction to output matrix.
# returns a list with items:
# matrix with a column for each vector in conspec. This is the simplified matrix. It has length(conspec) columns.
# multiplesupp a data.frame with a row for each group of multiples in conspec, and these columns
# colidx index of corresponding column in output matrix - this is the index of the simplified generators
# cmax coordinate with largest absolute value, used to compute the following
# mixed logical, which is TRUE iff the group has "mixed directions"
# major the longer vector in the zonoseg spanned by the generators in the group, always non-zero
# minor the shorter vector in the zonoseg; this is non-zero iff the group has "mixed directions"
condenseMatrix <- function( A, ground, conspec )
{
ok = is.double(A) && is.matrix(A)
if( ! ok )
{
return(NULL)
}
ok = length(ground) == ncol(A)
if( ! ok )
{
return(NULL)
}
# make inverse lookup vector
colfromgnd = integer( max(ground) )
colfromgnd[ ground ] = 1L:ncol(A)
if( ! is.list(conspec) )
{
log_level( ERROR, "Argument conspec is invalid." )
return(NULL)
}
if( ! subset1(unlist(conspec),ground))
{
return(NULL)
}
lenvec = lengths( conspec ) #sapply( conspec, length )
conspec1 = conspec
# change all the non-singleton lists to any valid single value
# these columns will be overwritten later
multiplemask = 2 <= lenvec
conspec1[ multiplemask ] = ground[1]
out = list()
out$matrix = A[ , colfromgnd[ unlist(conspec1) ], drop=F ]
# replace all the multiple columns with dominant direction
colidx = which( multiplemask )
m = length(colidx)
cmax = integer(m)
major = matrix( 0, nrow=m, ncol=nrow(A) )
minor = matrix( 0, nrow=m, ncol=nrow(A) )
mixed = logical(m)
for( i in seq_len(m) )
{
k = colidx[i]
idx = colfromgnd[ conspec[[k]] ]
res = dominantDirection( A[ ,idx, drop=FALSE] )
cmax[i] = res$cmax
out$matrix[ ,k] = res$dominant # .rowSums( A[ ,idx], nrow(A), length(idx) )
major[i, ] = res$major
minor[i, ] = res$minor
mixed[i] = any( res$minor != 0 )
}
colnames(out$matrix) = sapply( conspec, shortlabel )
multiplesupp = data.frame( row.names=colidx )
multiplesupp$colidx = colidx
multiplesupp$cmax = cmax
multiplesupp$mixed = mixed
multiplesupp$major = major
multiplesupp$minor = minor
out$multiplesupp = multiplesupp
return( out )
}
emptymultiplesupp <-function( m )
{
out = data.frame( row.names=character(0) )
out$colidx = integer(0)
out$major = matrix( 0, nrow=0, ncol=m )
out$minor = matrix( 0, nrow=0, ncol=m )
out$mixed = integer(0)
return(out)
}
# A a numeric matrix with rank 1
#
# thus the columns of A are collinear and generate a zonoseg Z in space
# there may be vectors in a single direction (0 is an endpoint of Z),
# or both directions (0 is in the interior of Z)
# In the latter case, we say the generators are "mixed".
#
# returns a list with these items:
# cmax the 1-based index of the coordinate with maximum absolute value
# dominant the difference between the 2 endpoints of the zonoseg,
# with the direction chosen to agree with the larger of the sums in either direction
# NB: out$dominant = out$major - out$minor
# major the larger of the 2 sums
# minor the smaller of the 2 sums, most often it is 0, which means not "mixed"
dominantDirection <- function( A )
{
ok = is.double(A) && is.matrix(A)
if( ! ok )
{
return(NULL)
}
# find which row has the largest norm,
# since the rows are all collinear too, any norm will do so use L^inf
cmax = arrayInd( which.max(abs(A)), dim(A) )[1]
rowmax = A[ cmax, ]
sumneg = rowSums( A[ , rowmax < 0, drop=FALSE ] )
sumpos = rowSums( A[ , 0 < rowmax, drop=FALSE ] )
out = list()
out$cmax = cmax
delta = abs(sumpos[cmax]) - abs(sumneg[cmax])
if( delta == 0 )
# this means that sumpos and sumneg are exact opposites
# in this case, choose the one with positive value at cmax
delta = sumpos[cmax] - sumneg[cmax]
if( 0 < delta )
{
out$major = sumpos ; out$minor = sumneg
}
else
{
out$major = sumneg ; out$minor = sumpos
}
out$dominant = out$major - out$minor
return(out)
}
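# a hand-checked demo of dominantDirection() on a rank-1 matrix with "mixed" directions;
# the generators are invented for illustration
if( FALSE )
    {
    A = cbind( c(2,4), c(1,2), c(-1,-2) )
    res = dominantDirection( A )
    res$major       # (3,6), the sum of the 2 positive columns
    res$minor       # (-1,-2), non-zero so the directions are "mixed"
    res$dominant    # (4,8) = major - minor
    }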
# base a non-zero n-vector
# direction a mxn matrix, whose rows are taken as n-vectors, AND are all multiples of base
#
# returns an m-vector of values, all are +1, -1, or 0, depending on the multiple
#
# a halfspace test would work, but that would be a little slower
signvector <- function( base, direction )
{
if( length(base) != ncol(direction) )
{
log_level( FATAL, "Internal error. %d != %d.", length(base), ncol(direction) )
return(NULL)
}
# find which component of base has the largest norm,
# since the rows of direction are all collinear, any norm will do so use L^inf
imax = which.max( abs(base) )
out = sign( base[imax] ) * sign( direction[ , imax ] )
return(out)
}
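# a quick demo of signvector(); the vectors are invented for illustration
if( FALSE )
    {
    base = c(2,0,-1)
    direction = rbind( 3*base, -0.5*base, 0*base )
    signvector( base, direction )   # 1 -1 0
    }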
# grp integer vector, as returned from grpDuplicated(). 0s are the singletons.
# ground integer vector of the point indexes
#
# returns a list of integer vectors, as indexed by ground[],
# each of which is a group of multiples with more than 1 point; compare with setlistfromvec().
grplistfromvec <- function( grp, ground=NULL )
{
if( is.null(ground) )
ground = 1L:length(grp)
else if( length(ground) != length(grp) )
{
log_level( ERROR, "length(ground)= %d is invalid!", length(ground) )
return(NULL)
}
n = max(grp)
out = vector( n, mode='list' )
for( i in seq_len(n) )
out[[i]] = ground[ which( grp == i ) ]
return(out)
}
# grp integer vector, as returned from grpDuplicated(). 0s are the singletons.
# ground integer vector of the point indexes, with the same length as grp
#
# returns a list of integer vectors, as indexed by ground[],
# This list of sets includes both singletons and multiples; compare with grplistfromvec().
# We always have length(out) <= length(grp)
setlistfromvec <- function( grp, ground=NULL )
{
if( is.null(ground) )
ground = 1L:length(grp)
else if( length(ground) != length(grp) )
{
log_level( ERROR, "length(ground)= %d is invalid!", length(ground) )
return(NULL)
}
out = as.list( ground )
keep = ! logical( length(out) )
n = max(grp)
for( i in seq_len(n) )
{
idx = which( grp == i )
out[[ idx[1] ]] = ground[ idx ]
# remove all except the first point in the group
keep[ idx[-1] ] = FALSE
}
out = out[ keep ]
return(out)
}
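# a hand-checked demo contrasting the two functions; grp and ground are invented for illustration
if( FALSE )
    {
    grp = c( 0L, 1L, 0L, 1L, 2L, 2L )
    ground = 11:16
    grplistfromvec( grp, ground )   # list( c(12,14), c(15,16) ) - the multiple groups only
    setlistfromvec( grp, ground )   # list( 11, c(12,14), 13, c(15,16) ) - singletons too
    }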
# xN one of:
# *) a list of integer vectors
# *) an integer vector
# *) NULL (ignored)
#
# returns the union of all inputs, in strictly ascending order
fastunion <- function( x1, x2=NULL, x3=NULL )
{
return( .Call( C_fastunion, x1, x2, x3 ) )
}
# hyper a list of integer vectors, each one of them defining a nontrivial hyperplane subset, and in increasing order. Not checked
# ground an integer vector - the union of all the sets in hyper - and in increasing order. Not checked
# returns a list of 2-point hyperplanes that, combined with hyper, form a 2-partition of the ground set
# if this is not possible, returns a character message with info on the problem
trivialhypers2 <- function( hyper, ground )
{
out = .Call( C_trivialhypers2, hyper, ground )
if( ! is.null(out$cmax) )
{
# ERROR. make a good error message
mess = "The hyperplanes do not satisfy the paving matroid properties for rank=3."
mess = c( mess, " the point pair %d,%d appears in %d hyperplanes." )
mess = paste0( mess, collapse='\n' )
mess = sprintf( mess, out$pmax[1], out$pmax[2], out$cmax )
# mess = c( mess, " Try reducing argument e2." )
return( mess )
}
return( out )
}
matrix2list <- function( x, MARGIN )
{
return( .Call( C_matrix2list, x, as.integer(MARGIN) ) )
}
incidencedata <- function( hyper, ground=NULL )
{
if( is.null(ground) ) ground = fastunion(hyper)
return( .Call( C_incidencedata, hyper, ground ) )
}
incidencematrix <- function( hyper, ground, subset )
{
out = .Call( C_incidencematrix, hyper, ground, subset )
colnames(out) = as.character(subset)
return( out )
}
findRunsTRUE <- function( mask, periodic=FALSE )
{
# put sentinels on either end, to make things far simpler
dif = diff( c(FALSE,mask,FALSE) )
start = which( dif == 1 )
stop = which( dif == -1 )
if( length(start) != length(stop) )
{
log_level( FATAL, "Internal error. %d != %d", length(start), length(stop) )
return(NULL)
}
stop = stop - 1L
if( periodic && 2<=length(start) )
{
m = length(start)
if( start[1]==1 && stop[m]==length(mask) )
{
# merge first and last
start[1] = start[m]
start = start[ 1:(m-1) ]
stop = stop[ 1:(m-1) ]
}
}
return( cbind( start=start, stop=stop ) )
}
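# a hand-checked demo of findRunsTRUE(), including the periodic wrap-around case
if( FALSE )
    {
    mask = c( TRUE, FALSE, TRUE, TRUE, FALSE, TRUE )
    findRunsTRUE( mask )                    # runs 1-1, 3-4, 6-6
    findRunsTRUE( mask, periodic=TRUE )     # the first and last runs merge into 6-1
    }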
# A 2xn matrix, with n vectors in the columns generating a convex cone
# there are no 0s or multiples, so do not have to worry about generators in both directions
#
# if the cone is salient, returns the indexes of the 2 columns, in CCW order
# if the cone is not salient, returns integer(0)
cxconegenerators <- function( A )
{
n = ncol(A)
if( nrow(A)!=2 || n<=1 ) return(NULL)
theta = atan2( A[2, ], A[1, ] )
perm = order( theta )
theta_sorted = theta[perm]
gapvec = c( diff(theta_sorted), 2*pi - (theta_sorted[n] - theta_sorted[1]) )
kmax = which.max( gapvec )
if( gapvec[kmax] <= pi )
# the vectors generate the entire plane, NOT salient
return( integer(0) )
# there is a gap with angle > pi
if( kmax < n ) k2 = kmax+1
else k2 = 1
out = perm[ c(k2,kmax) ] # ;match( c(k2,kmax), perm )
return( out )
}
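# a hand-checked demo of cxconegenerators(); the generators are invented for illustration
if( FALSE )
    {
    A = cbind( c(1,0), c(1,1), c(0,1) )     # a salient cone, the first quadrant
    cxconegenerators( A )                   # 1 3, the 2 extreme columns in CCW order
    cxconegenerators( cbind(A,-A) )         # integer(0), the cone is the whole plane
    }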
# u numeric N-vector
#
# returns Nx2 matrix with each row on the circle
tocircle <- function( u, tol=5.e-16 )
{
z = exp( u * 1i )
out = cbind( Re(z), Im(z) )
idx = which( abs(out) < tol )
if( 0 < length(idx) ) out[idx] = 0
return(out)
}
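# a quick demo of tocircle(); note the tiny roundoff values snapped to exact 0
if( FALSE )
    {
    tocircle( c(0, pi/2, pi) )      # rows (1,0), (0,1), (-1,0)
    }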
# u, v unit vectors of dimension n
#
# returns nxn rotation matrix that takes u to v, and fixes all vectors ortho to u and v
#
# Characteristic Classes, Milnor and Stasheff, p. 77
rotationshortest <- function( u, v )
{
n = length(u)
if( length(v) != n ) return(NULL)
uv = u + v
if( all(uv == 0) ) return(NULL)
out = diag(n) - (uv %o% uv)/(1 + sum(u*v)) + 2*(v %o% u)
return(out)
}
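# a hand-checked demo of rotationshortest() in dimension 3
if( FALSE )
    {
    R = rotationshortest( c(1,0,0), c(0,1,0) )
    R %*% c(1,0,0)      # (0,1,0), u is rotated to v
    R %*% c(0,0,1)      # (0,0,1), vectors orthogonal to u and v are fixed
    }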
# dir non-zero 3-vector
#
# returns a 3x3 orthogonal matrix where the 3rd column is parallel to dir
# and the 1st and 2nd are orthogonal to dir
# Thus the transpose of the returned matrix rotates dir to the z-axis
goodframe3x3 <- function( dir )
{
out = base::svd( dir, nu=3 )$u
# the 1st column is a multiple of dir, but it might be a negative multiple
if( sum( dir * out[ ,1] ) < 0 )
# reverse sign of 1st column
out[ ,1] = -out[ ,1]
# move 1st column to the 3rd
# and ensure that determinant is positive, so it's a rotation
if( 0 < det(out) )
perm = c(2,3,1) # even
else
perm = c(3,2,1) # odd
out = out[ , perm ]
return( out )
}
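# a quick sanity check of goodframe3x3(); dir is invented for illustration
if( FALSE )
    {
    Q = goodframe3x3( c(0,0,2) )
    crossprod( Q )      # the 3x3 identity (up to rounding), so Q is orthogonal
    Q[ ,3]              # (0,0,1), a positive multiple of dir
    det( Q )            # +1, so Q is a rotation
    }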
# point nx3 matrix of points on a great circle of S^2
# normal unit normal to the plane spanned by the great circle, this serves to orient the circle
#
# returns a permutation of 1:n that puts the points in counter-clockwise order,
# using the right-hand-rule with normal
orderoncircle <- function( point, normal )
{
# find a 3x3 rotation matrix that rotates normal to the north pole
pole = c(0,0,1)
if( all( abs(normal+pole) < 1.e-2 ) )
{
# tweak normal away from -pole,
# this will change the projected circle to an ellipse but the order is still the same
normal = c(1,12,-12)/17 # from a Pythagorean quadruple
}
Q = rotationshortest( normal, pole ) #; print( Q )
# compute xy, the z coordinate is very near 0 and ignored
xy = point %*% t( Q[1:2, ] ) #; print(xy) ; print( sqrt( rowSums(xy^2) ) )
theta = atan2( xy[ ,2], xy[ ,1] )
perm = order(theta)
return( perm )
}
# normal a non-zero 3-vector
#
# returns a 3x2 matrix where the columns complete normal to an orthogonal basis
# the 2 columns are unit vectors
# if normal is added as a 3rd column, the 3x3 matrix preserves orientation
# if normal is replaced by -normal, the columns of output matrix are swapped.
frame3x2fun <- function( normal, axischeck=FALSE )
{
ok = is.numeric(normal) && length(normal)==3 && 0<sum(abs(normal))
if( ! ok )
{
log_level( ERROR, "argument normal is invalid." )
return(NULL)
}
out = base::svd( normal, nu=3 )$u[ , 2:3 ] #; print( out )
test = crossproduct( out[ ,1], out[ ,2] )
if( sum(test*normal) < 0 )
{
# swap columns
out = out[ , 2L:1L ] #; cat( 'frame3x2fun(). columns swapped !\n' )
}
if( axischeck && isaxis( -out[ ,1] ) )
{
out[ ,1] = -out[ ,1]
out = out[ , 2L:1L ] # swap
}
else if( axischeck && isaxis( -out[ ,2] ) )
{
out[ ,2] = -out[ ,2]
out = out[ , 2L:1L ] # swap
}
if( FALSE )
{
# multiplication check
test = normal %*% out
if( 1.e-14 < max(abs(test)) )
{
log_level( ERROR, "frame3x2fun() failed orthogonal test = %g!", max(abs(test)) )
}
test = t(out) %*% out - diag(2)
if( 1.e-14 < max(abs(test)) )
{
log_level( ERROR, "frame3x2fun() failed product test = %g!", max(abs(test)) )
}
}
if( FALSE )
{
# orientation check
if( determinant( cbind(out,normal) )$sign <= 0 )
{
log_level( ERROR, "frame3x2fun() failed sign test. det = %g!", det( cbind(out,normal) ) )
}
}
return(out)
}
isaxis <- function( vec )
{
n = length(vec)
return( sum(vec==0)==n-1 && sum(vec==1)==1 )
}
# vec numeric vector
# skip integer coordinate to skip (ignore)
allequalskip <- function( vec, skip )
{
vec = vec[-skip]
return( all( vec==vec[1] ) )
}
gettime <- function()
{
return( microbenchmark::get_nanotime() * 1.e-9 )
}
createtimer <- function()
{
now = microbenchmark::get_nanotime() * 1.e-9
# using a list
return( list( created=now, now=now, elapsed=0, total=0 ) )
# using an array of doubles is actually slower !
#out = c(now,now,0,0)
#names(out) = c("created","now","elapsed","total")
#return( out )
}
updatetimer <- function( x, reset=TRUE )
{
now = microbenchmark::get_nanotime() * 1.e-9
out = x
out$elapsed = now - out$now
out$total = now - out$created
if( reset ) out$now = now
# using integers
#out[3L] = now - out[2L]
#out[4L] = now - out[1L]
#if( reset ) out[2L] = now
# using names
#out['elapsed'] = now - out['now']
#out['total'] = now - out['created']
#if( reset ) out['now'] = now
return(out)
}
# vertex 2n x m matrix with vertices of a convex polygon in the rows
#
# returns 4(n-1) x m matrix with tiling of the polygon into convex quadrangles
# If m=3, it is ready to pass to rgl::quad3d()
makequads <- function( vertex )
{
n = nrow(vertex) / 2L
ok = 2<=n && as.integer(n)==n
if( ! ok )
{
log_level( ERROR, "nrow(vertex)=%g is invalid.", nrow(vertex) )
return(NULL)
}
mat = matrix( c( 1:(n-1), 2:n, (2*n-1):(n+1), (2*n):(n+2) ), ncol=4 )
idx = as.integer( t(mat) ) #; print(idx)
out = vertex[ idx, ]
return(out)
}
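# a minimal demo of makequads() with n=2; the square is invented for illustration
if( FALSE )
    {
    vertex = rbind( c(0,0), c(1,0), c(1,1), c(0,1) )    # 2n x 2 with n=2
    makequads( vertex )     # 4 x 2, a single quad covering the square
    }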
# p a point in the n-cube, which we can think of as a transmittance spectrum
#
# returns the min number of transitions between 0 and 1 necessary to achieve such a p, including interpolation
# it always returns an even integer
#
# this is the fast C version
transitioncount <- function( p )
{
.Call( C_transitioncount, p )
}
# p a point in the n-cube, which we can think of as a transmittance spectrum
#
# returns the min number of transitions between 0 and 1 necessary to achieve such a p, including interpolation
# it always returns an even integer
#
# this is the slow R version
transitioncount_old <- function( p )
{
n = length(p)
if( n == 0 ) return(0L)
# find all runs of points in the interior of [0,1], in a periodic way
interior = 0<p & p<1
if( all(interior) )
# special case
return( as.integer( 2 * floor( (n+1)/2 ) ) )
mat = findRunsTRUE( interior, periodic=TRUE )
transitions = 0
if( 0 < nrow(mat) )
{
inext = c(2:n,1)
iprev = c(n,1:(n-1))
for( i in 1:nrow(mat) )
{
start = mat[i,1]
stop = mat[i,2]
m = stop - start + 1
if( m < 0 ) m = m + n
same = p[ iprev[start] ] == p[ inext[stop] ]
inc = ifelse( same, 2*floor( (m+1)/2 ), 2*floor( m/2 ) )
transitions = transitions + inc
}
}
# now remove all the interior coordinates
ppure = p[ ! interior ]
# add usual 0-1 transitions
transitions = transitions + sum( diff(ppure) != 0 )
# add wrap-around transition, if present
if( ppure[1] != ppure[ length(ppure) ] ) transitions = transitions + 1
return( as.integer(transitions) )
}
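# hand-checked examples of the transition count; the count is always even
if( FALSE )
    {
    transitioncount_old( c(0,1,1,0,1) )     # 4, counted cyclically
    transitioncount_old( c(0,0.5,1,0) )     # 2, the interior 0.5 interpolates a single 0-1 ramp
    }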
# this is only valid if 1 <= i < j <= n
PAIRINDEX <- function( i, j, n )
{
(i-1)*n - ((i)*(i+1))/2 + j
}
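# a tiny consistency check of PAIRINDEX() against the standard ordering of all pairs
if( FALSE )
    {
    n = 4
    PAIRINDEX( 1, 2, n )    # 1, the first pair
    PAIRINDEX( 2, 3, n )    # 4
    PAIRINDEX( 3, 4, n )    # 6 = n*(n-1)/2, the last pair
    }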
# subground increasing integer M-vector, and subvector of ground, NOT verified
# ground increasing integer N-vector, with M <= N
#
# each of these inputs has a matrix of all pairs, with standard ordering
# for subground there are M*(M-1)/2 pairs
# for ground there are N*(N-1)/2 pairs
#
# each pair for subground also appears in as a pair for ground
# The function returns an integer vector of length M*(M-1)/2
# giving the row in ground of each pair from subground
translateallpairs <- function( subground, ground )
{
# m = length(subground)
#subidxraw = .Call( C_allpairs, m )
#subidx = subground[ subidxraw ]
#dim(subidx) = dim(subidxraw)
subidx = allpairs( subground )
# subidx has ALL pairs from subground, in standard order
# each pair is also a pair from ground
# translate these to raw indexes in ground
idxfromground = idxfromgroundfun( ground )
idxraw = idxfromground[ subidx ]
dim(idxraw) = dim(subidx)
n = length(ground)
# idxraw has some pairs taken from 1:n
# find their position in the standard order of ALL pairs - N(N-1)/2 of them
out = .Call( C_pairindex, idxraw, n )
return( out )
}
# .vec1 and .vec2 non-zero vectors of the same dimension
#
angleBetween <- function( .vec1, .vec2, unitized=FALSE, eps=5.e-14 )
{
q = sum( .vec1*.vec2 )
if( ! unitized )
{
len1 = sqrt( sum(.vec1^2) )
len2 = sqrt( sum(.vec2^2) ) #; print( denom )
denom = len1 * len2
if( abs(denom) < eps ) return( NA_real_ )
q = q / denom #; print(q)
}
if( abs(q) < 0.99 )
{
# the usual case uses acos
out = acos(q)
}
else
{
# use asin instead
if( ! unitized )
{
.vec1 = .vec1 / len1
.vec2 = .vec2 / len2
}
if( q < 0 ) .vec2 = -.vec2
d = .vec1 - .vec2
d = sqrt( sum(d*d) )
out = 2 * asin( d/2 )
if( q < 0 ) out = pi - out
}
return(out)
}
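# a quick demo of angleBetween(); the vectors are invented for illustration
if( FALSE )
    {
    angleBetween( c(1,0), c(0,2) )                          # pi/2
    angleBetween( c(1,0), c(1,1)/sqrt(2), unitized=TRUE )   # pi/4
    }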
# returns NULL unless ALL the values are valid integers
# if some are invalid, prints a warning message
intfromchar <- function( charvec )
{
if( is.null(charvec) || ! is.character(charvec) ) return(NULL)
bad = ! grepl( "^[ ]*-?[0-9.]+[ ]*$", charvec )
if( any(bad) )
{
log_level( WARN, "%d of %d values are invalid integers.", sum(bad), length(bad) )
return(NULL)
}
out = as.integer( charvec )
bad = (out != as.double(charvec))
bad[ is.na(bad) ] = TRUE
if( any(bad) )
{
log_level( WARN, "%d of %d values are invalid integers.", sum(bad), length(bad) )
return(NULL)
}
return( out )
}
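# a quick demo of intfromchar(); the strings are invented for illustration
if( FALSE )
    {
    intfromchar( c("3","-7"," 12 ") )   # 3 -7 12
    intfromchar( c("3","2.5") )         # NULL, after logging a warning
    }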
################################# deadwood below ##########################
# A a numeric matrix
# ground integer vector labeling the columns of A
# conspec one of these:
# *) integer vector defining a subset of ground
# *) a list of integer vectors, all subsets of ground
# For the integer vector, just copy those columns to output.
# For a list, for each vector take the dominant direction of the corresponding columns,
# and copy that direction to output.
# returns a matrix with length(conspec) columns
condenseMatrix_old <- function( A, ground, conspec )
{
ok = is.double(A) && is.matrix(A)
if( ! ok )
{
return(NULL)
}
ok = length(ground) == ncol(A)
if( ! ok )
{
return(NULL)
}
# make inverse lookup vector
colfromgnd = integer( max(ground) )
colfromgnd[ ground ] = 1L:ncol(A)
if( is.integer(conspec) )
{
# this is the easy case
if( ! subset1(conspec,ground) )
{
return(NULL)
}
out = A[ , colfromgnd[conspec], drop=F ]
}
else if( is.list(conspec) )
{
if( ! subset1(unlist(conspec),ground))
{
return(NULL)
}
lenvec = lengths( conspec ) #sapply( conspec, length )
conspec1 = conspec
# change all the non-singleton lists to any valid single value
multiplemask = 2 <= lenvec
conspec1[ multiplemask ] = ground[1]
out = A[ , colfromgnd[ unlist(conspec1) ], drop=F ]
# replace all the multiple columns with sums
multiple = which( multiplemask )
for( k in multiple )
{
idx = colfromgnd[ conspec[[k]] ]
out[ ,k] = dominantDirection( A[ ,idx, drop=FALSE] )$dominant # .rowSums( A[ ,idx], nrow(A), length(idx) )
}
}
else
{
log_level( ERROR, "conspec is invalid." )
return(NULL)
}
colnames(out) = sapply( conspec, shortlabel )
return( out )
}
# end of file  R/utils.R
#
# zonogon is a 2-dimensional zonotope
#
# implemented as a list with items:
# matroid the matroid for the zonogon, which includes the generating matrix, etc.
# center a 2-vector
# facet a data.frame with a row for each hyperplane in the simplified matroid, and in the same order
# and for each facet of the zonogon, and these columns:
# center the center of the edge of the centered zonogon
# normal outward-pointing unit normal
# beta equation of the slab is -beta <= <x,normal> <= beta. We always have beta > 0.
# facet0 integer vector of facet/hyperplane indexes that contain the point 0
# vertex a (2n)x2 matrix with vertex in each row, not centered
# tilingdata data.frame with a row for each pgram tile in the standard tiling, and these columns
# idxpair 2 indexes of the pgram
# center center of the pgram tile, relative to the center of the zonogon
# NOTE: If the zonogon is a pgram, tilingdata has only one row !
# The tiling has N pgrams meeting 0, where N is the number of generators in the simplified matroid,
# and there is only 1 pgram meeting the white point.
# zonogon constructor
#
# mat a numeric matrix with 2 rows
# e0 threshold for a column vector to be considered 0
# e1 threshold for codirectionality
# ground ground set, an integer vector in increasing order and length(ground) = ncol(x)
zonogon <- function( mat, e0=0, e1=1.e-6, ground=NULL )
{
ok = is.matrix(mat) && is.numeric(mat) && nrow(mat)==2 && 2<=ncol(mat)
if( ! ok )
{
log_level( ERROR, "mat is invalid." )
return(NULL)
}
matroid = matroid( mat, e0=e0, e1=e1, ground=ground )
if( is.null(matroid) )
return(NULL)
out = list()
class( out ) = c( "zonogon", "zonotope", class(out) )
out$matroid = matroid
# get the simplified generators, and their ground set
matsimple = getsimplified(matroid)
matgen = getmatrix( matsimple )
gndgen = getground( matsimple )
# at this point we use the simplifying fact
# that the n generators in matgen and the n hyperplanes in matsimple
# are in the same order
n = ncol(matgen) # number of generators and hyperplanes
# make a data.frame with all generators and their opposites
facet = data.frame( idx=rep(1:n,2), sign=c(rep(1L,n),rep(-1L,n)) )
# compute all the outward pointing edge normals
normal = cbind( matgen[2, ], -matgen[1, ] )
# now unitize
normal = normalizeMatrix( normal, 1L )
facet$normal = rbind(normal,-normal)
# sort normals and their opposites in cclockwise order,
# starting with the smallest positive angle
theta = atan2( facet$normal[ ,2], facet$normal[ ,1] )
maskneg = theta<0
theta[ maskneg ] = theta[ maskneg ] + 2*pi
perm = order( theta )
# but only keep the first n rows
facet = facet[ perm[1:n], ]
# trace n edges in cclockwise order, initialize the 1st edge
# matrix with the center of each edge
center = matrix( NA_real_, n, 2 )
vertex = matrix( NA_real_, n, 2 )
# facet0 is the vector of all facet pairs that contain 0 and the "whitepoint"
# the length of facet0 is one of:
# 0 0 is in the interior of the zonogon
# 1 0 is in the interior of an edge of the zonogon (rare)
# 2 0 is a vertex of the zonogon
# This facet0 vector depends on whether we are considering the original generators,
# or the generators from the matrix of the simplified matroid.
# We compute it initially for the simplified generators, and then modify it later.
facet0 = integer(0)
# outward pointing normal for the first edge
normal = facet$normal[1, ]
# compute center of first edge
pcube = 0.5 * sign( as.double(normal %*% matgen) ) #; cat( "j=", facet$idx[1], " pcube=", pcube, '\n' )
# ensure that coordinate face$idx[1] is *exactly* zero
# variable j is carried into the following for() loop, and is updated during each iteration
j = facet$idx[1]
pcube[ j ] = 0
for( k in 1:n )
{
#j = facet$idx[k]
if( TRUE )
{
# test that pcube really maps to the center of an edge
idx0 = which( pcube==0 )
if( length(idx0) != 1 )
{
log_level( FATAL, "Internal Error. pcube does not have exactly one 0 value. k=%d.", k )
# cat( "pcube=", pcube, '\n' )
return(NULL)
}
# delete coord idx0 (which has value 0) and test that the others are +0.5 or -0.5
if( ! all( abs(pcube[-idx0]) == 0.5 ) )
{
log_level( FATAL, "Internal Error. pcube to edge is invalid. k=%d.", k )
# cat( "pcube=", pcube, '\n' )
return(NULL)
}
}
# map pcube to center[k, ]
center[k, ] = matgen %*% pcube
if( allequalskip( pcube, j ) )
{
# this edge has 0 as one of the endpoints
facet0 = c( facet0, j )
#cat( "k=", k, " pcube=", pcube, '\n' )
#cat( "facet0=", facet0, '\n' )
}
# advance from edge center to vertex of zonogon
pcube[j] = facet$sign[k] * 0.5 # all coords of pcube are now +1/2 or -1/2
if( TRUE )
{
# test that pcube really maps to a vertex of the zonogon
if( ! all( abs(pcube) == 0.5 ) )
{
log_level( FATAL, "Internal Error. pcube to vertex is invalid. k=%d.", k )
# cat( "pcube=", pcube, '\n' )
return(NULL)
}
}
# map pcube to vertex and save it
vertex[k, ] = matgen %*% pcube
if( k < n )
{
# advance j to the next edge
# advance pcube to the center of the next edge
j = facet$idx[k+1]
pcube[ j ] = 0 #; cat( "j=", j, " pcube=", pcube, '\n' )
}
}
if( TRUE && !( length(facet0) %in% c(0,2) ) )
{
log_level( FATAL, "Internal Error. length(facet0)=%d, which is not 0 or 2.", length(facet0) )
return(NULL)
}
facet$center = center
# put the facets in column index order, which matches the hyperplane index order
# this is currently assumed by section.zonogon
facet = facet[ order( facet$idx ), ]
# drop columns idx and sign since they are no longer needed
facet$idx = NULL
facet$sign = NULL
# calculate the n plane constants beta
# these plane constants are for the centered zonogon
facet$beta = rowSums( facet$normal * facet$center )
# since matgen[] has rank 2, 0 is in the interior of the _centered_ zonogon,
# and this implies that all n beta's are positive.
# verify this
betamin = min( facet$beta )
if( betamin <= 0 )
{
log_level( FATAL, "Internal Error. min(beta)=%g <= 0.", betamin )
return(NULL)
}
out$center = 0.5 * .rowSums( mat, nrow(mat), ncol(mat) )
out$facet = facet
# facet0 is now for the simplified generators.
# modify facet0[] for the original generators, by removing some of them.
# get the "mixed direction" generators,
# which are the generator minors that are non-zero
colidx = getmixed( matroid ) # column indexes of the simplified matrix, but pass the original matroid
if( 0<length(facet0) && 0<length(colidx) )
{
# keep only those hyperplanes in facet0 that contain all mixed generators, and remove all the rest
hyper = matsimple$hyperplane[ facet0 ]
# keep is a logical vector entries corresponding to those in facet0 and hyper
keep = .Call( C_issuperset, hyper, gndgen[colidx] )
#print( hyper )
#cat( "gndgen[colidx]=", gndgen[colidx], '\n' )
#cat( "keep= ", keep, '\n' )
#cat( "changing facet0 from ", facet0, " to ", facet0[keep], '\n' )
facet0 = facet0[ keep ]
if( FALSE )
{
if( length(colidx) == 1 )
{
# colidx is a single facet; is colidx in facet0 ?
k = match( colidx, facet0 )
if( ! is.na(k) )
# keep colidx and throw away the other one
facet0 = facet0[k]
else
# facet0 should be empty, for the original generators.
facet0 = integer(0)
}
else
# there are 2 or more "mixed direction" generators
# so 0 is in the interior of the zonogon, and facet0 should be empty.
facet0 = integer(0)
}
}
out$facet0 = facet0
# add vertices by symmetry
vertex = rbind( vertex, -vertex )
# translated to original and non-centered zonogon
out$vertex = vertex + matrix( out$center, nrow(vertex), 2, byrow=TRUE ) # back to original coords
# add data for the standard tiling
tilingdata = NULL
if( n == 2 )
{
# trivial tiling, only 1 tile
tilingdata = data.frame( row.names=1 )
tilingdata$idxpair = matrix( 1:2, nrow=1, ncol=2 )
tilingdata$center = matrix( 0, nrow=1, ncol=2 )
}
else
{
# 3 or more generators, n*(n-1)/2 tiles
zono = liftedzonohedron( matgen, ground=gndgen )
if( ! is.null(zono) )
{
matroidsimp3 = getsimplified(zono$matroid)
if( TRUE && ! all( lengths(matroidsimp3$hyperplane)==2 ) )
{
log_level( FATAL, "Internal Error. Some hyperplanes of lifted zonohedron are non-trivial." )
return(NULL)
}
# make lookup table from ground to column index
idxfromground = idxfromgroundfun( gndgen )
pgrams = nrow(zono$facet) # # of tiles = n*(n-1)/2
idxpair = matrix( NA_integer_, nrow=pgrams, ncol=2 )
for( k in 1:pgrams )
{
gen2 = matroidsimp3$hyperplane[[k]]
idxpair[k, ] = idxfromground[ gen2 ]
# take the center of the pgram on the zonohedron facet, and drop the Z coordinate
# centerpgram[k, ] = zono$facet$center[k,1:2] #+ out$center
}
centerpgram = zono$facet$center[ ,1:2]
if( TRUE )
{
# verify the correct order
perm = order( idxpair[ ,1], idxpair[ ,2] )
if( ! all( perm == 1:length(perm) ) )
{
log_level( WARN, "idxpair[,] is not in the correct order !" )
return(NULL)
}
}
tilingdata = data.frame( row.names=1:pgrams )
tilingdata$idxpair = idxpair
tilingdata$center = centerpgram
}
}
out$tilingdata = tilingdata
return(out)
}
# n a positive integer, so the step size on the circle is 2*pi/n
# m number of points to compute, starting at 1
polarzonogon <- function( n, m=n, ground=NULL )
{
if( is.null(ground) )
ground = 1L:m
else if( length(ground) != m )
{
log_level( ERROR, "ground is invalid, because the length is incorrect." )
return(NULL)
}
if( n < 3 )
{
log_level( ERROR, "n=%d is invalid.", n )
return(NULL)
}
if( m<2 || n<m )
{
log_level( ERROR, "m=%d is invalid.", m )
return(NULL)
}
u = (0:(m-1)) / n
mat = t( tocircle(2*pi*u) )
return( zonogon(mat,ground=ground) )
}
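# a small demo of polarzonogon(), assuming the zonohedra package itself is loaded;
# the generators are m consecutive n'th roots of unity, starting at 1
if( FALSE )
    {
    zono = polarzonogon( 8, 4 )     # 4 unit generators spaced 45 degrees apart; a regular octagon
    getmetrics( zono )
    plot( zono )
    }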
if( FALSE )
{
# x a zonogon object
# p an Mx2 matrix, etc.
#
# value see inside_zonotope()
inside.zonogon <- function( x, p )
{
p = prepareNxM( p, 2 )
if( is.null(p) ) return(NULL)
# translate p to the centered zonogon
gcentered = p - matrix( x$center, nrow(p), 2, byrow=TRUE ) #; print(gcentered)
hg = tcrossprod( x$facet$normal, gcentered ) #; print( str(hg) )
out = inside_zonotope( x, p, hg )
return( out )
}
# x a zonogon object
# direction Mx2 matrix, with the M directions in the rows, direction (0,0) is invalid
# tol tolerance for argmax, being in the same affine subspace
# value: see support_zonotope()
support.zonogon <- function( x, direction, tol=5.e-15 )
{
return( support_zonotope(x,direction,tol) )
}
}
# x a zonogon object
# base a numeric vector of length 2, the basepoint of all the rays
# base must be in the interior of x,
# or if x is non-negative, base can also be the black or white point on the boundary(x)
# direction an Nx2 matrix with directions in the rows
#
# value a dataframe with columns
# base given basepoint of the ray (all the same)
# direction given direction of the ray
# facetidx of the facet (edge) where ray exits the zonogon
# sign +1 if beta, and -1 if -beta
# tmax ray parameter of intersection with facet
# point point of intersection with the intersection with facet
# timetrace computation time (sec)
raytrace.zonogon <- function( x, base, direction, plot=FALSE, ... )
{
ok = is.numeric(base) && length(base)==2 && all( is.finite(base) )
if( ! ok )
{
log_level( ERROR, "base is invalid. It must be a numeric vector of length 2, and all entries finite." )
return(NULL)
}
direction = prepareNxM( direction, 2 )
if( is.null(direction) ) return(NULL)
# translate base to the centered zonogon
base = as.numeric(base)
gcentered = base - x$center #; print(gcentered)
dim(base) = c(1,2)
dim(gcentered) = c(1,2)
hg = tcrossprod( x$facet$normal, gcentered ) #; print( str(hg) )
# hg = as.numeric( x$facet$normal %*% gcentered ) #; print( str(hg) )
#df = inside_zonotope( x, base, hg )
#if( ! df$inside )
# {
# log_level( ERROR, "point base=(%g,%g) is not in the interior of the zonogon.",
# base[1], base[2] )
# return(NULL)
# }
dim(gcentered) = NULL
# test whether base is black or white point, no tolerance here
blackpt = ifelse( is_salient(x), all(gcentered == -x$center), FALSE )
whitept = ifelse( is_salient(x), all(gcentered == x$center), FALSE )
if( blackpt || whitept )
{
# get the normals for all facets that meet 0
# normal0 is Mx2 where M is the number of these facets
normal0 = x$facet$normal[ x$facet0, , drop=FALSE ]
# get a vector that points from base into the interior
if( blackpt )
interiorvec = x$center
else
interiorvec = -(x$center)
test = normal0 %*% interiorvec
dim(test) = NULL
# in the next line, sign(test) is replicated to all columns of normal0
# normal0 will be used in the for() loop below
normal0 = sign(test) * normal0
if( FALSE )
{
cat( "before test=", test, "\n" )
test = normal0 %*% interiorvec
cat( "after test=", test, "\n" )
}
}
else
{
# not blackpt or whitept, so verify that base is in the *interior* of x
df = inside_zonotope( x, base, hg )
if( 0 <= df$distance )
{
log_level( ERROR, "point base=(%g,%g) is not in the interior of the zonogon. distance=%g >= 0",
base[1], base[2], df$distance )
return(NULL)
}
}
dim(base) = NULL
n = nrow(direction)
tmax = rep(NA_real_,n)
idx = rep(NA_integer_,n)
sign = rep(NA_integer_,n)
point = matrix(NA_real_,n,2)
timetrace = rep(NA_real_,n)
for( k in 1:n )
{
time_start = gettime()
v = direction[k, ]
if( any( is.na(v) ) ) next
if( all( v==0 ) ) next # 0-vector
if( blackpt || whitept )
{
interior = all( 0 < normal0 %*% v )
if( ! interior )
# the ray starts on the boundary and does *not* enter the interior, so give up
next
}
hv = x$facet$normal %*% v
numerator = x$facet$beta - sign(hv)*hg
tvec = numerator / abs(hv)
tvec[ ! is.finite(tvec) ] = Inf
j = which.min( tvec ) # this ignores ties and Infs
if( tvec[j] <= 0 ) next # failed to intersect properly
tmax[k] = tvec[j] # tmax[k] is not negative
idx[k] = j # x$facet$idx[j]
sign[k] = as.integer( sign(hv[j]) )
optcentered = gcentered + tmax[k] * v #; print( optcentered )
point[k, ] = optcentered + x$center
timetrace[k] = gettime() - time_start
}
rnames = rownames(direction)
if( is.null(rnames) || anyDuplicated(rnames) ) rnames = 1:n
out = data.frame( row.names=rnames )
out$base = matrix( base, n, 2, byrow=TRUE ) # replicate base to all rows
out$direction = direction
out$facetidx = idx
out$sign = sign
out$tmax = tmax
out$point = point
out$timetrace = timetrace
cnames = colnames(base)
if( is.null(cnames) ) cnames = colnames(direction)
colnames(out$point) = cnames
if( plot )
{
if( grDevices::dev.cur() == 1 )
{
log_level( WARN, "Cannot add rays to plot, because there is no plotting window open." )
}
else
{
graphics::segments( out$base[ ,1], out$base[ ,2], point[ ,1], point[ ,2], col='red', lty=1 )
graphics::points( point[ ,1], point[ ,2], col='red', pch=20 )
}
}
return( out )
}
# section() compute intersection of zonogon and line(s)
#
# x a zonogon object
# normal a non-zero numeric vector of length 2, the normal of all the lines
# beta a vector of line-constants. The equation of line k is: <x,normal> = beta[k]
#
# value a data.frame with length(beta) rows. And these columns:
# normal the given normal and the same in each row
# beta given line constant of the line
# boundary1 the 1st point, or NA
# boundary2 the 2nd point, or NA
#
# If there are 2 boundary points, then boundary1 is on the left and boundary2 on the right,
# assuming that normal is "up".
# Another way: if normal is "north" then b1 is on "west" and b2 on the "east".
section.zonogon <- function( x, normal, beta, tol=1.e-10, plot=FALSE, ... )
{
ok = is.numeric(normal) && length(normal)==2 && all( is.finite(normal) ) && any( normal!=0 )
if( ! ok )
{
log_level( ERROR, "normal is invalid. It must be a non-zero numeric vector of length 2, and all entries finite." )
return(NULL)
}
ok = is.numeric(beta) && 0<length(beta) && all( is.finite(beta) )
if( ! ok )
{
log_level( ERROR, "beta is invalid. It must be a numeric vector of positive length, and all entries finite." )
return(NULL)
}
normal = as.numeric(normal)
W = t( getmatrix( getsimplified(x$matroid) ) )
# compute functional in R^n
functional = as.numeric( W %*% normal ) #; print( functional )
# find vertex where functional is maximized
vertex_max = 0.5 * sign( functional ) #; print( vertex_max ) # this is a vertex of the n-cube, translated by -1/2
betamax = sum( functional * vertex_max ) # the maximum of <x,normal> for x in the zonotope
betamin = -betamax # by symmetry
# the facet centers are much easier to work with when the antipodal ones are added to the originals
center = rbind( x$facet$center, -x$facet$center )
W2 = rbind( W, W )
functional = rep( functional, 2 )
delta = 0.5 * abs(functional)
cn = center %*% normal
# compute range of normal over each edge/facet
cnpos = cn + delta
cnneg = cn - delta
# translate beta to the centered zonogon
betacentered = as.numeric(beta) - sum( x$center * normal ) #; print(betacentered)
n = length(beta)
#out = vector( n, mode='list' )
#names(out) = sprintf( "normal=%g,%g. beta=%g", normal[1], normal[2], beta )
rnames = names(beta)
if( is.null(rnames) || anyDuplicated(rnames)!=0 ) rnames = 1:n
out = data.frame( row.names=rnames )
out$normal = matrix( normal, nrow=n, ncol=2, byrow=TRUE )
out$beta = as.numeric(beta)
boundary1 = matrix( NA_real_, nrow=n, ncol=2 )
boundary2 = matrix( NA_real_, nrow=n, ncol=2 )
for( k in 1:n )
{
beta_k = betacentered[k]
if( beta_k < betamin-tol || betamax+tol < beta_k )
{
# line does not intersect the zonogon. there is no section
next
}
if( abs(beta_k - betamax) < tol )
{
# special case - only one point of intersection
boundary1[k, ] = vertex_max %*% W + x$center
next
}
if( abs(beta_k - betamin) < tol )
{
# special case - only one point of intersection
boundary1[k, ] = -vertex_max %*% W + x$center
next
}
idx = which( cnneg <= beta_k & beta_k < cnpos ) #; print( idx )
count = length(idx)
ok = count %in% 1:2
if( ! ok )
{
# should not happen
log_level( WARN, "count=%d unexpected. count should be 1 or 2.", count )
next
}
section = matrix( NA_real_, nrow=count, ncol=2 )
for( i in 1:count )
{
j = idx[i]
alpha = (beta_k - cn[j])/ functional[j] #; print(alpha)
section[i, ] = center[j, ] + alpha * W2[j, ] + x$center # translate from centered back to original
}
if( count == 1 )
boundary1[k, ] = section[1, ]
else
{
p1 = section[1, ]
p2 = section[2, ]
# put p1 on "west" and p2 on "east"
dif = p2 - p1 # dif must be orthogonal to normal
test = dif[1]*normal[2] - dif[2]*normal[1]
if( test < 0 )
{
# swap p1 and p2
p=p1 ; p1=p2 ; p2=p
}
boundary1[k, ] = p1
boundary2[k, ] = p2
}
#out[[k]] = list()
#out[[k]]$beta = beta[k]
#out[[k]]$section = section
#colnames(out[[k]]$section) = names(normal)
}
out$boundary1 = boundary1
out$boundary2 = boundary2
if( plot )
{
if( grDevices::dev.cur() == 1 )
{
log_level( WARN, "Cannot add section to plot, because there is no plotting window open." )
}
else
{
graphics::segments( boundary1[ ,1], boundary1[ ,2], boundary2[ ,1], boundary2[ ,2], col='red', lty=2 )
graphics::points( boundary1[ ,1], boundary1[ ,2], col='red', pch=1 )
graphics::points( boundary2[ ,1], boundary2[ ,2], col='red', pch=1 )
}
}
return( out )
}
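# a small usage sketch of section(), assuming the zonohedra package itself is loaded;
# the normal and line constants are invented for illustration
if( FALSE )
    {
    zono = polarzonogon( 8, 4 )
    section( zono, normal=c(0,1), beta=c(0.25,0.5) )
    }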
# x a zonogon
# z Nx2 matrix of points inside the zonogon
# tol tolerance for being inside the zonogon,
# a very small positive number allows the points to be slightly outside
# plot if TRUE then plot the points on an existing plot of the zonogon
#
# returns a data.frame with a row for each point z[i, ], and these columns
# hyperidx index of parallelogram that contains z[i, ]
# hyper Nx2 matrix of the generators of the parallelogram
# pcube NxM matrix of points in the original cube that maps to z[i, ]
# where M is the number of generators of the zonogon
invert.zonogon <- function( x, z, tol=0, plot=FALSE, ... )
{
z = prepareNxM( z, 2 )
if( is.null(z) ) return(NULL)
df = inside( x, z )
if( is.null(df) ) return(NULL)
# print( df )
inside = df$distance < tol
if( any( !inside ) )
{
log_level( WARN, "%d of %d points are not inside the zonogon. Try increasing tol=%g.",
sum( !inside ), length(inside), tol )
}
n = nrow(z)
matroidsimp = getsimplified(x$matroid)
matsimp = getmatrix( matroidsimp )
gndsimp = getground( matroidsimp )
centerdiff = 0.5*rowSums(matsimp) - x$center
numgen = ncol(matsimp)
if( 2 < numgen )
{
# the usual nontrivial case
zonoh = liftedzonohedron( matsimp, ground=gndsimp )
if( is.null(zonoh) ) return(NULL)
matroidsimp3 = getsimplified(zonoh$matroid)
matrixsimp3 = getmatrix(matroidsimp3)
idxfromground = idxfromgroundfun( gndsimp )
# translate z to the centered zonogon
pcentered = .Call( C_sumMatVec, z, -x$center, 1L )
hyperidx = rep( NA_integer_, n )
hyper = matrix( NA_integer_, nrow=n, ncol=2 )
pcube = matrix( NA_real_, nrow=n, ncol=numgen )
colnames(pcube) = gndsimp
for( k in 1:n )
{
if( ! inside[k] ) next
dot2 = as.double( zonoh$facet$normal[ ,1:2] %*% pcentered[k, ] )
znormal = zonoh$facet$normal[ ,3]
tmin = (zonoh$facet$beta - dot2) / znormal
imin = which.max( tmin )
zmin = tmin[imin]
# tmax = (-zonoh$facet$beta - dot2) / znormal
# zmax = min(tmax)
# if( zmax < zmin ) next # z is not inside the zonogon
hyperidx[k] = imin
hyper[k, ] = matroidsimp3$hyperplane[[imin]]
facetnormal = zonoh$facet$normal[imin, ]
pc = (sign( facetnormal %*% matrixsimp3 ) + 1) / 2
dim(pc) = NULL
# force the generators of this parallelogram to 0
colidx = idxfromground[ hyper[k, ] ]
pc[colidx] = 0
# now map pc to the zonogon, giving the origin of the parallelogram
org = as.double(matsimp %*% pc) - centerdiff #; cat( "org=", org, '\n' )
mat2x2 = matsimp[ , colidx ]
pc[colidx] = as.double( solve( mat2x2, z[k, ] - org) )
pcube[k, ] = pc
}
}
else
{
# numgen == 2 is a trivial special case
# translate z to the simplified parallelogram
psimple = .Call( C_sumMatVec, z, centerdiff, 1L )
pcube = t( solve( matsimp, t(psimple) ) )
#cat( "psimple=", as.double(psimple), '\n' )
#cat( "centerdiff=", centerdiff, '\n' )
hyperidx = 1L
hyper = matrix( gndsimp, nrow=nrow(pcube), ncol=2, byrow=T )
}
# clamp pcube to [0,1], to eliminate very slightly outside points, such as -1.e-12
pcube = pmin( pmax( pcube, 0), 1 )
if( plot )
{
if( grDevices::dev.cur() == 1 )
{
log_level( WARN, "Cannot add points to plot, because there is no plotting window open." )
}
else
{
points( z[ ,1], z[ ,2], col='red', pch=4 )
}
}
# lift to the original cube
# since pcube[,] has already been clamped, we can leave tol=0
pcube = invertcubepoints( x, pcube )
if( is.null(pcube) ) return(NULL)
if( FALSE )
{
# test the inversion
matorig = getmatrix( x$matroid )
delta = pcube %*% t(matorig) - z
#cat( "range(delta)=", range(delta), '\n' )
#print( t(matorig) )
#print( delta )
if( any( 5.e-15 < abs(delta) ) )
{
log_level( WARN, "Inversion test failed. max(delta)=%g", max(abs(delta)) )
}
}
rnames = rownames(z)
if( is.null(rnames) || anyDuplicated(rnames)!=0 ) rnames = 1:n
out = data.frame( row.names=rnames )
out$z = z
out$pcube = pcube
out$hyper = hyper
out$hyperidx = hyperidx
return( out )
}
print.zonogon <- function( x, ... )
{
cat( "zonogon:\n" )
pairs = nrow(x$facet)
cat( "number of facets: ", 2*pairs, ' [', pairs, "antipodal facet-pairs]\n" )
#pairs = length( x$matroid$multiple )
#cat( "facets with multiple generators: ", 2*pairs, '\n' )
gndsimp = getground( getsimplified(x$matroid) )
idx = getmixed(x$matroid) #;which( 0 < rowSums( abs(x$matroid$multiplesupp$minor) ) )
gens = length(idx)
cat( "generators with mixed-directions: ", gens, ' {', gndsimp[idx], '}\n' )
cat( "center: ", x$center, '\n' )
cat( "facets that contain 0: (", length(x$facet0), " facets) {", paste(x$facet0,collapse=' '), "}\n" ) #, sep=''
cat( "pointed: ", is_pointed(x), '\n' )
cat( "salient: ", is_salient(x), '\n' )
res = getmetrics(x)
cat( "perimeter: ", res$perim, '\n' )
cat( "area: ", res$area, '\n' )
cat( '\n' )
cat( "matroid:\n" )
print( x$matroid )
return( invisible(TRUE) )
}
plot.zonogon <- function( x, orientation=TRUE, normals=FALSE, elabels=FALSE,
tiling=FALSE, tlabels=FALSE,
trans2=FALSE, trans2type='both', ... )
{
xlim = range( x$vertex[ ,1] )
ylim = range( x$vertex[ ,2] )
if( normals )
{
xlim = xlim + c(-1,1)
ylim = ylim + c(-1,1)
}
else if( elabels )
{
xlim = xlim + 0.1*c(-1,1)
ylim = ylim + 0.1*c(-1,1)
}
plot( xlim, ylim, type='n', las=1, asp=1, lab=c(10,10,7), xlab='x', ylab='y' )
grid( lty=1 )
abline( h=0, v=0 )
col = ifelse( tiling, NA, 'white' )
border = ifelse( orientation || tiling, NA, 'black' )
polygon( x$vertex[ ,1], x$vertex[ ,2], col=col, border=border )
n = nrow(x$facet)
matroidsimp = getsimplified(x$matroid)
matsimp = getmatrix( matroidsimp )
gndsimp = getground( matroidsimp )
numgen = length(gndsimp)
if( elabels )
{
cex = 0.6
h = strheight( "1", cex=cex )
head = x$facet$center + h * x$facet$normal
head = .Call( C_sumMatVec, rbind( head, -head ), x$center, 1L )
text( head[ ,1], head[ ,2], as.character(rep(gndsimp,2)), cex=cex, col='black' )
}
if( tiling && ! is.null(x$tilingdata) )
{
# draw each parallelogram as polygon
# quadmat = matrix( 0, nrow=4, ncol=2 )
edgecoeff = matrix( c( -0.5,-0.5, -0.5,0.5, 0.5,0.5, 0.5,-0.5), 2, 4 ) # 2x4
for( k in 1:nrow(x$tilingdata) )
{
col2 = x$tilingdata$idxpair[k, ]
edge = matsimp[ , col2 ] # 2x2
#quadmat[1, ] = -0.5 * edge[ , 1] - 0.5*edge[ , 2]
#quadmat[2, ] = -0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[3, ] = 0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[4, ] = 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat = edge %*% edgecoeff
# add the center of the pgram relative to the zonogon center, and the zonogon center
centerpgram = x$tilingdata$center[k, ] + x$center
polygon( quadmat[1, ] + centerpgram[1], quadmat[2, ] + centerpgram[2], border='red' )
if( tlabels )
{
gen2 = gndsimp[ col2 ]
lab = sprintf( "%d,%d", gen2[1], gen2[2] )
text( centerpgram[1], centerpgram[2], lab, col='red', cex=0.5 )
}
}
}
if( is.logical(trans2) && trans2 ) trans2 = c(0L,n)
if( length(trans2) == 2 )
{
subcomplex = trans2subcomplex( n, trans2, trans2type )
if( is.null(subcomplex) ) return(FALSE)
vertexcube = vertexfromcode( n, subcomplex$vertex[ ,1], subcomplex$vertex[ ,2] )
vertexplane = vertexcube %*% t(matsimp)
#centermat = matrix( x$center, nrow(vertexplane), ncol=2, byrow=TRUE )
xy1 = vertexplane[ subcomplex$edge[ ,1], ] #+ centermat
xy2 = vertexplane[ subcomplex$edge[ ,2], ] #+ centermat
thecolor = 'blue'
segments( xy1[ ,1]+x$center[1], xy1[ ,2]+x$center[2], xy2[ ,1]+x$center[1], xy2[ ,2]+x$center[2], col=thecolor )
wp = 2 * x$center
points( c(0,wp[1]), c(0,wp[2]), pch=20, cex=1.5, col=thecolor )
}
if( orientation )
{
# draw arrows showing orientation of the generators
matgen = getmatrix( x$matroid ) # generators in the columns
if( is_simple(x$matroid) )
{
for( k in 1:n )
{
#idx = x$facet$idx[k] # raw column index
# idxgnd = x$facet$idxgnd[k] # ground set index
gen = matgen[ ,k]
center = x$facet$center[k, ]
for( s in c(1,-1) )
{
p0 = s*center - gen/2 + x$center
p1 = s*center + gen/2 + x$center
arrows( p0[1], p0[2], p1[1], p1[2], angle=15 )
points( p0[1], p0[2], pch=20 )
}
}
}
else
{
# make lookup table from ground to column index
#idxfromground = integer( max(x$matroid$ground) )
#idxfromground[ x$matroid$ground ] = 1:ncol(matgen)
#ground = getground( getsimplified(x$matroid) )
for( k in 1:n )
{
idxgnd = gndsimp[k] # ground set integer
idxgrp = getmultipleindex( x$matroid, idxgnd ) #; print( idxgrp )
dominant = matsimp[ , k ]
if( 0 < idxgrp )
{
minor = x$matroid$multiplesupp$minor[ idxgrp, ]
# res = dominantDirection( matgen[ , idxfromground[idxgrp], drop=FALSE ] )
#print( res )
# in this quotient, any vector norm will do, so use L^inf
beta = max(abs(minor)) / max(abs(dominant))
}
else
beta = 0
# code==2 then arrowhead on p1 end
# code==3 then arrowhead on both ends
code = ifelse( 0<beta && beta<1, 3, 2 )
center = x$facet$center[k, ]
for( s in c(1,-1) )
{
p0 = s*center - dominant/2 + x$center
p1 = s*center + dominant/2 + x$center
arrows( p0[1], p0[2], p1[1], p1[2], angle=15, code=code )
# a point marking origin of both major and minor
q = (1-beta)*p0 + beta*p1
points( q[1], q[2], pch=20 )
}
}
}
}
if( normals )
{
tail = x$facet$center
head = x$facet$center + x$facet$normal
tail = .Call( C_sumMatVec, rbind( tail, -tail ), x$center, 1L )
head = .Call( C_sumMatVec, rbind( head, -head ), x$center, 1L )
arrows( tail[ ,1], tail[ ,2], head[ ,1], head[ ,2], angle=15 )
points( tail[ ,1], tail[ ,2], cex=.5, pch=21, bg='white' )
}
points( x$vertex[ ,1], x$vertex[ ,2], col='black', pch=20 )
# plot the center
points( x$center[1], x$center[2], cex=1, pch=21, bg='white' )
main = sprintf( "zonogon with %d generators and %d facets\n center=(%g,%g)",
length(getground(x$matroid)), 2L*n,
x$center[1], x$center[2] )
if( ! is.null( attr(x,"sphering") ) )
main = paste( main, " [spherized]" )
title( main=main, cex.main=1 )
return( invisible(TRUE) )
}
if( FALSE )
{
# methods taken from:
# Optimal Whitening and Decorrelation
# Agnan Kessy1, Alex Lewin, and Korbinian Strimmer (2016)
#
# returns a new zonogon, as spherical as possible
spherize.zonogon <- function( x, method="ZCA", ... )
{
return( spherize_zonotope( x, method=method ) )
}
# x a zonogon whose matroid is simple
#
# replace each generator g by the pair -g/2 , +g/2
symmetrize.zonogon <- function( x )
{
if( ! is_simple(x$matroid) )
{
log_level( ERROR, "matroid is not simple" )
return(NULL)
}
matgen = getmatrix(x$matroid)
matgen = cbind( -matgen/2, matgen/2 )
gndgen = getground(x$matroid)
gndgen = c( gndgen, gndgen[ length(gndgen) ] + gndgen )
out = zonogon( matgen, e0=0, e1=0, ground=gndgen )
return(out)
}
}
# x a zonogon
# W a 2x2 invertible matrix. Put matrix on the left, and vector on the right.
lintransform.zonogon <- function( x, W )
{
if( length(W) == 1 )
W = W * diag(2)
ok = all( dim(W) == c(2,2) )
if( ! ok )
{
log_level( ERROR, "argument W is invalid." )
return(NULL)
}
# verify W is OK
res = try( solve(W), silent=TRUE )
if( inherits( res, "try-error" ) )
{
log_level( ERROR, "matrix W is not invertible." )
return(NULL)
}
Winv = res
# just copy from x to out, and then make selective changes !
out = x
out$matroid = lintransform( x$matroid, W )
#out$matroid$matrix = W %*% x$matroid$matrix
#if( ! is.null(out$matroid$simplified) )
# out$matroid$simplified$matrix = W %*% x$matroid$simplified$matrix
out$center = as.double( x$center %*% t(W) )
out$facet$center = x$facet$center %*% t(W)
#out$facet$gen = x$facet$gen %*% t(W)
# compute all the outward pointing edge normals
normal = x$facet$normal %*% Winv
# now unitize
normal = normalizeMatrix( normal, 1L )
out$facet$normal = normal
# calculate the n plane constants beta
# these plane constants are for the centered zonogon
out$facet$beta = .rowSums( normal * out$facet$center, nrow(normal), ncol(normal) )
out$vertex = x$vertex %*% t(W)
out$tilingdata$center = x$tilingdata$center %*% t(W)
attr( out, "lintransform" ) = W
return( out )
}
if( FALSE )
{
is_pointed.zonogon <- function( x )
{
return( length(x$facet0) == 2 )
}
is_salient.zonogon <- function( x )
{
return( 0 < length(x$facet0) )
}
}
getmetrics.zonogon <- function( x )
{
matgen = getmatrix( getsimplified(x$matroid) )
lengen = sqrt( colSums( matgen^2 ) )
out = list()
out$vertices = 2L*ncol(matgen)
out$perim = 2*sum(lengen)
out$area = sum(lengen * x$facet$beta)
return( out )
}
################### deadwood below ###########################
if( FALSE )
{
minkowskisum.zonogon <- function( zono1, zono2, e0=0, e1=1.e-6, ground=NULL, ... )
{
if( ! inherits(zono2,"zonogon") )
{
log_level( ERROR, "2nd argument zono2 is invalid." )
return(NULL)
}
# get the 2 matrices and cbind them
mat1 = zono1$matroid$matrix
mat2 = zono2$matroid$matrix
mat = cbind(mat1,mat2)
#gnd1 = getground( zono1$matroid )
#gnd2 = getground( zono2$matroid )
#gnd = c( gnd1, gnd1[length(gnd1)] + gnd2 )
out = zonogon( mat, e0=e0, e1=e1, ground=ground )
return( out )
}
'%+%.zonogon' <- function(zono1,zono2)
{
return( minkowskisum( zono1, zono2 ) )
}
}
if( FALSE )
{
# get the simplified generators, and their ground set
matgen = getmatrix( getsimplified(x$matroid) )
#gndgen = getground( getsimplified(x$matroid) )
# get the column indexes of generators that "meet" blackpt or whitept
#facet0idx = match( x$facet0, gndgen )
facet0idx = x$facet0
matsub = matgen[ , facet0idx ]
if( length(facet0idx) == 2 )
{
# testmat is 2x2
testmat = solve( matsub )
if( whitept ) testmat = -testmat
}
else if( length(facet0idx) == 1 )
{
# testmat is 1x2. It is the inward pointing normal on boundary.
testmat = matrix( c( matsub[2], -matsub[1] ), nrow=1 )
if( blackpt )
test = testmat %*% (-x$center)
else
test = testmat %*% x$center
if( 0 < test ) testmat = -testmat
}
# cat( "testmat:\n" ) ; print( testmat )
}
if( FALSE )
{
zono = liftedzonohedron( matsimp, ground=gndsimp )
if( ! is.null(zono) )
{
# draw each parallelogram as polygon
matroidsimp3 = getsimplified(zono$matroid)
matsimp3 = getmatrix( matroidsimp3 )
# make lookup table from ground to column index
idxfromground = integer( gndsimp[numgen] )
idxfromground[ gndsimp ] = 1:numgen
quadmat = matrix( 0, nrow=4, ncol=2 )
for( k in 1:nrow(zono$facet) )
{
gen2 = matroidsimp3$hyperplane[[k]]
col2 = idxfromground[ gen2 ]
edge = matsimp3[ 1:2, col2 ]
quadmat[1, ] = -0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[2, ] = -0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[3, ] = 0.5 * edge[ , 1] + 0.5*edge[ , 2]
quadmat[4, ] = 0.5 * edge[ , 1] - 0.5*edge[ , 2]
# add the center of the facet (drop the Z) and the zonogon center
center = zono$facet$center[k,1:2] + x$center
polygon( quadmat[ ,1] + center[1], quadmat[ ,2] + center[2], border='red' )
if( tlabels )
{
lab = sprintf( "%d,%d", gen2[1], gen2[2] )
text( center[1], center[2], lab, col='red', cex=0.5 )
}
}
}
}
# end of file  R/zonogon.R
#
# zonohedron is a 3-dimensional zonotope
#
# implemented as a list with items:
# matroid the matroid for it, which includes the generating matrix, etc.
# center a 3-vector
# facet a data.frame with a row for each facet-pair of the zonohedron,
# but data for only one of the facets is stored in this data.frame,
# and the antipodal facet is derived from this one.
# The length and order of facet[] is the same as the order of the hyperplane list
# in the (simplified) matroid.
# The data.frame has these columns:
# center the center of the facet, in the centered zonohedron, the other facet is antipodal (opposite sign).
# normal outward-pointing unit normal, the antipodal facet has the opposite normal
# beta equation of the slab is -beta <= <x,normal> <= beta. We always have beta>0.
# sign +1 or -1. It's the sign relating the facet normal to the crossproduct coming from the matroid hyperplane.
# facet0 indexes (from facet) of those facets that contain 0
# beltlist the i'th entry in this list is 1/2 of the belt of the i'th generator, as a vector of facet indexes
# frame3x2 list of 3x2 matrices for the non-trivial facets. Index is the same as simplify(x$matroid)$hyperplane
# zonogon list of zonogons for the non-trivial facets. frame3x2 maps the facet to the zonogon in the plane.
# signtile list of integer vectors, all +1 or -1, for the non-trivial facets.
#               Each entry records whether the cross product for the pgram tile equals the facet normal (+1) or its negative (-1).
# zonoseg list of zonosegs for the multiple groups. Only present if there are multiple groups
# zonohedron constructor
#
# mat a numeric matrix with 3 rows
#   e0      threshold for a column vector to be considered 0, used when nrow(mat) >= 1
#   e1      threshold for codirectionality, used when nrow(mat) >= 2
#   e2      threshold for hyperplane normals, used when nrow(mat) == 3
#   ground  ground set, an integer vector in increasing order, with length(ground) == ncol(mat)
zonohedron <- function( mat, e0=0, e1=1.e-6, e2=1.e-10, ground=NULL )
{
timermain = createtimer()
ok = is.matrix(mat) && is.numeric(mat) && nrow(mat)==3 && 3<=ncol(mat)
if( ! ok )
{
log_level( ERROR, "mat is invalid." )
return(NULL)
}
matroid = matroid( mat, e0=e0, e1=e1, e2=e2, ground=ground )
if( is.null(matroid) )
return(NULL)
timermain = updatetimer(timermain)
timematroid = timermain$elapsed
out = list()
class(out) = c( "zonohedron", "zonotope", class(out) )
    # copy non-special attributes from the matrix mat to the zonohedron
for( a in names(attributes(mat)) )
{
if( ! ( a %in% c('class','dim','dimnames','names') ) ) attr(out,a) = attr(mat,a)
}
out$matroid = matroid
out$center = 0.5 * rowSums( getmatrix(matroid) )
matsimple = getsimplified(matroid)
# get the simplified generators, and their ground set
matgen = getmatrix( matsimple )
gndgen = getground( matsimple )
numgen = length(gndgen) # number of generators
# make lookup table from ground to column index
idxfromground = integer( gndgen[numgen] )
idxfromground[ gndgen ] = 1:numgen
hyperplane = gethyperplane( matsimple )
numhypers = length(hyperplane)
# 2 variables for the facet centers
center = matrix( NA_real_, numhypers, 3 )
delta = double( numhypers )
beltmat = .Call( C_beltmatrix, hyperplane, gndgen )
if( is.null(beltmat) ) return(NULL)
# facet0 is the vector of hyperplane/facet indexes that meet 0, for the simplified generators
facet0 = integer(0)
hyperplanes = rep( NA_integer_, numgen )
timebuild = rep( NA_real_, numgen )
timesort = rep( NA_real_, numgen )
timecenter1 = rep( NA_real_, numgen )
timebeltdata = rep( NA_real_, numgen )
timecentersassign = rep( NA_real_, numgen )
timetotal = rep( NA_real_, numgen )
timermain = updatetimer(timermain)
timeprep = timermain$elapsed
# edge0 is the vector of simplified generators with an edge that meets 0
# start with a logical mask and convert to vector later, using which()
#edge0 = logical(numgen)
# beltlist[[i]] holds the 'zone' of the i'th generator, as a vector of hyperplane indexes
beltlist = vector( numgen, mode='list' )
for( i in 1:numgen )
{
timerbelt = createtimer()
gen = matgen[ ,i] # gen is the i'th generator
# get the indexes of all hyperplanes in the i'th belt
hyperidx = beltmat[i, ]
hyperidx = hyperidx[ 0<hyperidx ] # trim trailing 0s
#cat( "------------------ gen=", i, " hyperidx=", hyperidx, '\n' )
m = length(hyperidx)
hyperplanes[i] = m
# get normals to the facet-pairs that have i'th generator as an edge
# all these normals are orthogonal to gen, and so lie on a great circle
normalfacet = getnormal( matsimple, hyperidx ) #; print(normalfacet)
normalfacet = t(normalfacet)
# make a scratch data.frame suitable for ordering these points on the great circle
df = data.frame( hyperidx=rep(hyperidx,2), sign=c(rep(1L,m),rep(-1L,m)) )
# make the facet normal for the complete belt, including both facets in a pair
# but not in the correct order
df$normal = rbind(normalfacet,-normalfacet)
#print( str(df) )
if( FALSE && ! all( is.finite(df$normal) ) )
{
log_level( FATAL, "Internal Error. in df$normal." ) ; # print( df$normal )
return(NULL)
}
timerbelt = updatetimer(timerbelt)
timebuild[i] = timerbelt$elapsed
# unitize the generator
unit = gen / sqrt( sum(gen^2) )
# put the true facet normals in order around the great circle
perm = orderoncircle( df$normal, unit ) #; print(perm) ; print( df[perm, ] )
# reorder and only keep the first m normals, every hyperplane will be represented exactly once
df = df[ perm[1:m], ]
# record the sorted hyperplane indexes for later use
beltlist[[i]] = df$hyperidx
# find the first hyperplane with positive sign
#ifirst = which( 0 < df$sign )[1]
# keep only m of these hyperplanes, starting with ifirst
#df = df[ ifirst:(ifirst+m-1L), ] #; print(df)
timerbelt = updatetimer(timerbelt)
timesort[i] = timerbelt$elapsed
# find the center of hyperplane df$hyperidx[1]
hyper = matsimple$hyperplane[[ df$hyperidx[1] ]]
generatoridx = match( hyper, gndgen )
pcube = as.double(df$normal[1, ] %*% matgen)
        # force the cube coordinates of this hyperplane's generators to be exactly 0
pcube[ generatoridx ] = 0
pcube = 0.5 * sign( pcube )
        # center1 = matgen %*% pcube     (commented out; the original purpose of center1 is unrecorded)
timerbelt = updatetimer(timerbelt)
timecenter1[i] = timerbelt$elapsed
res = getbeltdata( matsimple, df$hyperidx, pcube, gndgen[i], df$normal )
if( is.null(res) ) return(NULL)
radmat = res[[1]]
centermat = res[[2]]
        #print( res[[3]] )   # res[[3]] is a logical mask
if( any( res[[3]] ) )
{
facet0 = c( facet0, df$hyperidx[ res[[3]] ] )
#edge0[i] = TRUE
}
if( FALSE )
{
# verify that radmat has the right direction, using facet normal
for( k in 1:m )
{
# get the k'th hyperplane in this belt
# hyper = hyperplane[[ df$hyperidx[k] ]]
# the "diameter vector" of this facet is a signed linear combination of the "other" generators
#D[ ,k] = df$sign[k] * getdiameter( matsimple, df$hyperidx[k], gndgen[i] )
# do a sign test
d2 = sum( crossproduct(radmat[ ,k], gen) * df$normal[k, ] )
if( d2 <= 0 )
{
log_level( FATAL, "Internal sign error. k=%d. d2=%g <= 0.", k, d2 )
return(NULL)
}
}
}
timerbelt = updatetimer(timerbelt)
timebeltdata[i] = timerbelt$elapsed
# load centermat into center.
# since each facet is contained in 2 or more belts,
# centers will be assigned 2 or more times.
# The array delta[] records the tiny differences. It is modified in place.
# the vector df$sign is replicated to all 3 columns
#center[ df$hyperidx, ] = df$sign * t(centermat)
res = .Call( C_multicopy, center, delta, df$sign * t(centermat), df$hyperidx )
if( is.null(res) )
{
log_level( FATAL, "Internal Error. C_multicopy() fail. i=%d.", i )
return(NULL)
}
timerbelt = updatetimer(timerbelt)
timecentersassign[i] = timerbelt$elapsed
timetotal[i] = timerbelt$total
}
timermain = updatetimer(timermain)
timebelts = timermain$elapsed
# test that all facet centers were assigned
if( any( is.na(center) ) )
{
log_level( ERROR, "some facet centers not assigned." )
return(NULL)
}
# test that facet centers have negligible disagreement
centernorm = .rowSums( abs(center), nrow(center), ncol(center) )
mask = (centernorm == 0)
if( any(mask) )
{
log_level( ERROR, "%d facet centers are 0.", sum(mask) )
return(NULL)
}
deltarel = delta / centernorm #; cat( "deltarel=", deltarel, '\n' )
tol = 5.e-12
mask = tol < deltarel
if( any(mask) )
{
log_level( WARN, "%d facet centers have disagreement > %g.", sum(mask), tol )
}
normal = t( getnormal( matsimple, NULL ) ) # get ALL the normal vectors
if( TRUE && any( out$center!=0 ) )
{
# modify center and normal so the *chosen* facet of the pair is closer to 0 (uncentered)
# beta[] remains unchanged
# signvec = -as.numeric( sign( center %*% out$center ) )
signvec = -sign( normal %*% out$center )
signvec[ signvec==0 ] = 1 # change any 0s to 1s, should be extremely rare
#center = signvec * center # uses recycling rule so signvec is multiplied by all columns
#normal = signvec * normal # uses recycling rule so signvec is multiplied by all columns
.Call( C_timesEqual, center, signvec, 2L ) # multiply in place
.Call( C_timesEqual, normal, signvec, 2L ) # multiply in place
}
else
{
# do not modify center and normal
signvec = rep( 1L, nrow(center) )
}
# build the facet data.frame and add to output list
facet = data.frame( row.names=1:numhypers )
facet$center = center
facet$normal = normal
facet$beta = .rowSums( center*normal, nrow(center), ncol(center) )
facet$sign = as.integer( signvec )
out$facet = facet
# build the facet0 data.frame and (possibly) modify it for the original generators
    facet0 = sort( unique(facet0) )
#cat( "simplified facet0=", facet0, '\n' )
if( FALSE )
{
# convert from logical vector to indexes
edge0 = which( edge0 )
if( length(edge0) != length(facet0) )
{
log_level( FATAL, "Internal Error. length(edge0) = %d != %d = length(facet0).",
length(edge0), length(facet0) )
return(NULL)
}
}
# facet0 is now for the simplified generators.
# modify facet0[] for the original generators, by removing some of them.
# get the "mixed direction" generators, which are the generator minors that are non-zero
colidx = getmixed( matroid ) # column indexes of the simplified matrix, but pass the original matroid
if( 0<length(facet0) && 0<length(colidx) )
{
# keep only those hyperplanes in facet0 that contain all mixed generators, and remove all the rest
hyper = matsimple$hyperplane[ facet0 ]
        # keep is a logical vector whose entries correspond to those in facet0 and hyper
keep = .Call( C_issuperset, hyper, gndgen[colidx] )
#print( hyper )
#cat( "gndgen[colidx]=", gndgen[colidx], '\n' )
#cat( "keep= ", keep, '\n' )
#cat( "changing facet0 from ", facet0, " to ", facet0[keep], '\n' )
facet0 = facet0[ keep ]
}
out$facet0 = facet0
#out$edge0 = edge0
out$beltlist = beltlist
timermain = updatetimer(timermain)
timefacets = timermain$elapsed
# for each non-trivial facet (non-parallelogram) compute a zonogon
lenvec = lengths( hyperplane )
idxnontriv = which( 2 < lenvec )
hypersnt = length(idxnontriv)
if( 0 < hypersnt )
{
if( ! all( idxnontriv == 1:hypersnt ) )
{
log_level( FATAL, "Internal Error. %d nontrivial hyperplanes are not contiguous.", hypersnt )
return(NULL)
}
frame3x2 = vector( hypersnt, mode='list' )
zonogon = vector( hypersnt, mode='list' )
signtile = vector( hypersnt, mode='list' )
for( k in 1:hypersnt )
{
# get the facet normal and make frame from it
#normal = facet$normal[ k, ]
frame3x2[[k]] = frame3x2fun( facet$normal[ k, ] )
# get the column indexes for the hyperplane
idxhyper = idxfromground[ hyperplane[[k]] ]
# extract only the generators for this face
matsub = matgen[ , idxhyper ]
# rotate generators to dimension 2
matsub2 = t( frame3x2[[k]] ) %*% matsub
# construct the non-trivial zonogon
zonogon[[k]] = zonogon( matsub2, e0=0, e1=0, ground=gndgen[idxhyper] )
if( is.null(zonogon[[k]]) )
{
log_level( FATAL, "Internal Error. Cannot construct zonogon %d.", k )
return(NULL)
}
# each element in signtile[[k]] is +1 or -1, depending on whether the crossproduct
# agrees with the chosen facet normal, or is its negative
# get all crossprods for this non-trivial facet
# NOTE: if the facet is trivial, the chosen normal *is* the cross-product
idx = translateallpairs( hyperplane[[k]], getground(matsimple) )
crossprods = matsimple$crossprods[ , idx ]
signtile[[k]] = as.integer( sign( facet$normal[ k, ] %*% crossprods ) )
}
out$frame3x2 = frame3x2
out$zonogon = zonogon
out$signtile = signtile
}
# for each multiple group, compute the corresponding zonoseg
nummultiples = length(matroid$multiple)
if( 0 < nummultiples )
{
out$zonoseg = makezonoseglist( matroid )
}
timermain = updatetimer(timermain)
timezonogons = timermain$elapsed
timeall = timermain$total
if( FALSE )
{
perf = data.frame( point=gndgen, hyperplanes=hyperplanes )
perf$build = timebuild * 1000
perf$sort = timesort * 1000
perf$center1 = timecenter1 * 1000
perf$beltdata = timebeltdata * 1000
perf$beltdatamean = perf$beltdata/hyperplanes
perf$centersassign = timecentersassign * 1000
perf$total = timetotal * 1000
print( perf )
}
if( FALSE )
{
cat( "matroid: ", timematroid * 1000, " msec\n" )
cat( "preparation: ", timeprep * 1000, " msec\n" )
cat( "belts: ", timebelts * 1000, " msec. ", "belts=", numgen, "\n" )
cat( "facets: ", timefacets * 1000, " msec\n" )
cat( "zonogons: ", timezonogons * 1000, " msec\n" )
cat( "total: ", timeall * 1000, " msec\n" )
}
return( out )
}
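# A minimal usage sketch (editor's addition, not from the original package source), kept in this
# file's own if(FALSE) idiom.  It assumes the package namespace is loaded, e.g. via library(zonohedra).
if( FALSE )
    {
    set.seed(42)
    mat = matrix( rnorm(3*10), nrow=3 )     # 10 random generators, one per column
    zono = zonohedron( mat )
    print( zono, matroid=FALSE )            # print the metrics, but skip the matroid printout
    }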
# n a positive integer, so the step size on the circle is 2*pi/n
# m number of points to compute, starting at 1
# height height (of "white point") when m==n
polarzonohedron <- function( n, m=n, height=pi, ground=NULL )
{
if( is.null(ground) )
ground = 1:m
else if( length(ground) != m )
{
log_level( ERROR, "ground is invalid, because the length is incorrect." )
return(NULL)
}
if( m<3 || n<m )
{
log_level( ERROR, "m=%d is invalid.", m )
return(NULL)
}
u = (0:(m-1)) / n
mat = t( tocircle(2*pi*u) )
mat = rbind( mat, 1 ) * height/n
return( zonohedron(mat,e0=0,e1=0,e2=0,ground=ground) )
}
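# A short sketch (editor's addition, not from the original source): with m==n the generators
# wrap once around the circle, and their z-coordinates sum to the given height.
if( FALSE )
    {
    zono = polarzonohedron( 12 )
    sum( getmatrix(zono$matroid)[3, ] )     # equals the default height, pi
    }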
# mat       2xN matrix giving N points in the plane, N>=3.  Assumed to be already simplified; this is verified.
#           the order of the column generators does not affect the returned zonohedron
# returns a zonohedron whose matroid is simple and uniform; both properties are verified.
# As customary, only half of the facets are stored, and the antipodal ones are omitted.
# Only the "lower" facets are stored; these project down to a regular tiling of the zonogon defined by mat.
# These "lower" facets are the ones visible from *below*, i.e. from the xy-plane.
# out$facet is modified so all the facets have normals with negative Z-component
liftedzonohedron <- function( mat, e0=0, e1=0, e2=0, ground=NULL )
{
ok = is.matrix(mat) && is.numeric(mat) && nrow(mat)==2 && 3<=ncol(mat)
if( ! ok )
{
log_level( ERROR, "argument mat is invalid." )
return(NULL)
}
# 'lift' generators to a cone with vertex at 0
mat3 = rbind( mat, sqrt(colSums(mat^2)) )
out = zonohedron( mat3, e0=e0, e1=e1, e2=e2, ground=ground )
if( is.null(out) ) return(NULL)
if( ! is_simple(out$matroid) )
{
log_level( ERROR, "matroid is not simple." )
return(NULL)
}
if( ! is_uniform(out$matroid) )
{
log_level( ERROR, "matroid is not uniform." )
return(NULL)
}
# force all facet normals to point "down", so all facets are on the "lower" side
signz = sign( out$facet$normal[ ,3] )
if( any(signz==0) )
{
log_level( ERROR, "%d facets have horizontal facet normal.", sum(signz==0) )
return(NULL)
}
maskpos = (0 < signz)
if( any(maskpos) )
{
# reverse normal and center, so all facets in the data.frame are on the "lower" side
out$facet$normal[maskpos, ] = -out$facet$normal[maskpos, ]
out$facet$center[maskpos, ] = -out$facet$center[maskpos, ]
}
return( out )
}
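# A sketch (editor's addition, not from the original source): lift 5 points on the unit circle,
# so the generators lie on a cone, and check that only "lower" facets are stored.
if( FALSE )
    {
    u = 2*pi*(0:4)/5
    zono = liftedzonohedron( rbind(cos(u),sin(u)) )
    all( zono$facet$normal[ ,3] < 0 )       # TRUE
    }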
# n a positive integer, so the step size on the circle is 2*pi/n
# m number of points to compute, starting at 1
# axis extrusion axis, z must be nonzero
regularprism <- function( n, m=n, axis=c(0,0,1), ground=NULL )
{
if( is.null(ground) )
ground = 1:(m+1)
else if( length(ground) != m+1 )
{
log_level( ERROR, "ground is invalid, because the length is incorrect." )
return(NULL)
}
if( m<1 || n<m )
{
log_level( ERROR, "m=%d is invalid.", m )
return(NULL)
}
ok = is.numeric(axis) && (length(axis) %in% c(1,3))
if( ! ok )
{
log_level( ERROR, "axis is invalid." )
return(NULL)
}
if( length(axis) == 1 )
axis = c(0,0,axis)
if( axis[3] == 0 )
{
log_level( ERROR, "axis is invalid, because its z coordinate is 0." )
return(NULL)
}
u = (0:(m-1)) / n
mat = t( tocircle(2*pi*u) ) # m columns
mat = rbind( mat, 0 ) # m columns
mat = cbind( mat, axis ) # m+1 columns
return( zonohedron(mat,ground=ground) )
}
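# A sketch (editor's addition, not from the original source): a hexagonal prism
# extruded along the z-axis.
if( FALSE )
    {
    zono = regularprism( 6, axis=c(0,0,2) )
    getmetrics( zono )$facets               # 6 pgram sides plus top and bottom hexagons = 8
    }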
quasicube <- function( count=c(1,1,1) )
{
count = as.integer(count)
if( length(count) == 1 )
count = rep( count, 3 )
else if( length(count) == 2 )
count = c( count, 0L )
    ok = length(count)==3  &&  all(0<=count)  &&  sum(count==0) <= 1    # at most one of the 3 counts may be 0
if( ! ok )
{
log_level( ERROR, "vector count=%d,%d,%d is invalid", count[1], count[2], count[3] )
return(NULL)
}
n = count[1]
if( 0 < n )
{
# xy plane
u = seq( 0, by=2*pi/n, length.out=n )
mat1 = t( cbind( tocircle(u), 0 ) )
}
else
mat1 = matrix( 0, nrow=3, ncol=0 )
n = count[2]
if( 0 < n )
{
# yz plane
u = seq( 0, by=2*pi/n, length.out=n )
mat2 = t( cbind( tocircle(u), 0 ) )
mat2 = mat2[ c(3,1,2), , drop=F]
}
else
mat2 = matrix( 0, nrow=3, ncol=0 )
n = count[3]
if( 0 < n )
{
# zx plane
u = seq( 0, by=2*pi/n, length.out=n )
mat3 = t( cbind( tocircle(u), 0 ) )
mat3 = mat3[ c(2,3,1), , drop=F]
}
else
mat3 = matrix( 0, nrow=3, ncol=0 )
matgen = cbind( mat1, mat2, mat3 ) #; print(matgen)
out = zonohedron( matgen )
return( out )
}
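# A sketch (editor's addition, not from the original source): with count=c(1,1,1) the generators
# are the 3 standard basis vectors, so the result is the unit cube.
if( FALSE )
    {
    zono = quasicube( c(1,1,1) )
    getmetrics( zono )                      # 8 vertices, 12 edges, 6 facets
    }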
print.zonohedron <- function( x, trans2=FALSE, matroid=TRUE, ... )
{
cat( "zonohedron:\n" )
ground = getground( getsimplified(x$matroid) )
fullname = attr( x, "fullname" )
if( ! is.null(fullname) )
cat( "fullname: ", fullname, "\n" )
gens = length( getground(x$matroid) )
cat( "generators (original): ", gens, '\n' )
gens = length( x$matroid$multiple )
cat( "generators with multiples: ", gens, '\n' )
if( 0 < gens )
{
colidx = getmixed(x$matroid) #;which( 0 < rowSums( abs(x$matroid$multiplesupp$minor) ) )
gens = length(colidx)
cat( "generators with mixed-directions: ", gens, ' {', ground[colidx], '}\n' )
}
gens = length( getground(getsimplified(x$matroid)) )
cat( "generators (simplified): ", gens, '\n' )
facets = 2*nrow(x$facet)
cat( "number of facets: ", facets, ' [', facets/2, " antipodal facet-pairs]\n", sep='' )
cat( "facets that contain 0: ", length(x$facet0), " {", x$facet0, "}\n" )
res = getmetrics( x )
cat( "number of edges: ", res$edges, "\n" )
# cat( "edges that contain 0: ", length(x$edge0), " {", ground[x$edge0], "}\n" )
cat( "center: ", x$center, '\n' )
cat( "pointed: ", is_pointed(x), '\n' )
cat( "salient: ", is_salient(x), '\n' )
cat( "area: ", res$area, '\n' )
cat( "volume: ", res$volume, '\n' )
flush.console()
if( trans2 )
{
extra = getmetrics2trans( x )
cat( '\n' )
cat( "2-Transition Polyhedron Metrics:\n" )
cat( "orientation: ", extra$signsurf, '\n' )
mess = sprintf( " [signcounts: %d + %d + %d = %d]",
extra$signcount[1], extra$signcount[2], extra$signcount[3], extra$signcount[4] )
cat( "strictly starshaped at center: ", extra$starshaped, mess, '\n' )
cat( "injective: ", extra$injective, '\n' )
if( is.finite(extra$starshaped) && extra$starshaped )
{
deficient = extra$pgramdf$deficient
dfacets = 2 * sum( deficient )
cat( "deficient parallelograms: ", dfacets, " [fraction=", dfacets/facets, ']\n' )
cat( "deficient area: ", extra$areadeficient, " [fraction=", extra$areadeficient/res$area, ']\n' )
cat( "deficient volume: ", extra$volumedeficient, " [fraction=", extra$volumedeficient/res$volume, ']\n' )
thickness = extra$volumedeficient / extra$areadeficient
cat( "deficient thickness (mean): ", thickness, '\n' )
# compute transitions
pgramdef = extra$pgramdf[ deficient, ]
dat = boundarypgramdata( x, pgramdef$gndpair )
#dat$gndpair = pgramdef$gndpair
dat$area = pgramdef$area
# breakdown the deficient parallelograms by transitions
tunique = sort( unique( dat$transitions ) ) #; print( tunique )
m = length(tunique)
pgcount = integer(m)
area = numeric(m)
example = character(m)
for( k in 1:m )
{
datsub = dat[ dat$transitions == tunique[k], ]
pgcount[k] = nrow( datsub )
area[k] = sum( datsub$area )
i = which.max( datsub$area )
gndpair = datsub$gndpair[i, ]
example[k] = sprintf( "{%d,%d}", gndpair[1], gndpair[2] )
}
dftemp = data.frame( transitions=c(tunique, "Totals") )
dftemp$parallelograms = c( 2L*pgcount, 2L*sum(pgcount) )
dftemp$area = c( 2*area, 2*sum(area) )
dftemp$example = c( example, '' )
cat( '\n' )
print( dftemp, row.names=FALSE )
cat( '\n' )
edges_total = 2L * nrow(extra$anglesDH)
edges_concave = 2L * sum( extra$anglesDH$angle < 0 )
convex = edges_concave==0
cat( "total edges: ", edges_total, '\n' )
cat( "concave edges: ", edges_concave, " [fraction=", edges_concave/edges_total, ']\n' )
if( ! convex )
{
theta = pi + extra$anglesDH$angle
imin = which.min( theta ) #; print( imin )
thetamin = theta[imin]
dfsub = extra$anglesDH[imin, ]
pivot = ground[ dfsub$pivot[1] ]
wing1 = ground[ dfsub$wing1[1,1] ]
wing2 = ground[ dfsub$wing2[1,1] ]
mess = sprintf( "at edge common to parallelograms {%g,%g} and {%g,%g}",
pivot, wing1, pivot, wing2 )
cat( "minimum external angle: ", thetamin, 'radians, ', mess, '\n' )
}
cat( "convex surface: ", convex, '\n' )
}
else
{
cat( '\n' )
cat( "**** Cannot print more 2-Transition metrics, since the surface is not strictly starshaped at the center. ****\n" )
}
}
if( matroid )
{
cat( '\n' )
cat( "matroid:\n" )
print( x$matroid )
}
return( invisible(TRUE) )
}
# returns a list with items:
# vertices number of vertices, computed from edges and facets using Euler characteristic
# edges number of edges
# facets number of facets
# area surface area
# volume volume
getmetrics.zonohedron <- function( x )
{
matsimple = getsimplified( x$matroid )
facets = 2L * nrow(x$facet)
edges = 2L * sum( lengths(gethyperplane(matsimple)) )
vertices = edges - facets + 2L
# matrix crossprods[,] was already computed when the matroid was, but was then unitized
# NOTE: this matrix crossprodsraw[,] is raw and nonunitized
crossprodsraw = allcrossproducts( getmatrix(matsimple) )
areavec = sqrt( .colSums( crossprodsraw^2, nrow(crossprodsraw), ncol(crossprodsraw) ) )
out = list()
out$vertices = vertices
out$edges = edges
out$facets = facets
out$area = 2*sum(areavec)
# matsimple$hyperplaneidx is a LUT from crossprod index to hyperplane index
# so for non-trivial hyperplanes, the beta is duplicated many times
betavec = x$facet$beta[ matsimple$hyperplaneidx ] #; print(betavec)
if( length(areavec) != length(betavec) )
{
log_level( FATAL, "Internal Error. length(areavec)=%d != %d=length(betavec).",
length(areavec), length(betavec) )
#print( matsimple$hyperplaneidx )
#print( str(betavec) )
return(out)
}
out$volume = (2/3) * sum( areavec * betavec )
# if( trans2 )
# {
# extra = getmetrics2trans( x, tol )
# if( is.null(extra) ) return(NULL)
#
# out$trans2 = extra
# }
return(out)
}
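# A sketch (editor's addition, not from the original source): the returned counts satisfy
# Euler's formula V - E + F = 2, which holds by construction since vertices is computed
# from edges and facets.
if( FALSE )
    {
    mets = getmetrics( polarzonohedron(9) )
    mets$vertices - mets$edges + mets$facets    # always 2
    }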
# x a zonohedron
# i index of a hyperplane of the simplified matroid of x
gettilecenters3D <- function( x, i )
{
if( i <= length(x$zonogon) )
{
# non-trivial hyperplane, many tiles
zono = x$zonogon[[i]]
# find center of all pgram tiles in 3D
center3D = zono$tilingdata$center %*% t( x$frame3x2[[i]] )
# translate each pgram center to the zonogon facet in 3D
center3D = .Call( C_sumMatVec, center3D, x$facet$center[i, ], 1L )
}
else
{
        # trivial hyperplane, 2 generators, only 1 tile = the pgram itself
center3D = x$facet$center[i, , drop=FALSE]
}
return( center3D )
}
if( FALSE )
{
# x a zonohedron object
# p an Mx3 matrix, etc.
#
# value see inside_zonotope()
inside.zonohedron <- function( x, p )
{
p = prepareNxM( p, 3 )
if( is.null(p) ) return(NULL)
# translate p to the centered zonohedron
gcentered = p - matrix( x$center, nrow(p), 3, byrow=TRUE ) #; print(gcentered)
hg = tcrossprod( x$facet$normal, gcentered ) #; print( str(hg) )
out = inside_zonotope( x, p, hg )
return( out )
}
}
# x a zonohedron object
# ... possibly even *more* zonohedron objects !!
#
# return a data.frame with a row for each zonohedron, with important metrics. N rows
summary.zonohedron <- function( object, ... )
{
# combine all the zonohedron objects into a single list
zlist = c( list(object), list(...) ) #; print( zlist )
summary_from_zlist( zlist )
}
# zlist     a list of zonohedron objects
# full if TRUE, then include area and volume and pointed columns
#
# return a data.frame with a row for each zonohedron, with important metrics. N rows
summary_from_zlist <- function( zlist, full=TRUE )
{
ok = sapply( zlist, inherits, what="zonohedron" ) #; print(ok)
if( ! all(ok) )
{
log_level( ERROR, "%d of the %d objects in the list are not zonohedra.", sum(!ok), length(ok) )
return(NULL)
}
n = length(zlist)
shortname = rep( NA_character_, n )
fullname = rep( NA_character_, n )
generators = rep( NA_integer_, n )
#center = matrix( NA_real_, nrow=n, ncol=3 )
vertices = rep( NA_integer_, n )
edges = rep( NA_integer_, n )
facets = rep( NA_integer_, n )
area = rep( NA_real_, n )
volume = rep( NA_real_, n )
pointed = rep( NA, n )
for( k in 1:n )
{
zono = zlist[[k]]
sn = attr(zono,"shortname")
if( ! is.null(sn) ) shortname[k] = sn
fn = attr(zono,"fullname")
if( ! is.null(fn) ) fullname[k] = fn
generators[k] = ncol( getmatrix( zono$matroid ) )
# center[k, ] = zono$center
mets = getmetrics(zono)
vertices[k] = mets$vertices
edges[k] = mets$edges
facets[k] = mets$facets
area[k] = mets$area
volume[k] = mets$volume
pointed[k] = is_pointed( zono )
}
rnames = names(zlist)
if( is.null(rnames) || anyDuplicated(rnames)!=0 )
rnames = 1:n
out = data.frame( row.names=rnames )
# out$shortname = shortname
out$fullname = fullname
out$generators = generators
#$out$center = center
out$vertices = vertices
out$edges = edges
out$facets = facets
if( full )
{
out$area = area
out$volume = volume
out$pointed = pointed
}
return( out )
}
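# A sketch (editor's addition, not from the original source): compare several zonohedra
# in a single data.frame, via the summary() method above.
if( FALSE )
    {
    summary( quasicube(c(1,1,1)), polarzonohedron(8) )
    }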
# x a zonohedron object
# base a numeric vector of length 3, the basepoint of all the rays
# base must be in the interior of x,
#               or, if x is salient, base can also be the black or white point on the boundary of x
# direction an Nx3 matrix with directions in the rows; each direction must be non-zero
#
# value a dataframe with columns
# base given basepoint of all the rays (all the same)
# direction given directions of the rays
# facetidx idx of the facet where ray exits the zonohedron
#   sign        +1 if the exit facet has plane constant +beta, and -1 if -beta
# tmax ray parameter of intersection with facet
#   point       point of intersection of the ray and the facet
# timetrace time to do the trace, in sec
#
raytrace.zonohedron <- function( x, base, direction, invert=FALSE, plot=FALSE, ... )
{
ok = is.numeric(base) && length(base)==3 && all( is.finite(base) )
if( ! ok )
{
log_level( ERROR, "base is invalid. It must be a numeric vector of length 3, and all entries finite." )
return(NULL)
}
direction = prepareNxM( direction, 3 )
if( is.null(direction) ) return(NULL)
# translate base to the centered zonohedron
base = as.numeric(base)
gcentered = base - x$center #; print(gcentered)
dim(base) = c(1,3)
dim(gcentered) = c(1,3)
hg = tcrossprod( x$facet$normal, gcentered ) #; print( str(hg) )
# hg = as.numeric( x$facet$normal %*% gcentered ) #; print( str(hg) )
# test whether base is black or white point, no tolerance here
dim(gcentered) = NULL
blackpt = ifelse( is_salient(x), all(gcentered == -x$center), FALSE )
whitept = ifelse( is_salient(x), all(gcentered == x$center), FALSE )
if( blackpt || whitept )
{
# get the normals for all facets that meet 0
# normal0 is Mx3 where M is the number of these facets
normal0 = x$facet$normal[ x$facet0, , drop=FALSE ]
# get a vector that points from base into the interior
if( blackpt )
interiorvec = x$center
else
interiorvec = -(x$center)
test = normal0 %*% interiorvec
dim(test) = NULL
# in the next line, sign(test) is replicated to all columns of normal0
# normal0 will be used in the for() loop below
normal0 = sign(test) * normal0
if( FALSE )
{
cat( "before test=", test, "\n" )
test = normal0 %*% interiorvec
cat( "after test=", test, "\n" )
}
}
else
{
# not blackpt or whitept, so verify that base is in the *interior* of x
df = inside_zonotope( x, base, hg )
if( 0 <= df$distance )
{
log_level( ERROR, "point base=(%g,%g,%g) is not in the interior of the zonohedron. distance=%g >= 0",
base[1], base[2], base[3], df$distance )
return(NULL)
}
}
dim(base) = NULL
n = nrow(direction)
tmax = rep(NA_real_,n)
idx = rep(NA_integer_,n)
sign = rep(NA_integer_,n)
point = matrix(NA_real_,n,3)
timetrace = rep(NA_real_,n)
for( k in 1:n )
{
time_start = gettime()
v = direction[k, ]
if( any( is.na(v) ) ) next
if( sum(v*v) == 0 ) next # 0-vector
if( blackpt || whitept )
{
interior = all( 0 < normal0 %*% v )
if( ! interior )
# the ray starts on the boundary and does *not* enter the interior, so give up
next
}
hv = x$facet$normal %*% v
numerator = x$facet$beta - sign(hv)*hg
tvec = numerator / abs(hv)
tvec[ ! is.finite(tvec) ] = Inf
j = which.min( tvec ) # this ignores ties and Infs
if( tvec[j] <= 0 )
{
log_level( WARN, "Internal Warning. direction=(%g,%g,%g) failed to intersect the boundary properly.",
v[1], v[2], v[3] )
next # failed to intersect properly
}
tmax[k] = tvec[j] # tmax[k] is positive
idx[k] = j # x$facet$idx[j]
sign[k] = as.integer( sign(hv[j]) )
point[k, ] = base + tmax[k] * v #; print( optcentered )
timetrace[k] = gettime() - time_start
}
rnames = rownames(direction)
if( is.null(rnames) || anyDuplicated(rnames) ) rnames = 1:n
out = data.frame( row.names=rnames )
out$base = matrix( base, n, 3, byrow=TRUE ) # replicate base to all rows
out$direction = direction
out$facetidx = idx
out$sign = sign
out$tmax = tmax
out$point = point
out$timetrace = timetrace
cnames = colnames(base)
if( is.null(cnames) ) cnames = colnames(direction)
colnames(out$point) = cnames
if( invert )
{
dat = invertboundarydata( x, out )
if( ! is.null(dat) )
{
out$distance = dat$distance
out$pcube = dat$pcube
out$transitions = dat$transitions
}
}
if( plot )
{
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
log_level( WARN, "Package 'rgl' is required for plotting. Please install it." )
else if( rgl::cur3d() == 0 )
log_level( WARN, "Cannot add raytrace to plot, because there is no rgl window open." )
else
{
xyz = matrix( base, nrow=n, ncol=3, byrow=T )
xyz = rbind( xyz, point )
perm = 1:(2*n)
dim(perm) = c(n,2L)
perm = t(perm)
dim(perm) = NULL
# print( perm )
xyz = xyz[ perm, ]
col = 'red'
rgl::segments3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col=col )
rgl::points3d( base[1], base[2], base[3], col=col, size=5, point_antialias=TRUE )
rgl::points3d( point[ ,1], point[ ,2], point[ ,3], col=col, size=5, point_antialias=TRUE )
            return( invisible(out) )
}
}
return( out )
}
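# A sketch (editor's addition, not from the original source): trace 2 rays from the center
# to the boundary, and invert the boundary points back to the source cube.
if( FALSE )
    {
    zono = polarzonohedron( 9 )
    direction = rbind( c(1,0,0), c(0,1,1) )
    raytrace( zono, zono$center, direction, invert=TRUE )
    }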
# section() compute intersection of zonohedron and plane(s)
#
# x a zonohedron object
# normal a non-zero numeric vector of length 3, the normal of all the planes
# beta a vector of plane-constants. The equation of plane k is: <x,normal> = beta[k]
#
# value     a list of length = length(beta).  Each item in the list is a data.frame with these columns:
#               point       Mx3 matrix of points on the section = a polygon, in order around the boundary.
#                           M=1 for a supporting plane, and if there is no intersection, then M=0 rows.
#               hyperidx    index of the facet-pair containing each point (only present when 2<=M)
#               sign        +1 or -1, selecting the facet within the pair (only present when 2<=M)
section.zonohedron <- function( x, normal, beta, tol=1.e-10, plot=FALSE, ... )
{
timermain = createtimer()
ok = is.numeric(normal) && length(normal)==3 && all( is.finite(normal) ) && any( normal!=0 )
if( ! ok )
{
log_level( ERROR, "normal is invalid. It must be a non-zero numeric vector of length 3, and all entries finite." )
return(NULL)
}
ok = is.numeric(beta) && 0<length(beta) && all( is.finite(beta) )
if( ! ok )
{
log_level( ERROR, "beta is invalid. It must be a numeric vector of positive length, and all entries finite." )
return(NULL)
}
cnames = names(normal) # save these
dim(normal) = NULL
# unitize normal
#normal = normal / sqrt( sum(normal^2) )
matsimple = getsimplified(x$matroid)
matgen = getmatrix( matsimple )
gndgen = getground( matsimple )
numgen = ncol( matgen ) # so matgen is 3 x numgen
# for each edge, compute radius of the projection of the edge onto the line generated by normal
normalgen = as.numeric( normal %*% matgen )
radiusgen = 0.5 * abs(normalgen) # so length(radiusgen) = numgen
timermain = updatetimer( timermain )
timeedges = timermain$elapsed
# for each facet/hyperplane, compute radius of the projection of the facet onto the line generated by normal
if( FALSE )
{
# make lookup table from ground to column index
idxfromground = integer( gndgen[numgen] )
idxfromground[ gndgen ] = 1:numgen
myfun <- function( hyper ) { sum( radiusgen[ idxfromground[hyper] ] ) }
radiusfacet = sapply( matsimple$hyperplane, myfun ) #; print(radiusfacet)
}
else
{
# faster method in plain C
radiusfacet = .Call( C_radiusfacet, matsimple$hyperplane, gndgen, radiusgen )
}
timermain = updatetimer( timermain )
timefacets = timermain$elapsed
# numfacets is really the number of facet pairs
numfacets = nrow( x$facet )
# make matrix of *all* facet centers with 2*numfacets rows
center = rbind( x$facet$center, -x$facet$center )
# make matrix of *all* facet normals with 2*numfacets rows
normalfacet = rbind( x$facet$normal, -x$facet$normal )
# make matching products of these centers and normal vector
cn = as.numeric( x$facet$center %*% normal )
cn = c( cn, -cn )
# compute range of normal over each facet
cnneg = cn - radiusfacet
cnpos = cn + radiusfacet
    hyperplane2 = rep( matsimple$hyperplane, 2 )    # for a list, this is the same as c(matsimple$hyperplane,matsimple$hyperplane)
timermain = updatetimer( timermain )
timecenters = timermain$elapsed
# translate beta to the centered zonogon
cent.norm = sum( x$center * normal )
betacentered = as.numeric(beta) - cent.norm #; print(betacentered)
# compute radius of the entire zonohedron, after projection onto the line generated by normal
betamax = sum( radiusgen )
betamin = -betamax # by symmetry
#cat( "centered betamin=", betamin, " betamax=", betamax, '\n' )
#cat( "betacentered=", betacentered, '\n' )
res = support( x, normal )
argmax = res$argmax
    argmin = 2*x$center - argmax    # the antipodal point, by central symmetry
#cat( "betamax=", betamax, " maxvalue - cent.norm = ", res$value - cent.norm, '\n' )
# find 3x2 matrix for projection to 2D
# this works for every section, because all are parallel
frame3x2 = frame3x2fun( normal )
out = vector( length(beta), mode='list' )
names(out) = sprintf( "normal=%g,%g,%g. beta=%g", normal[1], normal[2], normal[3], beta )
for( k in 1:length(beta) )
{
beta_k = betacentered[k]
if( beta_k < betamin-tol || betamax+tol < beta_k )
{
# plane does not intersect the zonohedron, there is no section
df = data.frame( row.names=character(0) )
df$point = matrix( 0, 0, 3 )
out[[k]] = df # list( beta=beta[k], section=matrix( 0, 0, 3 ) )
next
}
if( abs(beta_k - betamax) < tol )
{
# special case - only one point of intersection
df = data.frame( row.names=1 )
df$point = matrix( argmax, 1, 3 )
out[[k]] = df # list( beta=beta[k], section= matrix( argmax, 1, 3 ) )
next
}
if( abs(beta_k - betamin) < tol )
{
# special case - only one point of intersection
df = data.frame( row.names=1 )
df$point = matrix( argmin, 1, 3 )
out[[k]] = df # = list( beta=beta[k], section= matrix( argmin, 1, 3 ) )
next
}
# find indexes of all facets that intersect this plane
indexvec = which( cnneg < beta_k & beta_k < cnpos ) #; print( indexvec )
if( length(indexvec) == 0 )
{
# should not happen
log_level( WARN, "Internal Error. length(indexvec) == 0." )
next
}
#cat( "indexvec=", indexvec, '\n' )
log_level( INFO, "beta[%d] = %g. section has %d edges.", k, beta[k], length(indexvec) )
# extract the sublist of only those facets/hyperplanes that intersect the plane
hypersub = hyperplane2[ indexvec ]
# in the C function, non-trivial hyperplanes are skipped and the row of section is filled with NAs
section = .Call( C_sectionzonohedron, hypersub, center[indexvec, ], normalfacet[indexvec, ], cn[indexvec], beta_k,
gndgen, normalgen, matgen, matsimple$crossprods )
if( is.null(section) ) return(NULL)
# assign rownames(section)
idxhyperplane2 = rep( 1:length(matsimple$hyperplane), 2 )
idxhypersub = idxhyperplane2[ indexvec ]
antipodalmask = (numfacets < indexvec)
signchar = ifelse( antipodalmask, '-', '' )
rownames( section ) = paste( idxhypersub, signchar, sep='' )
lenvec = lengths(hypersub)
if( any( 2 < lenvec ) )
{
# fill in the gaps in section[]
idxnt = which( 2 < lenvec )
log_level( INFO, "section k=%d has %d nontrivial facets.", k, length(idxnt) )
for( j in 1:length(idxnt) )
{
idxhyper = idxhypersub[ idxnt[j] ]
hyper = matsimple$hyperplane[[idxhyper]]
#cat( "j=", j, " idxnt[j]=", idxnt[j], " idxhyper=", idxhyper, " length(hyper)=", length(hyper), '\n' )
# for the facet center we only use the "original" non-antipodal facet
facetcenter = x$facet$center[ idxhyper, ]
#idx2 = indexvec[ idxnt[j] ]
antipodal = antipodalmask[ idxnt[j] ]
#cat( "Antipodal = ", antipodal, '\n' )
thesign = sign( 0.5 - antipodal ) # so FALSE -> 1 and TRUE -> -1
#cat( "thesign = ", thesign, '\n' )
#print( hyper )
# get the zonogon and the 3x2 matrix
zonogon = x$zonogon[[ idxhyper ]]
A = x$frame3x2[[ idxhyper ]]
if( is.null(zonogon) || is.null(A) )
{
# should not happen
log_level( ERROR, "Internal Error. zonogon or A is NULL." )
return(NULL)
}
# change of coordinates from 3D to 2D
norm2 = as.double( normal %*% A )
temp3 = as.double( -A %*% zonogon$center ) + facetcenter
beta2 = thesign * beta_k - sum( temp3 * normal )
sec2 = section( zonogon, norm2, beta2 )
#print( sec2 )
p2 = sec2$boundary2[1, ]
# now map p2 back to 3D and the centered zonohedron
p3 = as.double( A %*% (p2 - zonogon$center) ) + facetcenter
section[ idxnt[j], ] = thesign * p3
}
#print( section )
}
# check for NAs
bad = is.na( section[ ,1] )
if( any(bad) )
{
log_level( WARN, "%d points of the section (beta=%g) could not be computed, and have been set to NA.",
sum(bad), beta[k] )
}
if( FALSE )
{
# check the result
test = as.double( section %*% normal) #; print(test - beta_k)
test = 5.e-10 < abs(test - beta_k) #; print(test)
if( any(test,na.rm=TRUE) )
{
log_level( ERROR, "Internal Error. k=%d. %d points not on beta_k = %g plane.",
k, sum(test,na.rm=TRUE), beta_k )
print(section)
next
}
}
# find suitable point in the interior of this section, using argmin and argmax
s = (beta_k - betamin) / (betamax - betamin)
center_section = (1-s)*argmin + s*argmax - x$center
# project to 2D
p2D = section %*% frame3x2
# subtract projection center_section, so points of section now go around the origin
c2D = center_section %*% frame3x2
res = .Call( C_plusEqual, p2D, -c2D, 1L )
if( is.null(res) ) return(NULL)
# not a polygon yet, the points must be ordered by angle using atan2()
perm = order( atan2(p2D[ ,2],p2D[ ,1]), na.last=FALSE )
# add back the center of the zonohedron
res = .Call( C_plusEqual, section, x$center, 1L )
if( is.null(res) ) return(NULL)
# reorder the data
section = section[perm, ]
idxhypersub = idxhypersub[perm]
antipodalmask = antipodalmask[perm]
df = data.frame( row.names=rownames(section) )
df$point = section
df$hyperidx = idxhypersub
df$sign = 1L - 2L * antipodalmask # so TRUE -> -1L and FALSE -> +1L. ifelse( antipodalmask, -1L, 1L )
out[[k]] = df # list( beta=beta[k], section=section )
}
if( FALSE )
{
timermain = updatetimer( timermain )
timesections = timermain$elapsed
timeall = timermain$total
cat( "edges: ", timeedges * 1000, " msec\n" )
cat( "facets: ", timefacets * 1000, " msec\n" )
cat( "centers: ", timecenters * 1000, " msec\n" )
cat( "sections: ", timesections * 1000, " msec", " sections=", length(beta), " ", timesections*1000/length(beta), "msec per section\n" )
cat( "total: ", timeall * 1000, " msec\n" )
}
if( plot )
{
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
{
log_level( WARN, "Package 'rgl' is required for plotting. Please install it." )
}
else if( rgl::cur3d() == 0 )
{
log_level( WARN, "Cannot add section to plot, because there is no rgl window open." )
}
else
{
for( k in 1:length(beta) )
{
xyz = out[[k]]$point
rgl::polygon3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], fill=FALSE, col='red' )
}
}
}
return(out)
}
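# A sketch (editor's addition, not from the original source): cut a zonohedron with 3 parallel
# planes of constant z, and count the vertices of each section polygon.
if( FALSE )
    {
    zono = polarzonohedron( 9 )                             # spans 0 <= z <= pi
    seclist = section( zono, normal=c(0,0,1), beta=c(0.5,1,1.5) )
    sapply( seclist, nrow )
    }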
# given a point on the boundary, return a point in the unit n-cube that maps to it, under W
# x the zonohedron
# point Mx3 matrix with points on the boundary of x in the rows
# Such points typically come in 1 of 2 ways:
# 1) as computed by raytrace()
# 2) as computed by section()
# tol tolerance for being on the boundary
#
# returns a data.frame with M rows and these columns:
# point the original given matrix of boundary points
# distance signed distance to the boundary of the zonohedron
# facetidx index of the facet pair
# sign sign of the facet in the pair
#   pcube       an MxN matrix, where M=nrow(point) and N=number of points in x$matroid
# each row of the matrix is a point in the n-cube
# that maps to the boundary point on the zonohedron
# transitions the number of transitions - a non-negative even integer
invertboundary.zonohedron <- function( x, point, tol=5.e-14 )
{
point = prepareNxM( point, 3 )
if( is.null(point) ) return(NULL)
ok = is.numeric(point) && is.matrix(point) && 0<nrow(point) && ncol(point)==3
if( ! ok )
{
log_level( ERROR, "argument point is invalid." )
return(NULL)
}
# compute boundarydata from x and boundary
direction = .Call( C_sumMatVec, point, -x$center, 1L )
# in the next call, invert=FALSE
boundarydata = raytrace( x, x$center, direction )
if( is.null(boundarydata) ) return(NULL)
# replace computed boundary points with the originals
boundarydata$point = point
#print( boundarydata )
# check tmax
#delta = max( abs(boundarydata$tmax-1) )
#if( tol < delta )
# {
# log.string( WARN, "boundary delta = %g > %g.", delta, tol )
# }
dat = invertboundarydata( x, boundarydata, tol=tol )
if( is.null(dat) ) return(NULL)
out = data.frame( row.names=rownames(dat) )
out$point = point
out$distance = dat$distance
out$facetidx = boundarydata$facetidx
out$sign = boundarydata$sign
out$pcube = dat$pcube
out$transitions = dat$transitions
return( out )
}
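# A sketch (editor's addition, not from the original source): round-trip a boundary point
# through the source cube.  It assumes the invertboundary() generic dispatches to the method
# above.  The recovered cube point maps back to the boundary point, up to numerical tolerance.
if( FALSE )
    {
    zono = polarzonohedron( 9 )
    bd = raytrace( zono, zono$center, c(1,1,1) )
    inv = invertboundary( zono, bd$point )
    range( inv$pcube %*% t(getmatrix(zono$matroid)) - bd$point )    # both ends near 0
    }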
# x the zonohedron
# boundarydata as computed by raytrace()
# the only columns required are: point, facetidx, sign
#
# this function does the real work for invertboundary()
# it is also called from raytrace(), when the arg invert=TRUE
#
# returns a data.frame with the same number of rows, and new columns:
# distance signed distance to the boundary of the zonohedron
# pcube an MxN matrix, where M=nrow(boundarydata) and N=number of points in x$matroid
# each row of the matrix is a point in the n-cube
# that maps to the boundary point on the zonohedron
# transitions the number of transitions - a non-negative even integer
invertboundarydata <- function( x, boundarydata, tol=5.e-14 )
{
ok = is.data.frame(boundarydata) && ! is.null(boundarydata$point) && ! is.null(boundarydata$facetidx) && ! is.null(boundarydata$sign)
if( ! ok )
{
log_level( ERROR, "data.frame argument boundarydata is invalid." )
return(NULL)
}
point = boundarydata$point
matroidsimple = getsimplified(x$matroid)
matrixsimple = getmatrix( matroidsimple )
m = nrow( boundarydata )
nsimp = ncol( matrixsimple )
idxfromground = idxfromgroundfun( matroidsimple$ground )
distance = rep( NA_real_, m )
pcube = matrix( NA_real_, m, nsimp )
tolW = 5.e-9
tolE = 5.e-5
for( k in 1:m )
{
# get facet center and normal, in the centered zonohedron
facetidx = boundarydata$facetidx[k]
normal = boundarydata$sign[k] * x$facet$normal[facetidx, ]
center = boundarydata$sign[k] * x$facet$center[facetidx, ]
# translate given point to centered zonohedron
pcentered = point[k, ] - x$center
if( is.na(facetidx) || is.na(pcentered[1]) ) next
        # compute signed distance to the boundary
distance[k] = sum( pcentered * normal ) - x$facet$beta[ facetidx ]
if( tol < abs(distance[k]) ) next # too far from boundary
colidx = idxfromground[ matroidsimple$hyperplane[[ facetidx ]] ]
# cat( "colidx=", colidx, '\n' )
numgen = length( colidx )
# now compute the vector alpha, which has length numgen
# alpha has the coefficients of the edges of the facet
if( numgen == 2 )
{
            # facet is a parallelogram, which is the usual and easier case
edge2 = matrixsimple[ , colidx ] #; cat( "colidx=", colidx, '\n' )
M = cbind( edge2, normal ) #; print( M ) # M is 3x3
y = base::solve( M, pcentered - center ) #; print(y)
y = y[1:2]
# test for inside parallelogram, with a tolerance
ok = all( abs(y) <= 0.5 + tolW )
if( ! ok )
{
if( all( abs(y) <= 0.5 + tolE ) ) # tolE is much bigger than tolW
{
lev = WARN # keep going and translate and clamp
tol2 = tolW
}
else
{
lev = ERROR # this will force stoppage
tol2 = tolE
}
log_level( lev, "Internal problem. y=[%.15g,%.15g] is outside the square [-1/2,1/2]^2. tol2=%g",
y[1], y[2], tol2 )
# next # something went wrong
}
# translate from [-0.5,0.5] to [0,1]
alpha = y + 0.5
# clamp alpha to [0,1]
alpha = pmin( pmax( alpha, 0), 1 )
}
else
{
# facet is a more complex zonogon, with 6 or more sides
zono2D = x$zonogon[[ facetidx ]]
# check sizes
if( numgen != length( getground( getsimplified(zono2D$matroid) ) ) )
{
log_level( ERROR, "Internal error. numgen=%d != %d.",
numgen, length( getground( getsimplified(zono2D$matroid) ) ) )
next # something went wrong
}
A = x$frame3x2[[ facetidx ]]
p2D = as.double( (pcentered - center) %*% A ) + zono2D$center
# cat( "p2D=", p2D, '\n' )
res = invert( zono2D, p2D, tol=tol )
if( is.null(res) ) next # something went wrong
# res$pcube has already been clamped to [0,1]
alpha = as.double( res$pcube )
# cat( "alpha=", alpha, '\n' )
}
        # assign the source point in the cube;
        # the coordinates corresponding to the generators of this facet are wrong here,
        # and are fixed by overriding with alpha in the next line
pcube[ k, ] = (sign(normal %*% matrixsimple) + 1) / 2
# override the coefficients of the edges of the facet
pcube[ k, colidx ] = alpha
}
# print( pcube )
pcube = invertcubepoints( x, pcube, tol=tol )
if( is.null(pcube) ) return(NULL)
# colnames(pcube) = as.character( x$matroid$ground )
transitions = rep( NA_integer_, m )
for( k in 1:m )
{
if( is.finite( pcube[k,1] ) ) transitions[k] = transitioncount( pcube[k, ] )
}
rnames = rownames(point)
if( is.null(rnames) || anyDuplicated(rnames)!=0 ) rnames = 1:m
out = data.frame( row.names=rnames )
out$point = point
out$distance = distance
#out$facetidx = boundarydata$facetidx
#out$sign = boundarydata$sign
out$pcube = pcube
out$transitions = transitions
if( FALSE )
{
# test the inversion
matorig = getmatrix( x$matroid )
delta = abs( pcube %*% t(matorig) - point)
#cat( "range(delta)=", range(delta), '\n' )
#print( t(matorig) )
#print( delta )
delta = rowSums( delta )
if( any( tol < delta, na.rm=TRUE ) )
{
log_level( WARN, "Inversion test failed. max(delta)=%g > %g=tol",
max(delta,na.rm=TRUE), tol )
}
out$delta = delta
}
return( out )
}
# x a zonohedron object
# gndpair   Mx2 integer matrix, where each row gives 2 points in the ground set of the simplified matroid;
#           these define 2 generators of the matroid, and a pgram in the boundary of the zonohedron.
# cube      whether the corresponding point in the source cube should be returned
#
# returns a data.frame with M rows and these columns:
# gndpair the given gndpair
# hyperplaneidx the index of the hyperplane that contains gndpair
# center Mx3 matrix with center of the facet, relative to the center of the zonohedron
# transitions the number of transitions - a non-negative even integer
# And if cube is TRUE, then this column
#   pcube           an MxN matrix, where M=nrow(gndpair) and N=number of points in the simplified matroid's ground set
# each row of the matrix is a point in the n-cube
# that maps to the boundary point on the zonohedron
#
# the row.names are set to the index of the corresponding gndpair
boundarypgramdata <- function( x, gndpair, cube=FALSE )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It is not a zonohedron." )
return(NULL)
}
matsimple = getsimplified( x$matroid )
idxfromground = idxfromgroundfun( matsimple$ground )
gndpair = prepareNxM( gndpair, 2 )
if( is.null(gndpair) ) return(NULL)
idxpair = idxfromground[ gndpair ] # this also converts to an integer
dim( idxpair ) = dim( gndpair ) # ; print( idxpair )
if( ! is.integer(gndpair) )
{
gndpair = matsimple$ground[ idxpair ]
dim(gndpair) = dim(idxpair)
}
m = nrow( idxpair )
nsimp = length( matsimple$ground )
# in the next line, invalid idxpair entries lead to NA values in pairidx
pairidx = .Call( C_pairindex, idxpair, nsimp ) #; print( pairidx )
# NA values in pairidx lead to NA values in hyperplaneidx
hyperplaneidx = matsimple$hyperplaneidx[ pairidx ] ;
if( cube ) pcube = matrix( NA_real_, m, nsimp )
transitions = rep( NA_integer_, m )
matrixsimple = getmatrix( matsimple )
for( k in 1:m )
{
# cat( "------------ k=", k, '---------\n' )
hyperidx = hyperplaneidx[k]
if( is.na(hyperidx) ) next
normal = x$facet$normal[hyperidx, ]
colidx = idxfromground[ matsimple$hyperplane[[ hyperidx ]] ] #; print( colidx )
pc = normal %*% matrixsimple #; print( pc[colidx] )
# pc[colidx] should be 0 or nearly 0.
# override the coefficients of the edges of the facet
pc[colidx] = 0
if( FALSE )
{
# check that the zeros of pc[] are exactly colidx
idx = which(pc==0)
ok = length(idx)==length(colidx) && all( idx == colidx )
if( ! ok )
{
log_level( WARN, "internal error. which(pc==0) != colidx" )
}
}
# scale to interval [0,1]
pc = (sign(pc) + 1) / 2 #; print( pc )
transitions[k] = transitioncount( pc )
if( cube ) pcube[k, ] = pc
}
if( anyDuplicated(pairidx)==0 && all(is.finite(pairidx)) )
rnames = pairidx
else
rnames = 1:length(pairidx)
out = data.frame( row.names=rnames )
out$gndpair = gndpair
out$hyperplaneidx = hyperplaneidx
out$center = x$facet$center[ hyperplaneidx, , drop=FALSE]
out$transitions = transitions
if( cube ) out$pcube = pcube
return( out )
}
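# A sketch (editor's addition, not from the original source): look up the pgram facets spanned
# by 2 pairs of generators, identified by their points in the ground set 1:9.
if( FALSE )
    {
    zono = polarzonohedron( 9 )
    boundarypgramdata( zono, rbind( c(1,2), c(2,5) ), cube=TRUE )
    }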
if( FALSE )
{
# x a zonohedron object
# direction Mx3 matrix, with the M directions in the rows, direction (0,0,0) is invalid
# tol tolerance for argmax, being in the same affine subspace
#
# returns a data.frame with M rows and these columns:
# direction the given matrix of directions
# value the value of the support function of x, in the given direction
# argmax a point on the boundary of x where the max is taken
#   dimension   of the set argmax; 0 means a vertex, 1 an edge, and 2 a facet
#
# value: see support_zonotope()
support.zonohedron <- function( x, direction, tol=5.e-15 )
{
return( support_zonotope(x,direction,tol) )
}
# x a zonohedron whose matroid is simple
#
# replace each generator g by the pair -g/2 , +g/2
symmetrize.zonohedron <- function( x )
{
if( ! is_simple(x$matroid) )
{
log_level( ERROR, "matroid is not simple" )
return(NULL)
}
matgen = getmatrix(x$matroid)
matgen = cbind( -matgen/2, matgen/2 )
gndgen = getground(x$matroid)
gndgen = c( gndgen, gndgen[ length(gndgen) ] + gndgen )
out = zonohedron( matgen, e0=0, e1=0, ground=gndgen )
return(out)
}
minkowskisum.zonohedron <- function( zono1, zono2, e0=0, e1=1.e-6, e2=1.e-10, ground=NULL, ... )
{
if( ! inherits(zono2,"zonohedron") )
{
log_level( ERROR, "2nd argument zono2 is invalid. It's not a zonohedron." )
return(NULL)
}
# get the 2 matrices and cbind them
mat1 = zono1$matroid$matrix
mat2 = zono2$matroid$matrix
out = zonohedron( cbind(mat1,mat2), e0=e0, e1=e1, e2=e2, ground=ground )
return( out )
}
'%+%.zonohedron' <- function(zono1,zono2)
{
return( minkowskisum( zono1, zono2 ) )
}
}
if( FALSE )
{
is_pointed.zonohedron <- function( x )
{
return( 3 <= length(x$facet0) )
}
is_salient.zonohedron <- function( x )
{
return( 0 < length(x$facet0) )
}
}
# x a zonohedron
# gen       point in the ground set referring to a generator
# full      if TRUE, return both halves of the belt; if FALSE, return only half the belt
# returns a data.frame with 3 columns
#   *) midpointmat  m x 3 matrix of edge midpoints
#   *) point0       m x 3 matrix of edge start points
#   *) point1       m x 3 matrix of edge stop points
getbeltedges <- function( x, gen, full=TRUE )
{
matsimp = getsimplified( x$matroid )
gen = as.integer(gen) #; print( str(gen) )
genidx = match( gen, matsimp$ground )
# midpointmat is for the centered zonohedron
midpointmat = .Call( C_beltmidpoints, matsimp$hyperplane, x$beltlist[[genidx]], gen, x$facet$center, x$facet$normal,
matsimp$ground, matsimp$matrix, matsimp$crossprods )
if( is.null(midpointmat) ) return(NULL)
if( full ) midpointmat = rbind( midpointmat, -midpointmat )
.Call( C_plusEqual, midpointmat, x$center, 1L )
edge = matsimp$matrix[ ,genidx]
#point0 = duplicate(midpointmat)
#.Call( C_plusEqual, point0, -0.5*edge, 1L )
point0 = .Call( C_sumMatVec, midpointmat, -0.5*edge, 1L )
#point1 = duplicate(midpointmat)
#.Call( C_plusEqual, point1, 0.5*edge, 1L )
point1 = .Call( C_sumMatVec, midpointmat, 0.5*edge, 1L )
out = data.frame( row.names=1:nrow(midpointmat) )
out$midpointmat = midpointmat
out$point0 = point0
out$point1 = point1
return(out)
}
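# A sketch (editor's addition, not from the original source): extract the belt (zone)
# of edges parallel to one generator.
if( FALSE )
    {
    zono = polarzonohedron( 9 )
    edgedf = getbeltedges( zono, 3 )        # generator with ground point 3
    nrow( edgedf )                          # number of edges in the full belt
    }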
if( FALSE )
{
# methods taken from:
# Optimal Whitening and Decorrelation
    # Agnan Kessy, Alex Lewin, and Korbinian Strimmer (2016)
#
# returns a new zonohedron, as spherical as possible
spherize.zonohedron <- function( x, method="ZCA", ... )
{
return( spherize_zonotope( x, method=method ) )
}
}
# x a zonohedron
# W         a 3x3 invertible matrix; the map is x -> W %*% x, i.e. matrix on the left, column vector on the right.
lintransform.zonohedron <- function( x, W )
{
if( length(W) == 1 )
W = W * diag(3)
ok = all( dim(W) == c(3,3) )
if( ! ok )
{
log_level( ERROR, "argument W is invalid." )
return(NULL)
}
# verify W is OK
res = try( solve(W), silent=TRUE )
    if( inherits(res,"try-error") )
{
log_level( ERROR, "matrix W is not invertible." )
return(NULL)
}
Winv = res
# just copy from x to out, and then make selective changes !
out = x
out$matroid = lintransform( x$matroid, W )
if( is.null(out$matroid) ) return(NULL)
out$center = as.double( x$center %*% t(W) )
out$facet$center = x$facet$center %*% t(W)
# compute all the outward pointing edge normals
normal = x$facet$normal %*% Winv
# now unitize
normal = normalizeMatrix( normal, 1L )
out$facet$normal = normal
# calculate the n plane constants beta
    # these plane constants are for the centered zonohedron
out$facet$beta = .rowSums( normal * out$facet$center, nrow(normal), ncol(normal) )
if( ! is.null(out$frame3x2) )
{
for( k in 1:length(out$frame3x2) )
out$frame3x2[[k]] = W %*% out$frame3x2[[k]]
}
if( ! is.null(out$zonoseg) )
{
# recompute out$zonoseg list completely
out$zonoseg = makezonoseglist( out$matroid )
}
attr( out, "lintransform" ) = W
return( out )
}
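# A sketch (editor's addition, not from the original source): an invertible linear map
# scales the volume by |det(W)|.
if( FALSE )
    {
    zono = polarzonohedron( 9 )
    W = diag( c(2,1,1) )
    zono2 = lintransform( zono, W )
    getmetrics(zono2)$volume / getmetrics(zono)$volume      # equals det(W) = 2
    }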
initplot3D <- function( zono, bgcol )
{
center = zono$center
white = 2 * center
# start 3D drawing
rgl::bg3d( color=bgcol )
rgl::points3d( 0, 0, 0, col='black', size=10, point_antialias=TRUE )
rgl::points3d( white[1], white[2], white[3], col='white', size=10, point_antialias=TRUE )
rgl::points3d( center[1], center[2], center[3], col='gray50', size=10, point_antialias=TRUE )
# exact diagonal of zono
rgl::lines3d( c(0,white[1]), c(0,white[2]), c(0,white[3]), col=c('black','white'), lwd=3, lit=FALSE )
return(TRUE)
}
# x a zonohedron object
# type      'e' for edges, 'p' for points drawn at the center of each facet, 'f' for filled facets
# both      if TRUE, draw both halves of the centrally symmetric boundary
plot.zonohedron <- function( x, type='e', pcol=NULL, ecol=NULL, ewd=3, etcol=NA,
fcol=NULL, falpha=1, normals=FALSE, bgcol="gray40", both=TRUE, ... )
{
if( ! requireNamespace( 'rgl', quietly=TRUE ) )
{
log_level( ERROR, "Package 'rgl' cannot be loaded. It is required for plotting the zonohedron." )
return(FALSE)
}
center = x$center
white = 2 * center
# start 3D drawing
rgl::bg3d( color=bgcol )
cube = rgl::scale3d( rgl::cube3d(col="white"), center[1], center[2], center[3] )
cube = rgl::translate3d( cube, center[1], center[2], center[3] )
rgl::points3d( 0, 0, 0, col='black', size=10, point_antialias=TRUE )
rgl::points3d( white[1], white[2], white[3], col='white', size=10, point_antialias=TRUE )
rgl::points3d( center[1], center[2], center[3], col='gray50', size=10, point_antialias=TRUE )
# exact diagonal of box
rgl::lines3d( c(0,white[1]), c(0,white[2]), c(0,white[3]), col=c('black','white'), lwd=3, lit=FALSE )
matsimp = getsimplified( x$matroid )
matgen = getmatrix(matsimp)
numgen = ncol(matgen)
gndgen = getground(matsimp)
edgecoeff = matrix( c( -0.5,-0.5, -0.5,0.5, 0.5,0.5, 0.5,-0.5), 2, 4 ) # 2x4
if( grepl( 'e', type ) )
{
# wireframe
# rgl::wire3d( cube, lit=FALSE )
n = length(gndgen)
if( is.null(ecol) )
colvec = rainbow( n )
else
{
colvec = ecol
m = length(colvec)
if( m < n ) colvec = c( colvec, rep(colvec[m],n-m) ) # extend with the last color
}
for( i in 1:n )
{
edgedf = getbeltedges( x, gndgen[i], full=both )
xyz = rbind( edgedf$point0, edgedf$point1 )
m = nrow(edgedf)
perm = 1:(2*m)
dim(perm) = c(m,2L)
perm = t(perm)
dim(perm) = NULL
# print( perm )
xyz = xyz[ perm, ]
rgl::segments3d( xyz, col=colvec[i], lwd=ewd ) #, front=polymode, back=polymode, col=col, lit=FALSE )
}
facetsNT = length(x$zonogon) # the number of Non-Trivial facets in the zonohedron
if( ! is.na(etcol) && 0<facetsNT )
{
# draw the pgram edges for the tiling of each non-pgram facet
for( i in seq_len(facetsNT) )
{
zono = x$zonogon[[i]]
matrixzono = getmatrix( getsimplified(zono$matroid) )
# rotate into 3D
matrixzono = x$frame3x2[[i]] %*% matrixzono
# find center of all pgram tiles in 3D
center3D = zono$tilingdata$center %*% t( x$frame3x2[[i]] )
# translate each pgram center to the zonogon facet in 3D
center3D = .Call( C_sumMatVec, center3D, x$facet$center[i, ], 1L )
pgrams = nrow(center3D)
step = 4
quadmat = matrix( 0, nrow=step*pgrams, ncol=3 )
for( j in 1:pgrams )
{
col2 = zono$tilingdata$idxpair[j, ]
edge = matrixzono[ , col2 ] # 3x2
k = step*(j-1)
#quadmat[k+1, ] = center3D[j, ] - 0.5 * edge[ , 1] - 0.5*edge[ , 2]
#quadmat[k+2, ] = center3D[j, ] - 0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[k+3, ] = center3D[j, ] + 0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[k+4, ] = center3D[j, ] + 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[ (k+1):(k+4), ] = .Call( C_sumMatVec, t(edge %*% edgecoeff), center3D[j, ], 1L )
}
xyz = .Call( C_sumMatVec, quadmat, x$center, 1L )
if( both )
xyz = rbind( xyz, .Call( C_sumMatVec, -quadmat, x$center, 1L ) )
rgl::quads3d( xyz, col=etcol, lwd=1, front='lines', back='lines', lit=FALSE ) # quad edges
}
}
}
if( grepl( 'p', type ) )
{
colvec = pcol
if( is.null(colvec) )
colvec = c( 'black', 'red' )
else if( length(colvec) == 1 )
colvec = rep( colvec[1], 2 )
# draw first half in 'black'
#xyz = duplicate( x$facet$center )
#.Call( C_plusEqual, xyz, x$center, 1L )
xyz = .Call( C_sumMatVec, x$facet$center, x$center, 1L )
rgl::points3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col=colvec[1], size=6, point_antialias=TRUE )
if( both )
{
# draw 2nd half in 'red'
#xyz = duplicate( -(x$facet$center) )
#.Call( C_plusEqual, xyz, x$center, 1L )
xyz = .Call( C_sumMatVec, -(x$facet$center), x$center, 1L )
rgl::points3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], col=colvec[2], size=6, point_antialias=TRUE )
}
}
if( grepl( 'f', type ) )
{
# draw filled quads
# make lookup table from ground to column index
idxfromground = integer( gndgen[numgen] )
idxfromground[ gndgen ] = 1:numgen
hyper = matsimp$hyperplane
lenvec = lengths(hyper)
idx2 = which( lenvec==2 )
pgrams = length(idx2)
step = 4
quadmat = matrix( 0, nrow=step*pgrams, ncol=3 )
if( is.null(fcol) )
fcol = c( 'blue', 'red', 'yellow', 'green', 'orange', 'purple' )
# plot the parallelograms
for( j in 1:pgrams )
{
i = idx2[j] # index into hyper *and* facet
center = x$facet$center[i, ]
edge = matgen[ , idxfromground[ hyper[[i]] ] ] # 3x2 matrix
k = step*(j-1)
#quadmat[k+1, ] = center - 0.5 * edge[ , 1] - 0.5*edge[ , 2]
#quadmat[k+2, ] = center - 0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[k+3, ] = center + 0.5 * edge[ , 1] + 0.5*edge[ , 2]
#quadmat[k+4, ] = center + 0.5 * edge[ , 1] - 0.5*edge[ , 2]
quadmat[ (k+1):(k+4), ] = .Call( C_sumMatVec, t(edge %*% edgecoeff), center, 1L )
}
xyz = .Call( C_sumMatVec, quadmat, x$center, 1L )
if( both )
# add opposite half
xyz = rbind( xyz, .Call( C_sumMatVec, -quadmat, x$center, 1L ) )
rgl::quads3d( xyz, col=fcol[1], alpha=falpha ) #, front=polymode, back=polymode, col=col, lit=FALSE )
# plot the non-parallelograms
for( k in seq_len( length(x$zonogon) ) )
{
zonok = x$zonogon[[k]]
# center the vertices of the zonogon
vertex = .Call( C_sumMatVec, zonok$vertex, -zonok$center, 1L )
# map from 2D to 3D, center remains at 0
vertex = vertex %*% t(x$frame3x2[[k]])
# add the facet center and zonohedron center
xyz = .Call( C_sumMatVec, vertex, x$center + x$facet$center[k, ], 1L )
# jmax and coord are only needed by the commented-out rgl::polygon3d() alternative below
jmax = which.max( abs(x$facet$normal[k, ]) )
coord = 1:3
coord = coord[-jmax]
ngenk = ncol( zonok$matroid$matrix ) # generator count for this facet; avoid clobbering numgen computed above
col = fcol[ min( ngenk-1, length(fcol) ) ]
quadmat = makequads( xyz )
rgl::quads3d( quadmat, col=col, alpha=falpha )
if( both )
{
# draw opposite half
xyz = .Call( C_sumMatVec, vertex, x$center - x$facet$center[k, ], 1L )
quadmat = makequads( xyz )
rgl::quads3d( quadmat, col=col, alpha=falpha )
#rgl::polygon3d( xyz[ ,1], xyz[ ,2], xyz[ ,3], fill=TRUE, coord=coord, col=col, random=FALSE )
}
}
}
if( normals )
{
xyz = .Call( C_sumMatVec, x$facet$center, x$center, 1L )
for( i in 1:nrow(xyz) )
rgl::arrow3d( xyz[i, ], xyz[i, ] + x$facet$normal[i, ], type="lines", col="black" )
if( both )
{
xyz = .Call( C_sumMatVec, -x$facet$center, x$center, 1L )
for( i in 1:nrow(xyz) )
rgl::arrow3d( xyz[i, ], xyz[i, ] - x$facet$normal[i, ], type="lines", col="black" )
}
}
return( invisible(TRUE) )
}
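# usage sketch, never executed (a hypothetical interactive session; requires the rgl package):
if( FALSE )
{
zono = zonohedron( classics.genlist[[5]] )
plot( zono, type='ef' ) # colored edges plus filled facets
plot( zono, type='f', falpha=0.5, normals=TRUE ) # translucent facets, with facet normals drawn as arrows
}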
plotpolygon <- function( x, normal=NULL, points=TRUE, labels=TRUE )
{
if( ! inherits(x,"zonohedron") )
{
log_level( ERROR, "Argument x is invalid. It's not a zonohedron." )
return(NULL)
}
if( ! is_pointed(x) )
{
log_level( ERROR, "Cannot plot polygon because the zonohedron is not pointed." )
return( FALSE )
}
if( is.null(normal) )
{
# make a matrix of candidates to try
normalmat = matrix( c(1,1,1, 0,0,1, 0,1,0, 1,0,0), ncol=3, byrow=TRUE )
normalmat = rbind( normalmat, supportingnormal0(x) )
}
else
{
ok = is.numeric(normal) && length(normal)==3 && any( normal!=0 )
if( ! ok )
{
log_level( ERROR, "Argument normal is invalid." )
return(FALSE)
}
normalmat = matrix( normal, nrow=1 )
}
genmat = getmatrix( getsimplified(x$matroid) ) # 3 x N
# try all rows in normalmat
found = FALSE
for( i in 1:nrow(normalmat) )
{
dots = normalmat[i, ,drop=FALSE] %*% genmat
# print( i ) ; print( dots )
if( all( 0 < dots ) )
{
# verified
found = TRUE
normal = normalmat[i, ]
break
}
}
if( ! found )
{
if( is.null(normal) )
log_level( ERROR, "Cannot find a valid normal vector and halfspace." )
else
log_level( ERROR, "The given normal=%g,%g,%g is invalid.", normal[1], normal[2], normal[3] )
return( FALSE )
}
# transpose and rescale genmat
genmat = t(genmat) / as.double(dots) # N x 3 using recycling rule
if( all(normal==c(1,1,1)) )
frame3x2 = matrix( c(1,0,0, 0,1,0), nrow=3, ncol=2 )
else
frame3x2 = frame3x2fun( normal, TRUE )
uv = genmat %*% frame3x2 # N x 2
xlim = range( uv[ ,1] )
ylim = range( uv[ ,2] )
plot( xlim, ylim, type='n', asp=1, lab=c(10,10,7), xlab='u', ylab='v', las=1 )
grid( lty=1 )
abline( h=0, v=0 )
polygon( uv[ ,1], uv[ ,2], col=NA, border='black' )
if( points ) points( uv[ ,1], uv[ ,2], pch=20, cex=0.8 )
if( labels )
{
text( 1.05 * uv[ ,1], 1.05 * uv[ ,2], rownames(genmat) )
}
zononame = deparse(substitute(x))
main = sprintf( "%s\n normal=[%g,%g,%g]", zononame, normal[1], normal[2], normal[3] )
title( main=main, cex.main=0.8 )
return( invisible(TRUE) )
}
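# usage sketch, never executed (assumes the zonohedron is pointed, so the polygon exists):
if( FALSE )
{
zono = zonohedron( classics.genlist[[5]] )
plotpolygon( zono ) # a valid normal vector is searched for automatically
}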
print.genlist <- function( x, full=TRUE, ... )
{
zlist = lapply( x, zonohedron )
# names(zlist) = NULL
#print( do.call( summary, zlist ) )
print( summary_from_zlist( zlist, full=full ) )
return( invisible(TRUE) )
}
| /scratch/gouwar.j/cran-all/cranData/zonohedra/R/zonohedron.R |
#
# zonoseg is a 1-dimensional zonotope
#
# implemented as a list with items:
# matroid the matroid for it, which includes the generating matrix, etc.
# segment the actual segment [ segment[1], segment[2] ]
# center midpoint of the segment
# segment2trans the subsegment coming from 2-transition points in the cube
# zonoseg constructor
#
# mat a numeric matrix with 1 row
# e0 threshold for a column vector to be considered 0
zonoseg <- function( mat, e0=0, ground=NULL )
{
if( ! is.matrix(mat) )
{
temp = names(mat)
mat = matrix( mat, nrow=1 )
colnames(mat) = temp
}
ok = is.matrix(mat) && is.numeric(mat) && nrow(mat)==1 && 1<=ncol(mat)
if( ! ok )
{
log_level( ERROR, "mat is invalid." )
return(NULL)
}
matroid = matroid( mat, e0=e0, ground=ground )
if( is.null(matroid) )
return(NULL)
out = list()
class( out ) = c( "zonoseg", "zonotope", class(out) )
out$matroid = matroid
# shift is the sum of all negative entries;
# it is only needed by the deadwood invert_old() below, so it is left commented out
#out$shift = sum( mat[ mat < 0 ] )
generator = as.double(mat)
df = dfminmax( generator )
out$segment = as.double(df$value)
names(out$segment) = c("min","max")
out$center = mean( out$segment )
df = dftrans2( generator )
out$segment2trans = as.double(df$value)
names(out$segment2trans) = c("min","max")
return(out)
}
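# quick sketch of the constructor, never executed (generator values chosen for illustration):
if( FALSE )
{
zono = zonoseg( c(1,-2,3) )
getsegment( zono ) # c(min=-2, max=4): -2 = sum of the negative generators, 4 = sum of the positive ones
}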
print.zonoseg <- function( x, ... )
{
# convert the 1-row matrix to a plain vector
generator = as.double( x$matroid$matrix )
names(generator) = as.character( x$matroid$ground )
npoints = length(generator)
idxfromground = integer( max(x$matroid$ground) )
idxfromground[ x$matroid$ground ] = 1:npoints
idxloop = idxfromground[ x$matroid$loop ]
maskloop = logical(npoints)
maskloop[ idxloop ] = TRUE
generator[ maskloop ] = 0 # near 0 to exactly 0
maskneg = (generator < 0) # & ! maskloop
maskpos = (0 < generator) # & ! maskloop
mess = sprintf( "%d -- %d negative, %d positive, and %d loops.\n",
npoints, sum(maskneg), sum(maskpos), sum(maskloop) )
cat( "generators: ", mess )
#tmin = sum( generator[maskneg] )
#tmax = sum( generator[maskpos] )
cat( '\n' )
df = dfminmax( generator )
mess = sprintf( "[%g,%g]\n", df$value[1], df$value[2] )
cat( "segment: ", mess )
print( df )
cat( '\n' )
df = dftrans2( generator )
mess = sprintf( "[%g,%g]\n", df$value[1], df$value[2] )
cat( "2-transition subsegment: ", mess )
print( df )
cat( '\n' )
cat( "matroid:\n" )
print( x$matroid )
return( invisible(TRUE) )
}
# x a zonoseg object
# z an M-vector of real numbers, all of which should be inside the zonoseg segment
#
# returns a data frame with M rows and these columns:
# z the given vector z
# pcube M x N matrix whose rows are points in the unit cube that map to the given numbers;
# N is the # of generators
#
# this version tries to be as economical as possible, using the smallest number of non-zero coefficients
# the actual code is more verbose, and not "slick"
invert.zonoseg <- function( x, z, tol=0, ... )
{
numpoints = length(z)
ok = is.numeric(z) && 0<numpoints
if( ! ok )
{
log_level( ERROR, "z is invalid." )
return(NULL)
}
generator = as.double( x$matroid$matrix )
#dfmm = dfminmax( generator )
#zmin = dfmm$value[1]
#zmax = dfmm$value[2]
zmin = x$segment[1]
zmax = x$segment[2]
#absgen = abs(generator)
idxneg = which( generator < 0 )
numneg = length(idxneg)
if( 0 < numneg )
{
bkneg = c( cumsum( generator[ idxneg[numneg:1] ] )[ numneg:1 ], 0 ) #; cat( "bkneg=", bkneg, '\n' )
}
idxpos = which( 0 < generator )
numpos = length(idxpos)
if( 0 < numpos ) bkpos = c( 0, cumsum( generator[idxpos] ) )
n = length( generator )
pcube = matrix( NA_real_, numpoints, n )
colnames(pcube) = as.character( x$matroid$ground )
s = numeric( n )
for( i in 1:numpoints )
{
zi = z[i]
inside = zmin-tol <= zi && zi <= zmax+tol
if( ! inside )
{
log_level( WARN, "invert.zonoseg(). zi=%g and segment is [%g,%g]. delta=%g,%g",
zi, zmin, zmax, zi-zmin, zmax-zi )
next
}
if( zi == 0 ) { pcube[i, ] = 0 ; next }
if( zi <= zmin ) { pcube[i, ] = 0 ; pcube[i,idxneg]=1 ; next } # dfmm$pcube[1, ] ;
if( zmax <= zi ) { pcube[i, ] = 0 ; pcube[i,idxpos]=1 ; next } # dfmm$pcube[2, ] }
s[ 1:n ] = 0
if( 0 < zi )
{
# numpos must be nonzero
j = findInterval( zi, bkpos, rightmost.closed=TRUE )
if( 1 < j ) { s[ idxpos[1:(j-1)] ] = 1 }
s[ idxpos[j] ] = (zi - bkpos[j]) / generator[ idxpos[j] ]
}
else if( zi < 0 )
{
# numneg must be nonzero
j = findInterval( zi, bkneg, rightmost.closed=TRUE, left.open=TRUE ) #; cat( "j=", j, '\n' )
if( j < numneg ) { s[ idxneg[ (j+1):numneg ] ] = 1 } # full coefficient on the negative generators past index j
s[ idxneg[j] ] = 1 - (zi - bkneg[j]) / (-generator[ idxneg[j] ])
}
masklo = (s < 0)
maskhi = (1 < s)
if( any(masklo | maskhi) )
{
count = sum(masklo)
if( 0 < count )
{
deltamax = max( -s )
log_level( WARN, "invert.zonoseg(). %d output values slightly less than 0 (deltamax=%g) changed to 0.",
count, deltamax )
s[ masklo ] = 0
}
count = sum(maskhi)
if( 0 < count )
{
deltamax = max( s-1 )
log_level( WARN, "invert.zonoseg(). %d output values slightly greater than 1 (deltamax=%g) changed to 1.",
count, deltamax )
s[ maskhi ] = 1
}
}
pcube[i, ] = s
}
if( FALSE )
{
# test it
test = as.double( x$matroid$matrix %*% t(pcube) ) - z
print( range(test) )
}
#rnames = names(z)
#if( is.null(rnames) ) rnames = 1:numpoints
#out = data.frame( row.names=rnames )
out = data.frame( z=z )
# out$z = z
out$pcube = pcube
return( out )
}
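# quick sketch of inversion, never executed (continuing the illustration above):
if( FALSE )
{
zono = zonoseg( c(1,-2,3) )
invert( zono, c(-1,0,2) )$pcube # e.g. z=-1 comes from the cube point (0, 0.5, 0), since 0.5*(-2) = -1
}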
inside_zonoseg <- function( x, p )
{
ok = is.numeric(p) && 0<length(p)
if( ! ok )
{
log_level( ERROR, "p is invalid." )
return(NULL)
}
# rnames = names(p)
# if( is.null(rnames) || anyDuplicated(rnames)!=0 )
# rnames = 1:length(p)
# compute distances from x$segment endpoints
distance = pmax( x$segment[1] - p, p - x$segment[2] )
out = data.frame( p=p )
# out$p = p
out$inside = distance <= 0
out$distance = distance
out$idxhyper = 1L # there is only 1 hyperplane, the empty set {}
return( out )
}
getsegment.zonoseg <- function( x )
{
return( x$segment )
}
getsegment2trans.zonoseg <- function( x )
{
return( x$segment2trans )
}
dfminmax <- function( generator )
{
pcube = rbind( ifelse(generator<0,1,0), ifelse(0<generator,1,0) )
#colnames(pcube) = names(generator)
#pcube = rbind( pcube, 0.5 )
out = data.frame( row.names=c('zmin','zmax') )
out$value = pcube %*% generator
out$pcube = pcube
return( out )
}
dftrans2 <- function( generator )
{
# treat special cases
for( pass in 1:2 )
{
if( pass == 1 )
mask = (0 < generator)
else
mask = (0 <= generator) # only different if generator has a 0
dfrun = findRunsTRUE(mask,TRUE)
if( nrow(dfrun) <= 1 )
{
# mask is optimal and has 2 (or 0) transitions, so we are done with little work
source_max = as.numeric(mask) #; print(source_max )
source_min = 1 - source_max #; print( source_min )
source = rbind( source_min, source_max )
colnames(source) = names(generator)
out = data.frame( row.names=c('zmin-2trans','zmax-2trans') )
out$value = source %*% generator
out$source = source
return(out)
}
}
# now we have to do more work
generator2 = rep( generator, 2 )
# find local mins and maxs in cumsum(generator2)
minlocal = integer(0)
maxlocal = integer(0)
genprev = generator2[1]
if( genprev == 0 )
{
# wrap around to find a non-zero one
idx = which( sign(generator2) != 0 )
genprev = generator2[ idx[ length(idx) ] ]
}
n = length(generator2)
for( k in 2:n )
{
gen = generator2[k]
if( gen == 0 ) next # ignore it
if( genprev<0 && 0<gen )
# local min
minlocal = c( minlocal, k-1 )
else if( 0<genprev && gen<0 )
# local max
maxlocal = c( maxlocal, k-1 )
genprev = gen
}
#cat( "minlocal =", minlocal, '\n' )
#cat( "maxlocal =", maxlocal, '\n' )
minmax = expand.grid( minlocal, maxlocal )
colnames(minmax) = c('minlocal','maxlocal')
#print( minmax )
rowdiff = minmax[ ,2] - minmax[ ,1]
minmax = minmax[ 0<rowdiff & rowdiff<n, ] #; print( minmax )
cs = cumsum( generator2 ) #; print(cs)
delta = cs[ minmax[ ,2] ] - cs[ minmax[ ,1] ]
minmax = cbind( minmax, delta ) #; print( minmax )
idx = which.max(delta)[1]
#cat( "argmin=", minmax[idx,1], " argmax=", minmax[idx,2], " sum=", delta[idx], '\n' )
imin = minmax[idx,1] + 1L # the +1L is needed because the local min marks the index just *before* the run starts
imax = minmax[idx,2]
# now make it wrap around if necessary
run = ( ((imin:imax)-1L) %% length(generator) ) + 1L
source_max = numeric( length(generator) )
source_max[run] = 1 #; print(source_max )
source_min = 1 - source_max #; print( source_min )
source = rbind( source_min, source_max )
colnames(source) = names(generator)
out = data.frame( row.names=c('zmin-2trans','zmax-2trans') )
out$value = source %*% generator
out$source = source
return( out )
}
# x a matroid, generated from a matrix
#
# returns a list of zonoseg's
# if there is no matrix, or no multiples, then it returns NULL
makezonoseglist <- function( x )
{
mat = getmatrix( x )
if( is.null(mat) ) return(NULL)
nummultiples = length(x$multiple)
if( nummultiples == 0 ) return(NULL)
gndorig = getground(x)
idxfromgroundORIG = idxfromgroundfun( gndorig )
out = vector( nummultiples, mode='list' )
for( i in 1:nummultiples )
{
cmax = x$multiplesupp$cmax[i]
idxcol = idxfromgroundORIG[ x$multiple[[i]] ]
#cat( "cmax=", cmax, " idxcol=", idxcol, '\n' )
mat1 = mat[ cmax, idxcol, drop=FALSE] # ; print( mat1 )
out[[i]] = zonoseg( mat1, ground=gndorig[idxcol] )
}
return( out )
}
if( FALSE )
{
is_pointed.zonoseg <- function( x )
{
return( 0 %in% x$segment )
}
is_salient.zonoseg <- function( x )
{
return( 0 %in% x$segment )
}
}
if( FALSE )
{
minkowskisum.zonoseg <- function( zono1, zono2, e0=0, ground=NULL, ... )
{
if( ! inherits(zono2,"zonoseg") )
{
log_level( ERROR, "2nd argument zono2 is invalid." )
return(NULL)
}
# get the 2 matrices and cbind them
mat1 = zono1$matroid$matrix
mat2 = zono2$matroid$matrix
mat = cbind(mat1,mat2)
#gnd1 = getground( zono1$matroid )
#gnd2 = getground( zono2$matroid )
#gnd = c( gnd1, gnd1[length(gnd1)] + gnd2 )
out = zonoseg( mat, e0=e0, ground=ground )
return( out )
}
'%+%.zonoseg' <- function(zono1,zono2)
{
return( minkowskisum( zono1, zono2 ) )
}
}
#-------- UseMethod() calls --------------#
getsegment <- function( x )
{
UseMethod("getsegment")
}
getsegment2trans <- function( x )
{
UseMethod("getsegment2trans")
}
##################### deadwood below ###################################
# x$shift is the sum of the negative generators; subtracting this from the zonoseg
# translates it to a "standard" segment [0,X]
# x a zonoseg object
# z a vector of real numbers, all should be inside the zonoseg segment
invert_old.zonoseg <- function( x, z, ... )
{
n = length(z)
ok = is.numeric(z) && 0<n
if( ! ok )
{
log_level( ERROR, "z is invalid." )
return(NULL)
}
generator = as.double( x$matroid$matrix )
dfmm = dfminmax( generator )
zmin = dfmm$value[1]
zmax = dfmm$value[2]
absgen = abs(generator)
bkpnt = c( 0, cumsum(absgen) )
maskneg = (generator < 0)
m = length( x$matroid$ground )
source = matrix( NA_real_, n, m )
colnames(source) = as.character( x$matroid$ground )
s = numeric( m )
for( i in 1:n )
{
zi = z[i]
inside = zmin <= zi && zi <= zmax
if( ! inside ) next
if( zi == zmin ) { source[i, ] = dfmm$source[1, ] ; next }
if( zi == zmax ) { source[i, ] = dfmm$source[2, ] ; next }
zprime = zi - x$shift
j = findInterval( zprime, bkpnt, rightmost.closed=TRUE )
s[ 1:m ] = 0
if( 1 < j ) { s[ 1:(j-1) ] = 1 }
s[ j ] = (zprime - bkpnt[j]) / absgen[j]
# negative generators are special
s[maskneg] = 1 - s[maskneg]
source[i, ] = s
}
rnames = names(z)
if( is.null(rnames) ) rnames = 1:n
out = data.frame( row.names=rnames )
out$z = z
out$source = source
return( out )
}
| /scratch/gouwar.j/cran-all/cranData/zonohedra/R/zonoseg.R |
# zonotope.R
#
# functions common to zonoseg, zonogon, and zonohedron
getmatrix.zonotope <- function( x )
{
return( x$matroid$matrix )
}
getmatroid.zonotope <- function( x )
{
return( x$matroid )
}
getcenter.zonotope <- function( x )
{
return( x$center )
}
minkowskisum.zonotope <- function( zono1, zono2, e0=0, e1=1.e-6, e2=1.e-10, ground=NULL, ... )
{
# get the 2 matrices and cbind them
mat1 = zono1$matroid$matrix
mat2 = zono2$matroid$matrix
m1 = nrow(mat1)
m2 = nrow(mat2)
if( m1 != m2 )
{
log_level( ERROR, "dimension mismatch m1 = %d != %d = m2.", m1, m2 )
return(NULL)
}
mat = cbind(mat1,mat2)
colnames(mat) = NULL
if( is.null(ground) )
{
# concat g1 and translated g2
g1 = getground(zono1$matroid)
g2 = getground(zono2$matroid)
ground = c( g1, g1[ length(g1) ] + g2 )
}
if( m1 == 3 )
out = zonohedron( mat, e0=e0, e1=e1, e2=e2, ground=ground )
else if( m1 == 2 )
out = zonogon( mat, e0=e0, e1=e1, ground=ground )
else if( m1 == 1 )
out = zonoseg( mat, e0=e0, ground=ground )
else
{
log_level( ERROR, "dimension m1 = %d is invalid.", m1 )
return(NULL)
}
return( out )
}
'%+%.zonotope' <- function(zono1,zono2)
{
return( minkowskisum( zono1, zono2 ) )
}
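# usage sketch, never executed (the two zonotopes must have the same dimension):
if( FALSE )
{
zono = zonohedron( classics.genlist[[1]] ) %+% zonohedron( classics.genlist[[2]] )
}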
is_pointed.zonotope <- function( x )
{
m = nrow( getmatrix(x) )
if( m == 3 )
# zonohedron
return(3 <= length(x$facet0))
else if( m == 2 )
# zonogon
return(length(x$facet0) == 2)
else if( m == 1 )
# zonoseg
return(0 %in% x$segment)
log_level( ERROR, "m = %d is invalid.", m )
return( NULL )
}
is_salient.zonotope <- function( x )
{
m = nrow( getmatrix(x) )
if( m == 3 )
# zonohedron
return( 0 < length(x$facet0) )
else if( m == 2 )
# zonogon
return( 0 < length(x$facet0) )
else if( m == 1 )
# zonoseg
return(0 %in% x$segment)
log_level( ERROR, "m = %d is invalid.", m )
return( NULL )
}
# support.zonotope()
#
# x a zonogon or zonohedron object
# direction M x n matrix, with the M directions in the rows (n = 2 or 3); the zero direction is invalid
# tol tolerance for the argmax, i.e. for generators being in the same affine subspace
#
# returns a data.frame with M rows and these columns:
# direction the given matrix of directions
# value the value of the support function of x, in the given direction
# argmax a point on the boundary of x where the max is taken
# dimension the dimension of the argmax set: 0 means a vertex, 1 means an edge, and 2 means a facet (of a zonohedron)
support.zonotope <- function( x, direction, tol=5.e-15 )
{
genmat = getmatrix( getsimplified(x$matroid) )
direction = prepareNxM( direction, nrow(genmat) )
if( is.null(direction) ) return(NULL)
n = ncol(genmat)
m = nrow(direction)
value = rep( NA_real_, m )
argmax = matrix( NA_real_, m, nrow(genmat) )
dimension = rep( NA_integer_, m )
functionalmat = direction %*% genmat # mxn
if( 0 < tol )
# make entries close to 0, exactly 0
functionalmat[ which( abs(functionalmat) <= tol, arr.ind=TRUE ) ] = 0
for( i in 1:m )
{
functional = functionalmat[i, ]
fun0 = (functional==0)
if( all(fun0) ) next # ignore the 0 functional
pcube = 0.5*sign(functional) # cube here is [-1/2,1/2]^N
z = as.double(genmat %*% pcube) + x$center # z is in the non-centered zonogon
value[i] = sum( direction[i, ] * z )
argmax[i, ] = z
dimension[i] = sum( fun0 )
}
rnames = rownames(direction)
if( is.null(rnames) || anyDuplicated(rnames)!=0 ) rnames = 1:m
out = data.frame( row.names=rnames )
out$direction = direction
out$value = value
out$argmax = argmax
out$dimension = dimension
return(out)
}
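# usage sketch, never executed:
if( FALSE )
{
zono = zonohedron( classics.genlist[[5]] )
support( zono, diag(3) ) # the support function of zono in the 3 coordinate-axis directions
}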
# x a zonotope whose matroid is simple
#
# replace each generator g by the pair -g/2 , +g/2
symmetrize.zonotope <- function( x, e0=0, e1=1.e-6, e2=1.e-10, ... )
{
if( ! is_simple(x$matroid) )
{
log_level( ERROR, "matroid is not simple" )
return(NULL)
}
matgen = getmatrix(x$matroid)
matgen = cbind( matgen/2, -matgen/2 )
colnames(matgen) = NULL
gndgen = getground(x$matroid)
gndgen = c( gndgen, gndgen[ length(gndgen) ] + gndgen )
m = nrow( matgen )
if( m == 3 )
out = zonohedron( matgen, e0=e0, e1=e1, e2=e2, ground=gndgen )
else if( m == 2 )
out = zonogon( matgen, e0=e0, e1=e1, ground=gndgen )
else if( m == 1 )
out = zonoseg( matgen, e0=e0, ground=gndgen )
else
{
log_level( ERROR, "dimension m = %d is invalid.", m )
return(NULL)
}
return(out)
}
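# usage sketch, never executed (the matroid of the input must be simple):
if( FALSE )
{
zsym = symmetrize( zonohedron( classics.genlist[[5]] ) )
getcenter( zsym ) # the symmetrized zonotope is centered at 0
}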
# methods taken from:
# Optimal Whitening and Decorrelation
# Agnan Kessy1, Alex Lewin, and Korbinian Strimmer (2016)
#
# returns a new zonogon or zonohedron, as spherical as possible
spherize.zonotope <- function( x, method="ZCA", ... )
{
M = x$matroid$rank
if( M == 1 )
{
log_level( WARN, "Cannot spherize a zonoseg, returning the original zonoseg." )
return( x )
}
full = c("ZCA","PCA-COR")
idx = pmatch( toupper(method), full )
ok = is.finite(idx) && length(idx)==1
if( ! ok )
{
log_level( ERROR, "method='%s' is invalid.", method )
return(NULL)
}
method = full[idx]
center = x$facet$center
center = rbind( center, -center ) # add the other half, by symmetry
res = base::svd( center, nu=0, nv=M )
if( ! all( 0 < res$d ) )
{
log_level( ERROR, "Internal error. center matrix is invalid." )
return(NULL)
}
if( method == "ZCA" )
{
# calculate the "whitening", or "sphering" matrix, which is MxM
W = res$v %*% diag( 1/res$d ) %*% t(res$v)
}
else if( method == "PCA-COR" )
{
sigma = t(center) %*% center
e = diag( sigma ) #; print(e)
P = diag( 1/sqrt(e) ) %*% sigma %*% diag( 1/sqrt(e) )
#cat( "P=", P, '\n' )
decomp = eigen( P, symmetric=TRUE ) #; print( decomp )
G = decomp$vectors
theta = decomp$values
Ptest = G %*% diag(theta) %*% t(G)
#cat( "Ptest=", P, '\n' )
W = diag( sqrt(1/theta) ) %*% t(G) %*% diag( sqrt(1/e) )
#W = t(W)
}
# test the W
#centerp = center %*% t(W) #; print( t(centerp) %*% centerp )
out = lintransform( x, W )
attr( out, "sphering" ) = W
return( out )
}
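# usage sketch, never executed:
if( FALSE )
{
zs = spherize( zonohedron( colorimetry.genlist[[2]] ) ) # default method "ZCA"
attr( zs, "sphering" ) # the 3x3 whitening matrix W that was applied
}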
#-------- UseMethod() calls --------------#
spherize <- function( x, ... )
{
UseMethod("spherize")
}
symmetrize <- function( x, ... )
{
UseMethod("symmetrize")
}
lintransform <- function( x, W )
{
UseMethod("lintransform")
}
is_pointed <- function( x )
{
UseMethod('is_pointed')
}
is_salient <- function( x )
{
UseMethod('is_salient')
}
inside <- function( x, p )
{
UseMethod("inside")
}
raytrace <- function( x, base, direction, ... )
{
UseMethod("raytrace")
}
section <- function( x, normal, beta, ... )
{
UseMethod("section")
}
invert <- function( x, z, ... )
{
UseMethod("invert")
}
invertboundary <- function( x, point, tol )
{
UseMethod("invertboundary")
}
support <- function( x, direction, tol=5.e-15 )
{
UseMethod("support")
}
'%+%' <- function(zono1,zono2)
{
UseMethod('%+%')
}
minkowskisum <- function(zono1,zono2,...)
{
UseMethod("minkowskisum")
}
getnormal <- function( x, ... )
{
UseMethod("getnormal")
}
getmetrics <- function( x )
{
UseMethod("getmetrics")
}
getmatrix <- function( x )
{
UseMethod('getmatrix')
}
getmatroid <- function( x )
{
UseMethod("getmatroid")
}
getcenter <- function( x )
{
UseMethod("getcenter")
}
canonicalboundary <- function( x, gndpair, cube=FALSE )
{
UseMethod("canonicalboundary")
}
# x a zonotope object
# p an M x n matrix of query points, where n is the dimension of the zonotope (1, 2, or 3)
#
# value see inside_zonotope()
inside.zonotope <- function( x, p )
{
m = nrow( getmatrix(x) )
if( m == 1 )
{
# a zonoseg is special
return( inside_zonoseg(x,p) )
}
p = prepareNxM( p, m )
if( is.null(p) ) return(NULL)
# translate p to the centered zonohedron
# subract x$center from every row of p
# gcentered = p - matrix( x$center, nrow(p), m, byrow=TRUE ) #; print(gcentered)
#gcentered = duplicate( p )
#res = .Call( C_plusEqual, gcentered, -x$center, 1L ) # gcentered point_centered in place
#if( is.null(res) ) return(NULL)
gcentered = .Call( C_sumMatVec, p, -x$center, 1L )
hg = tcrossprod( x$facet$normal, gcentered ) #; print( str(hg) )
out = inside_zonotope( x, p, hg )
return( out )
}
# inside_zonotope()
#
# x a zonogon or zonohedron object
# p M x n matrix, with the M query points in the rows (n = 2 or 3)
# hg N x M matrix, where N is the number of facets/hyperplanes of getsimplified(x).
# hg = x$facet$normal %*% t(gcentered) = tcrossprod( x$facet$normal, gcentered ),
# where gcentered is p after subtracting x$center from every row;
# the product is (N x n) %*% (n x M)
#
# value a data frame with columns
# p the given M x n input matrix
# inside TRUE means inside the closed zonotope - interior OR boundary
# distance numeric signed distance to the zonotope boundary;
# negative or 0 means inside, and positive means in the exterior
# NOTE: if positive then the distance is only approximate.
# idxhyper the index, in the simplified matroid, of the critical facet
inside_zonotope <- function( x, p, hg )
{
distance = abs(hg) - matrix( x$facet$beta, nrow(hg), ncol(hg) ) #; print(distance)
if( TRUE )
{
data = .Call( C_whichMaxMatrix, distance, 2L )
idx = data[[1]] #; print( idx )
distance = data[[2]] #; print( distance )
}
else if( FALSE )
{
dtrans = t(distance)
idx = max.col(dtrans)
distance = dtrans[ cbind( 1:nrow(dtrans), idx ) ]
}
else
{
myfun <- function(z) { idx=which.max(z) ; return( c(idx,z[idx]) ) }
data = apply( distance, 2, myfun ) #function(z) {suppressWarnings( max(z,na.rm=TRUE) ) } ) #; print(distance)
idx = as.integer( data[1, ] )
distance = data[2, ]
}
# distance[ ! is.finite(distance) ] = NA_real_
if( is_salient(x) )
{
# special override for black
black = apply( p, 1, function(v) { isTRUE(all(v==0)) } ) #; print(black)
if( any(black) )
distance[black] = 0
# special override for white. Fortunately multiplication by 0.5 and 2 preserves all precision.
white = apply( p, 1, function(v) { isTRUE(all(v==2*x$center)) } ) #; print(white)
if( any(white) )
distance[white] = 0
}
rnames = rownames(p)
if( is.null(rnames) || anyDuplicated(rnames)!=0 ) rnames = 1:nrow(p)
#gndgen = getground( getsimplified(x$matroid) )
out = data.frame( row.names=rnames )
out$p = p
out$inside = distance <= 0
out$distance = distance
out$idxhyper = idx
return(out)
}
# x a zonogon or zonohedron
# pmat P x N matrix of points in the N-cube, where N is the number of generators of the original matroid
#
# return P x M matrix of points in the M-cube, where M is the number of generators of the simplified matroid
# the new M-D point maps the same point in the zonogon or zonohedron as the given N-point,
# except for the possible offset from the original zono* and the simplified zono*
# the offset happens when there is a multiple group with "mixed" generators
projectcubepoints <- function( x, pmat )
{
ok = is.matrix(pmat) && is.numeric(pmat)
if( ! ok )
{
log_level( ERROR, "matrix pmat is invalid." )
return(NULL)
}
matorg = getmatrix( x$matroid )
n = ncol(matorg)
if( ncol(pmat) != n )
{
log_level( ERROR, "matrix pmat has ncol(pmat)=%d != %d.", ncol(pmat), n )
return(NULL)
}
if( is_simple(x$matroid) ) return( pmat ) # no change !!
gndorg = getground( x$matroid )
matsimp = getmatrix( getsimplified(x$matroid) )
m = ncol(matsimp)
idxfromgroundORIG = idxfromgroundfun( gndorg )
nummultiples = length(x$matroid$multiple)
if( nrow(x$matroid$multiplesupp) != nummultiples )
{
log_level( ERROR, "nrow(x$matroid$multiplesupp)=%d != %d.",
nrow(x$matroid$multiplesupp), nummultiples )
return(NULL)
}
multiplesupp = x$matroid$multiplesupp
#if( 0 < nummultiples && any( multiplesupp$mixed ) )
# {
# log_level( ERROR, "Cannot project points, because the matroid has a multiple group with mixed directions." )
# return(NULL)
# }
if( ! is.null( x$zonoseg ) )
{
# zonoseg list is already computed
#cat( "found zonoseg[[]] list.\n" )
zonoseg = x$zonoseg
}
else if( 0 < nummultiples )
{
# compute the zonoseg list now
zonoseg = makezonoseglist( x$matroid )
}
affectedORIG = logical(n)
affectedORIG[ idxfromgroundORIG[ x$matroid$loop ] ] = TRUE
affectedORIG[ idxfromgroundORIG[ fastunion(x$matroid$multiple) ] ] = TRUE
#cat( "affectedORIG=", affectedORIG, '\n' )
affectedSIMP = logical(m)
affectedSIMP[ multiplesupp$colidx ] = TRUE
#cat( "affectedSIMP=", affectedSIMP, '\n' )
points = nrow(pmat)
out = matrix( NA_real_, points, m )
rownames(out) = rownames(pmat)
colnames(out) = colnames(matsimp)
for( k in 1:points )
{
if( ! is.finite( pmat[k,1] ) ) next # bad point
pcube = pmat[k, ]
#cat( "simple pcube=", pcube, '\n' )
# ok = all( 0 <= pcube & pcube <= 1 )
pout = rep( NA_real_, m )
# assign the unaffected coords, which we hope are in the majority
pout[ ! affectedSIMP ] = pcube[ ! affectedORIG ]
# handle the multiple groups
for( i in seq_len(nummultiples) )
{
#cat( "multiple ", i, '\n' )
#cmax = multiplesupp$cmax[i]
mat1 = getmatrix( zonoseg[[i]] )
#idxcol = idxfromgroundORIG[ x$matroid$multiple[[i]] ]
seg = getsegment( zonoseg[[i]] )
zmin = seg[1]
zmax = seg[2]
# compute point z in the zonoseg.
#cmax = multiplesupp$cmax[i]
idxcol = idxfromgroundORIG[ x$matroid$multiple[[i]] ]
#cat( "cmax=", cmax, " idxcol=", idxcol, '\n' )
#mat1 = matorg[ cmax, idxcol, drop=FALSE] # ; print( mat1 )
z = as.double( mat1 %*% pcube[idxcol] ) # z is just a scalar
# compute lambda from z
# lambda=0 -> minor and lambda=1 -> major
# lambda = ( z - multiplesupp$minor[i,cmax] ) / ( multiplesupp$major[i,cmax] - multiplesupp$minor[i,cmax] )
# lambda = z / multiplesupp$major[i,cmax]
# map interval [zmin,zmax] in the zonoseg to [0,1] in cube
pout[ multiplesupp$colidx[i] ] = (z - zmin) / (zmax - zmin)
# cat( "z=", z, " x =", (z-zmin)/(zmax-zmin), " multiplesupp$colidx[i]=", multiplesupp$colidx[i], '\n' )
}
# loops in the original matroid can be ignored, they are dropped here
out[k, ] = pout
}
return( out )
}
# x a zonogon or zonohedron
# pmat P x M matrix of points in the M-cube, where M is the number of generators of the simplified matroid
# tol boundary tolerance
#
# return P x N matrix of points in the N-cube, where N is the number of generators of the original matroid
# the new N-point maps the same point in the zonogon or zonohedron as the given M-point,
# except for the offset from the original zono* and the simplified zono*
invertcubepoints <- function( x, pmat, tol=5.e-15 )
{
ok = is.matrix(pmat) && is.numeric(pmat)
if( ! ok )
{
log_level( ERROR, "matrix pmat is invalid." )
return(NULL)
}
matsimp = getmatrix( getsimplified(x$matroid) )
if( ncol(pmat) != ncol(matsimp) )
{
log_level( ERROR, "matrix pmat has ncol(pmat)=%d != %d.", ncol(pmat), ncol(matsimp) )
return(NULL)
}
if( is_simple(x$matroid) ) return( pmat ) # no change !!
matorg = getmatrix( x$matroid )
gndorg = getground( x$matroid )
idxfromgroundORIG = idxfromgroundfun( gndorg )
#cat( "idxfromgroundORIG=", idxfromgroundORIG, '\n' )
nummultiples = length(x$matroid$multiple)
if( nrow(x$matroid$multiplesupp) != nummultiples )
{
log_level( ERROR, "nrow(x$matroid$multiplesupp)=%d != %d.",
nrow(x$matroid$multiplesupp), nummultiples )
return(NULL)
}
multiplesupp = x$matroid$multiplesupp
#if( 0 < nummultiples && any( multiplesupp$mixed ) )
# {
# log.string( ERROR, "Cannot invert points, because the matroid has a multiple group with mixed directions." )
# return(NULL)
# }
if( ! is.null( x$zonoseg ) )
{
# zonoseg list is already computed
#cat( "found zonoseg[[]] list.\n" )
zonoseg = x$zonoseg
}
else if( 0 < nummultiples )
{
# compute the zonoseg list now
zonoseg = makezonoseglist( x$matroid )
}
n = length(gndorg)
affectedORIG = logical(n)
affectedORIG[ idxfromgroundORIG[ x$matroid$loop ] ] = TRUE
affectedORIG[ idxfromgroundORIG[ fastunion(x$matroid$multiple) ] ] = TRUE
#cat( "affectedORIG=", affectedORIG, '\n' )
affectedSIMP = logical(ncol(pmat))
affectedSIMP[ multiplesupp$colidx ] = TRUE
#cat( "affectedSIMP=", affectedSIMP, '\n' )
# make simple lookup tables
jnext = c( 2:n, 1L )
jprev = c( n, 1:(n-1L) )
points = nrow(pmat)
m = ncol(pmat)
out = matrix( NA_real_, points, n )
rownames(out) = rownames(pmat)
colnames(out) = as.character( x$matroid$ground )
for( k in 1:points )
{
if( ! is.finite( pmat[k,1] ) ) next # bad point
pcube = pmat[k, ]
#cat( "simple pcube=", pcube, '\n' )
# ok = all( 0 <= pcube & pcube <= 1 )
pout = numeric(n) # rep( NA_real_, n )
# assign the unaffected coords, which we hope are in the majority
pout[ ! affectedORIG ] = pcube[ ! affectedSIMP ]
# handle the multiple groups
for( i in seq_len(nummultiples) )
{
#cat( "multiple ", i, '\n' )
j = multiplesupp$colidx[i] # j is in 1:m
s = pcube[ j ]
sprev = pcube[ (j-2) %% m + 1L ] # cyclic predecessor of coordinate j in 1:m
snext = pcube[ (j) %% m + 1L ] # cyclic successor of coordinate j in 1:m
# compute point s in the zonoseg. 0 -> minor and 1 -> major
# cmax = multiplesupp$cmax[i]
# s = (1-lambda)*multiplesupp$minor[i,cmax] + lambda*multiplesupp$major[i,cmax]
#cat( "lambda=", lambda, " s=", s, '\n' )
# lift/invert s to the original cube
# lift = invert( zonoseg[[i]], s, tol=tol )
#print( lift )
#print( idxfromgroundORIG[ x$matroid$multiple[[i]] ] )
vec = invertval( zonoseg[[i]], s, sprev, snext, tol=tol )
if( is.null(vec) ) return(NULL)
pout[ idxfromgroundORIG[ x$matroid$multiple[[i]] ] ] = vec # lift$pcube
}
# handle the loops, which we set to 0 or 1
# but to minimize the # of transitions,
# we prefer 0 in some cases, and 1 in other cases
for( i in x$matroid$loop )
{
j = idxfromgroundORIG[ i ]
pp = pout[ jprev[j] ]
pn = pout[ jnext[j] ]
if( pp==0 || pp==1 )
# on boundary of square
pout[j] = pp
else if( pn==0 || pn==1 )
# also on boundary of square
pout[j] = pn
else
{
# in interior of square, find out whether 0s or 1s are dominant
if( sum( pout==0 ) < sum( pout==1 ) )
# 1s are dominant so choose 0
pout[j] = 0
else
# 0s are dominant so choose 1
pout[j] = 1
}
}
out[k, ] = pout
}
return( out )
}
# zono the zonoseg,
# s value to invert, in [0,1]
# sprev previous value in some hi-dimensional cube, in [0,1]
# snext next value in some hi-dimensional cube, in [0,1]
# tol tolerance passed to zonoseg.invert
#
# returns a point in cube corresponding to zono
invertval <- function( zono, s, sprev, snext, tol=5.e-15 )
{
# get the number of generators
n = length( zono$matroid$ground )
out = numeric( n )
lambda = lambdavec( sprev, snext ) #; print(lambda)
zmin = getsegment(zono)[1] # if the generators are not mixed, zmin is 0
zmax = getsegment(zono)[2]
if( 0 < lambda[1] )
{
# this is the increasing part
# convert decreasing to increasing by taking complements
lift = invert( zono, s*zmin + (1-s)*zmax, tol=tol )
out = out + lambda[1] * (1 - lift$pcube)
}
if( 0 < lambda[2] )
{
# this is the decreasing part
lift = invert( zono, (1-s)*zmin + s*zmax, tol=tol )
out = out + lambda[2] * lift$pcube
}
return( out )
}
# zono a zonotope that is salient, which means that 0 is in the boundary
#
# returns a normal vector so that the zonotope is in the non-negative closed halfspace
# if zono is also pointed, then the zonotope is in the positive open halfspace (except for 0 itself)
supportingnormal0 <- function( zono )
{
matgen = getmatrix( getsimplified(zono$matroid) )
m = nrow( matgen )
if( ! is_salient(zono) ) return( rep(NA_real_,m) )
if( m == 1 )
{
# this is a zonoseg and *not* mixed, so all generators are the same sign, or 0
return( sign( sum(matgen) ) )
}
# get the normals for all facets that meet 0
# normal0 is Fxm where F is the number of these facets
normal0 = zono$facet$normal[ zono$facet0, , drop=FALSE ]
# unfortunately we do not know whether these normals are inward or outward
# use the center to orient them all inward
interiorvec = zono$center
test = normal0 %*% interiorvec
dim(test) = NULL
# in the next line, sign(test) is recycled to all columns of normal0
normal0 = sign(test) * normal0 # use recycling rule
# normal0 now has rows that all point inward
# take their sum
out = .colSums( normal0, nrow(normal0), ncol(normal0) )
# unitize
out = out / sqrt( sum(out^2) )
if( TRUE && is_pointed(zono) )
{
# verify the result
test = out %*% matgen
if( ! all( 0 < test ) )
{
log_level( FATAL, "computed normal vector is invalid. %d of %d generators failed the test.",
sum(test<=0), length(test) )
return(NULL)
}
}
return( out )
}
# xprev, xnext coordinates of a point in the unit square; not verified
#
# returns a pair of coefficient weights, suitable for a convex combination
#
lambdavec <- function( xprev, xnext )
{
denom = xprev*(1-xprev) + xnext*(1-xnext)
lambda = numeric(2)
if( denom == 0 )
{
# at a corner of the square
if( xprev==0 && xnext==1 )
# limit is 1
lambda[1] = 1
else if( xprev==1 && xnext==0 )
# limit is 0
lambda[1] = 0
else
# no limit at (0,0) and (1,1), so just choose something valid
return( c(0,1) )
}
else
{
# not a corner of the square
lambda[1] = ( (1-xprev) * xnext * ((1-xnext) + xprev) ) / denom
}
lambda[2] = 1 - lambda[1]
return( lambda )
}
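# quick check of the formula, never executed (values follow directly from the code above):
if( FALSE )
{
lambdavec( 0, 1 ) # c(1,0), from the corner case
lambdavec( 1, 0 ) # c(0,1), from the corner case
lambdavec( 0.5, 0.5 ) # c(0.5,0.5)
}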
| /scratch/gouwar.j/cran-all/cranData/zonohedra/R/zonotope.R |
## ----setup, include=FALSE---------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
## ---- echo=FALSE, results='asis'----------------------------------------------
options(old_opt)
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/matroids.R |
---
title: "Matroids"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
toc_depth: 2
number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
%\VignetteIndexEntry{Matroids}
%\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
max-width: 750px; /* make a little wider, default is 700px */
}
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
```
<br><br>
# Introduction
The focus of this vignette is the `zonohedron()` constructor
and specifically its tolerance argument `e2`,
whose default value is `1.e-10`.
One goal of the **zonohedra** package is to handle all possible
zonogon facets, not just the parallelograms in the generic case.
The input to the constructor is matrix whose columns are the generators
of the zonohedron.
The generators of a specific facet span a plane, and adding another
generator increases the span to all of $\mathbb{R}^3$.
Stated another way, the set of generators of a specific facet has rank 2,
and is maximal with respect to this property.
So a naive way of determining the facets is to examine *all* subsets
of the generators and determine whether each one has this property.
This is hopelessly impractical.
Moreover, although the rank function is well-defined for matrices with
numbers in $\mathbb{R}$,
it is not computationally meaningful for floating-point numbers.
For example, if a set of floating-point vectors spans the xy-plane,
their rank is unambiguously 2; the smallest singular value is 0.
But if the set is given a random rotation,
the smallest singular value will be very small, but non-zero.
Some sort of tolerance is needed.
The central dogma is that there are vector generators in $\mathbb{R}^3$
that are very close to the given (dyadic rational floating point) vectors,
and have actual rank 2.
The package does a feasibility test that the floating point generators
could have come from true real vectors.
This test comes from the axioms of matroid theory.
The facet-finding method chosen for `zonohedron()` does not use rank,
but it also requires a tolerance - the argument `e2`.
The computational steps in `zonohedron()` are:
<ol>
<li>
Eliminate the zero generators; argument `e0` is used here
</li>
<li>
Unify the non-zero generators that are multiples of each other;
argument `e1` is used here.
Every set of two distinct generators $\{ v_i, v_j \}$ now has rank 2,
so their cross-product $v_i \times v_j \neq 0$.
</li>
<li>
Compute all pairwise cross-products of the generators,
and unitize them to the unit sphere.
For generators $v_i$ and $v_j$, denote the unit vector by
$u_{i,j} := v_i \times v_j / || v_i \times v_j ||$.
</li>
<li>
Perform a cluster analysis for the unitized cross-products,
using `e2` as a "pseudo-angular" threshold.
Special measures are taken so that vector $u_{i,j}$ is considered
identical to $-u_{i,j}$.
</li>
<li>
For each cluster of unit vectors,
take all the generators associated with this cluster and call them
the generators of a pair of antipodal facets.
Most of the clusters have only one unit vector,
and thus only 2 generators of antipodal parallelogram facets.
But some facets may have 3 or even more generators.
</li>
<li>
Perform a feasibility test on these subsets of generators,
and if the test fails, the zonohedron is invalid and the constructor fails.
This test depends on the hyperplane axioms of matroid theory,
and is outlined in the rest of the vignette.
</li>
</ol>
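For a concrete sketch, here is the constructor call with its three tolerance
arguments written out explicitly (the values shown match the defaults used
elsewhere in the package; `classics.genlist[[5]]` is one of the example
generator matrices discussed in the Conclusion below):
```{r, echo=TRUE, eval=FALSE}
library(zonohedra)
zono = zonohedron( classics.genlist[[5]], e0=0, e1=1.e-6, e2=1.e-10 )
```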
<br><br>
# Rank Functions
Let $E$ be a finite set of vectors in $\mathbb{R}^n$.
For any $A \subseteq E$ the _rank function_
$r(A) := \operatorname{dim}( \operatorname{span}(A) )$
has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(R1) $0 \le r(A) \le |A|$ (_cardinality bound_)
</li>
<li style="list-style: none">
(R2) If $A \subseteq B$, then $r(A) \le r(B)$ (_monotonicity_)
</li>
<li style="list-style: none">
(R3) $r(A \cup B) + r(A \cap B) \le r(A) + r(B)$ (_submodularity_)
</li>
</ul>
If $E$ is changed to be just a set of abstract _points_,
then an integer-valued function defined on subsets of
$E$ that satisfies the axioms
(R1), (R2), and (R3) defines a _matroid_ on the _ground set_ $E$.
The _rank_ of the matroid is defined to be $r(E)$.
We mostly follow references @Welsh1976 and @White1986.
A given matroid $M$ may not be represented by a set of vectors in $\mathbb{R}^n$.
But if it _is_, we say that $M$ is _representable over_ $\mathbb{R}$.
We also say that $M$ is a _vector matroid_.
From (R1) it follows that a point has rank 0 or 1.
A point of rank 0 is called a _loop_;
in a vector matroid a loop corresponds to the 0 vector.
A _multiple group_ is a maximal subset of size 2 or more
that has rank 1 and whose points all have rank 1
(so it contains no loops).
In a vector matroid a multiple group is a maximal set of
2 or more non-zero vectors
that are all multiples of each other.
A _simple matroid_ is a matroid with no loops or multiple groups.
A rank function is defined on every subset of $E$,
so it is much too large to deal with directly.
Matroid theory provides more efficient alternatives.
<br><br>
# Matroid Hyperplanes
In a matroid $M$ on a ground set $E$, a _hyperplane_ is a maximal subset
$H \subseteq E$ with $r(H)=r(E)-1$.
One can show that the set of hyperplanes has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(H1)
$E$ is not a hyperplane (_nontriviality_)
</li>
<li style="list-style: none">
(H2)
if $H_1$ and $H_2$ are hyperplanes and $H_1 \subseteq H_2$,
then $H_1 = H_2$ (_incomparability_)
</li>
<li style="list-style: none">
(H3)
if $H_1$ and $H_2$ are distinct hyperplanes and $x \in E$,
then there is a hyperplane $H_3$ with
$(H_1 \cap H_2) \cup x \subseteq H_3$ (_covering_)
</li>
</ul>
For a proof see @Welsh1976 p. 39.
Conversely,
if a collection of subsets of $E$ satisfies the axioms (H1), (H2) and (H3),
then the collection defines a valid rank function and a matroid on $E$.
To do this, first define the _corank_ function $c()$ by:
\begin{equation}
c(A) := \max \Bigl\{ k : \text{there are hyperplanes } H_1,..., H_k
\text{ where for all } j,
A \subseteq H_j \text{ and } H_1 \cap ... \cap H_{j-1} \nsubseteq H_j \Bigr\}
\end{equation}
And now define $r(A) := c(\varnothing) - c(A)$.
This function $r()$ satisfies the axioms (R1), (R2), and (R3).
The above formula appears in @White1986 p. 306, without a proof.
Given a collection of hyperplanes, checking the hyperplane axioms
(H1), (H2), and (H3)
is more efficient than checking the rank function axioms
(R1), (R2), and (R3),
but _still_ too time-consuming in practice.
<br><br>
# Matroid Circuits
In a matroid $M$ on a ground set $E$, a _circuit_ is a subset
$C \subseteq E$ with
$r(C)=|C|-1$ and $r(C - x) = r(C)$ for all $x \in C$.
One can show that the set of circuits has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(C1)
$\varnothing$ is not a circuit (_nontriviality_)
</li>
<li style="list-style: none">
(C2)
if $C_1$ and $C_2$ are circuits and $C_1 \subseteq C_2$,
then $C_1 = C_2$ (_incomparability_)
</li>
<li style="list-style: none">
(C3)
if $C_1$ and $C_2$ are distinct circuits and $x \in E$,
then there is a circuit
$C_3 \subseteq(C_1 \cup C_2) - x$ (_weak elimination_)
</li>
</ul>
For a proof see @Welsh1976 p. 9.
Conversely,
if a collection of subsets of $E$ satisfies the axioms (C1), (C2) and (C3),
then the collection defines a valid rank function and a matroid on $E$.
\begin{equation}
r(A) := |A| - \max \Bigl\{ k : \text{there are circuits } C_1,..., C_k
\text{ where for all } j,
C_j \subseteq A \text{ and } C_j \nsubseteq C_1 \cup ... \cup C_{j-1} \Bigr\}
\end{equation}
This formula appears in @White1986 p. 306, without a proof.
A circuit of size 1 is a loop.
A circuit of size 2 is a pair of points in a multiple group.
Recall that a _simple matroid_ is a matroid with no loops or multiple groups.
Thus, a simple matroid is a matroid with no circuits of size 1 or 2.
<br><br>
# Efficient Checking of Hyperplane Axioms
In this section we derive an efficient way to check
the hyperplane axioms, but only in the case when the matroid rank is 3.
Given an integer $d \ge 1$, a $d$-_partition of_ $E$ is a collection
of subsets of $E$, called _blocks_, with these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(D1) there are 2 or more blocks
</li>
<li style="list-style: none">
(D2)
each block has $d$ or more points
</li>
<li style="list-style: none">
(D3)
every $d$-element subset of $E$ is a subset of exactly one block
</li>
</ul>
One can show that the blocks of a $d$-partition satisfy the hyperplane axioms
(H1), (H2), and (H3).
For a proof see @Welsh1976 p. 40.
The resulting matroid on $E$ is called a _paving matroid_
and has rank $d{+}1$.
Note that the 3 properties of a $d$-partition can be checked efficiently.
**Theorem**
A matroid of rank $r \ge 2$ is a paving matroid
if and only if
every circuit has size $r$ or greater.
**Proof** See @Welsh1976, p. 40.
<br>
**Theorem**
A simple matroid $M$ of rank 3 is a paving matroid.
**Proof** (trivial)
Since $M$ is simple no circuit has size 1 or 2.
Therefore every circuit has size 3 or greater.
By the previous theorem, $M$ is paving. $\square$
Given a set of proposed hyperplanes for a matroid of rank 3,
we finally have an efficient way to check the hyperplane axioms,
by checking the $d$-partition block axioms instead.
<ol>
<li>simplify the hyperplanes</li>
<li>verify (D1) and (D2), which are linear in the number of hyperplanes</li>
<li>verify (D3), which is quadratic in the number of generators</li>
</ol>
For the hyperplane simplification in item 1,
the number of hyperplanes is preserved,
but all loops are removed,
and every generator except one from each multiple group are removed.
<br><br>
# Conclusion and Conjecture
To summarize, let $E$ be a finite set of floating point 3D vectors,
with no vector equal to 0 and no vector a multiple of another (with tolerances).
The vectors generate a zonohedron.
A collection of subsets of $E$ is then computed, with each subset coplanar,
or very close to coplanar using the tolerance parameter `e2` discussed above.
Each subset is the proposed set of generators of a facet of the
generated zonohedron, and all facets are represented.
These subsets are proposed as the hyperplanes of a matroid.
We have shown that:
<blockquote>
If $E$ can be slightly perturbed to a set of actual real
vectors $E' \subset \mathbb{R}^3$, so that the rank of each real hyperplane
is 2, and is maximal w.r.t. this property,
then these hyperplanes satisfy properties (D1), (D2), and (D3).
</blockquote>
In the software package, we use the contrapositive form:
<blockquote>
If these proposed hyperplanes do not satisfy (D1), (D2), and (D3),
then the hyperplanes do not form a valid matroid,
and $E$ _cannot_ be slightly perturbed to satisfy the desired rank=2 property.
</blockquote>
Even if the matroid is valid, the perturbation $E'$ may not exist,
because the matroid might not be representable over the real numbers
$\mathbb{R}$.
A classical example is the _Fano plane_ matroid on 7 points with 7 hyperplanes.
It has just too many hyperplanes, see @FanoWiki.
Nevertheless, we conjecture that such
non-representable matroids cannot occur in practice.
<blockquote>
**Conjecture**
If the hyperplanes for the floating point set $E$ are computed
following the procedure in the **Introduction**, and the tolerance `e2`
(depending on $E$) is sufficiently small,
then a perturbation $E' \subset \mathbb{R}^3$ representing the matroid exists.
</blockquote>
This statement is theoretical in nature,
since real numbers in $\mathbb{R}$ cannot be represented exactly.
The conjecture is true in some simple cases.
Before exploring this, call the hyperplanes of size 2 the _trivial hyperplanes_.
Note that for the Fano plane matroid, all 7 hyperplanes are
size 3 and non-trivial.
Suppose that _all_ the hyperplanes are trivial, so the matroid is uniform
and all the facets of the zonohedron are parallelograms.
Then no perturbation is needed at all; the given vectors
(with dyadic rational coordinates) already represent the matroid.
This is the case for 7 of the 13 classical zonohedra in `classics.genlist`.
And it is also the case for the generators in `colorimetry.genlist[[3]]`.
Now suppose that the matroid has only 1 non-trivial hyperplane.
Then there are 3 or more generators that (approximately) span a plane,
and all the other generators are far from the plane.
Perturb these generators onto the "best fit" linear plane $P \subset \mathbb{R}^3$,
i.e. project each of them onto $P$.
If this perturbation accidentally creates non-trivial hyperplanes
with the _other_ generators, then just perturb the other generators
to get the original matroid.
An example is the matroid generated by `colorimetry.genlist[[2]]`,
which has 1 non-trivial hyperplane with 50 generators.
Now suppose that all the non-trivial hyperplanes are disjoint.
Then we can repeat the procedure in the previous paragraph
for each hyperplane.
Since the hyperplanes are disjoint, there is no "interaction" between them.
An example is the matroid generated by `colorimetry.genlist[[1]]`,
which has 2 disjoint non-trivial hyperplanes with sizes 3 and 26.
Now suppose that the non-trivial hyperplanes intersect in a single generator.
We can perform a "constrained best fit" perturbation for each plane $P$,
where the constraint is that that single generator is in the plane.
An example is the matroid generated by `classics.genlist[[5]]`,
which has 2 non-trivial hyperplanes: $\{1, 3, 4\}$ and $\{2, 3, 5\}$.
The generated zonohedron is the _rhombo-hexagonal dodecahedron_.
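This last example is easy to inspect directly; here is a sketch, not evaluated,
which assumes the matroid's print method lists its hyperplanes:
```{r, echo=TRUE, eval=FALSE}
zono = zonohedron( classics.genlist[[5]] )
getmatroid( zono ) # assumption: the printout includes the hyperplanes {1,3,4} and {2,3,5}
```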
More simple cases can be listed by mixing the above,
but we cannot find a general proof of the conjecture.
<br><br>
# References
<div id="refs"></div>
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options(old_opt)
sessionInfo()
```
</pre>
| /scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/matroids.Rmd |
## ----setup, include=FALSE---------------------------------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=144 )
## ---- echo=TRUE, message=FALSE---------------------------------------------------------------------------------------------------------------
library(zonohedra)
## ---- echo=TRUE, message=FALSE---------------------------------------------------------------------------------------------------------------
matgen = colorimetry.genlist[[2]] # the CIE 1931 CMFs at 1nm step
matgen = 100 * matgen / sum( matgen[2, ] ) # it is traditional to scale so the center has Y=50, recall we use Illuminant E
zono = zonohedron( matgen )
base = getcenter(zono) ; base
## ---- echo=TRUE, message=FALSE---------------------------------------------------------------------------------------------------------------
theta = 1.478858 ; phi = 0.371322
u = c( sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi) ) ; u
## ---- echo=TRUE, message=TRUE----------------------------------------------------------------------------------------------------------------
df_opt = raytrace( zono, base, u ) ; df_opt
xyz_opt = df_opt$point[1, ] ; xyz_opt
## ---- echo=TRUE, message=TRUE----------------------------------------------------------------------------------------------------------------
invertboundary( zono, xyz_opt )$transitions
## ---- echo=TRUE, message=TRUE----------------------------------------------------------------------------------------------------------------
df_2trans = raytrace2trans( zono, base, u ) ; df_2trans
xyz_2trans = df_2trans$point[1, ] ; xyz_2trans
## ---- echo=TRUE, message=FALSE---------------------------------------------------------------------------------------------------------------
df_opt$tmax - df_2trans$tmax
## ---- echo=TRUE, message=FALSE---------------------------------------------------------------------------------------------------------------
xyz_mid = (xyz_opt + xyz_2trans) / 2
inside( zono, xyz_mid )
inside2trans( zono, xyz_mid )
## ---- echo=FALSE, results='asis'----------------------------------------------
options(old_opt)
sessionInfo()
| /scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/raytrace.R |
---
title: "Ray Tracing the Zonohedron Boundary and the 2-Transition Surface"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
toc_depth: 2
number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
%\VignetteIndexEntry{Ray Tracing the Zonohedron Boundary and the 2-Transition Surface}
%\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
max-width: 870px; /* make wider, default is 700px */
}
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=144 )
```
<br><br>
# Introduction
The focus of this vignette is the two
functions `raytrace()` and `raytrace2trans()`.
The former is for the boundary of the zonohedron
and the latter is for the associated 2-transition surface.
We revisit the example at the end of section 6
in Scott Burns' paper @Burns2021,
which is also illustrated in the 1nm plot from Figure 8.
His example is from colorimetry, where the boundary of the zonohedron
is the set of optimal colors
and the 2-transition surface is the set of Schrödinger colors
(both for Illuminant E).
The correspondence for the optimal colors was discovered by
Paul Centore, see @Centore2013.
Other featured functions are `invertboundary()`, `inside()` and `inside2trans()`.
```{r, echo=TRUE, message=FALSE}
library(zonohedra)
```
<br><br>
# A Ray Tracing Example
In Burns' example, the base of the ray is the center of the zonohedron $Z$:
```{r, echo=TRUE, message=FALSE}
matgen = colorimetry.genlist[[2]] # the CIE 1931 CMFs at 1nm step
matgen = 100 * matgen / sum( matgen[2, ] ) # it is traditional to scale so the center has Y=50, recall we use Illuminant E
zono = zonohedron( matgen )
base = getcenter(zono) ; base
```
The vector `base` corresponds to Burns' vector $XYZ_{\text{50%}}$.
The direction of the ray is given by spherical angles,
which define a unit vector `u`:
```{r, echo=TRUE, message=FALSE}
theta = 1.478858 ; phi = 0.371322
u = c( sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi) ) ; u
```
Calculate the intersection of the ray with the boundary of $Z$.
```{r, echo=TRUE, message=TRUE}
df_opt = raytrace( zono, base, u ) ; df_opt
xyz_opt = df_opt$point[1, ] ; xyz_opt
```
This matches Burns' value of $XYZ_{\text{LPsoln}}$.
From Figure 8 of @Burns2021 we see that this point
(and every point in the same parallelogram)
comes from a reflectance spectrum with 4 transitions.
This can be verified by inverting:
```{r, echo=TRUE, message=TRUE}
invertboundary( zono, xyz_opt )$transitions
```
Now calculate the intersection of the ray with the 2-transition surface
associated with $Z$.
```{r, echo=TRUE, message=TRUE}
df_2trans = raytrace2trans( zono, base, u ) ; df_2trans
xyz_2trans = df_2trans$point[1, ] ; xyz_2trans
```
This matches Burns' value of $XYZ_{\text{two-trans}}$ to 4 decimal places.
The transition wavelengths 629 and 575nm,
and the parallelogram coordinates 0.2246808 and 0.4459951
(these are the corresponding reflectances),
are clearly visible in Figure 8.
Now consider the distance between these 2 points
$XYZ_{\text{LPsoln}}$ and $XYZ_{\text{two-trans}}$.
The parameter `tmax` in both data frames is the parameter on the ray
where it intersects the boundary or the surface.
Since `u` is a unit vector, the difference between the parameters is this distance.
```{r, echo=TRUE, message=FALSE}
df_opt$tmax - df_2trans$tmax
```
This matches Burns' value of $1.29 \times 10^{-3}$,
which is very tiny, especially compared to the two $XYZ$s.
What is the maximum that this distance can be over the entire $\partial Z$ ?
To get a rough estimate, a search was made over the rays
passing through the centers of all the 21900 deficient parallelograms,
and with the same basepoint as before.
The largest distance over these rays was $2.47 \times 10^{-3}$.
This distance is for the parallelogram with generators corresponding
to 592 and 608 nm; the generating 'spectrum' has 8 transitions.
The actual maximum distance between the boundary of the color solid
and the 2-transition surface is not much larger than this sampling.
This confirms Burns' statement from @Burns2021 that the distance between
these surfaces has
"... no practical impact on typical colorimetric calculations".
If the zonohedron $Z$ is called the _Optimal Color Solid_ (OCS),
and the inside of the 2-transition surface is called the
_Schrödinger Color Solid_ (SCS),
we see that the OCS is obtained by adding a very thin "skin"
on some regions of the SCS.
<br><br>
# Inside or Outside ?
Consider the midpoint of $XYZ_{\text{LPsoln}}$ and $XYZ_{\text{two-trans}}$.
It lies on the same ray as these 2 points,
so it must be *inside* the zonohedron, but *outside* the 2-transition surface.
We can verify this easily:
```{r, echo=TRUE, message=FALSE}
xyz_mid = (xyz_opt + xyz_2trans) / 2
inside( zono, xyz_mid )
inside2trans( zono, xyz_mid )
```
<br><br>
# References
<div id="refs"></div>
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options(old_opt)
sessionInfo()
```
</pre>
| /scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/raytrace.Rmd |