---
title: "The 2-Transition Subcomplex and the 2-Transition Surface"
author: "Glenn Davis"
date: "`r Sys.Date()`"
header-includes:
  - \usepackage{textcomp}
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: true
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{The 2-Transition Subcomplex and the 2-Transition Surface}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 750px;     /* make a little wider, default is 700px */
}
```
\newcommand{\argmax}{\mathop{\mathrm{argmax}}\limits}
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=144 )
# if( !file.exists("figs") ) dir.create("figs")
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
Throughout this vignette,
$Z$ is a zonohedron such that none of its generators
$L(e_1),...,L(e_n)$ are 0,
and no generator is a multiple of another.
Equivalently, we assume that the matroid of $Z$ is simple.
For more discussion of zonohedra,
see the [Zonotopes](zonotopes.html) vignette.
Given such a zonohedron $Z$, and a cyclic ordering of the generators of $Z$,
there is a surface contained in $Z$, which may coincide with $\partial Z$,
but in general does not.
In this vignette we give a careful definition of this
_2-transition surface_,
give some examples, and explain some of the relevant functions
in the **zonohedra** package for processing this surface.
Points in the 2-transition surface are analogous to the reflectance spectra
of _Schrödinger colors_.
Points in $\partial Z$ are analogous to the reflectance spectra of
_optimal colors_, see @ANDP:ANDP19203671504, @Logvinenko2009,
and especially @Brill1983.
Featured functions in this vignette are:
`raytrace2trans()`, `transitionsdf()`, and `plothighertrans()`.
Given $x_1,x_2 \in \mathbb{R}^n$, $[x_1,x_2]$ denotes the line segment
from $x_1$ to $x_2$.
<br>
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
```
<br><br>
# The Unit Cube $Q^n$
We abbreviate $Q^n := [0,1]^n$, i.e. the $n$-cube.
So $Q^n$ is all points
$x = (\alpha_1, ... , \alpha_n)$ where all $\alpha_i \in [0,1]$.
For this vignette we assume $n \ge 3$.
A _vertex_ of $Q^n$ is a point where all $\alpha_i = 0 ~ \textrm{or} ~ 1$;
there are $2^n$ vertices.
If two vertices differ by exactly one coordinate, they are the endpoints
of an _edge_, and the "free" coordinate parameterizes the edge.
There are $n 2^{n-1}$ edges.
If four vertices differ by two coordinates, they are the vertices
of a _square_, and the two "free" coordinates parameterize the square.
There are $\binom{n}{2} 2^{n-2}$ squares.
In the familiar case when $n{=}3$, there are 8 vertices, 12 edges, and 6 squares.
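These counts are easy to verify numerically; a quick check in R for $n{=}3$:
```{r, echo=TRUE}
#  counts of vertices, edges, and squares of the n-cube
n = 3
c( vertices = 2^n, edges = n * 2^(n-1), squares = choose(n,2) * 2^(n-2) )
#  expected for n=3:  8, 12, 6
```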
Note that $(\alpha_1, ... , \alpha_n) \in \partial Q^n$ 
iff some $\alpha_i =$ 0 or 1.
Thus vertices, edges, and squares are all subsets of $\partial Q^n$.
The _standard involution_ $\rho: Q^n \to Q^n$ is the map
$(\alpha_1, ... , \alpha_n) ~ \mapsto ~ (1-\alpha_1, ... , 1-\alpha_n)$.
It is clear that $\rho$ maps a vertex to an _antipodal vertex_;
similarly, there are pairs of _antipodal edges_ and _antipodal squares_.
$\rho$ has a unique fixed point $(1/2, ... ,1/2)$,
which is the center of symmetry.
<br><br>
# The 2-Transition Subcomplex $Q_2^n \subsetneq Q^n$
This section treats the _2-transition subcomplex_ of $Q^n$ with $n \ge 3$,
which we denote by $Q^n_2$.
We define it in three ways.
**Definition 1:**  
Visualize the indexes $1, ... ,n$ as points on the circle,
like beads in a necklace, or the $n$ roots of unity.
Two different indexes $i \ne j$ divide the other
points into 2 contiguous "arcs" (one of them may be empty).
Define $(\alpha_1, ... , \alpha_n) \in Q^n_2$
iff 
there are $i \ne j$, so that $\alpha_k = 0$ for $k$ in one arc
and $\alpha_k = 1$ for $k$ in the other arc.
There are 2 ways to choose 0 and 1,
so as $\alpha_i$ and $\alpha_j$ vary,
they "sweep out" 2 disjoint and antipodal squares in $Q^n$.
The total number of these _2-transition squares_ is $n(n-1)$.
$Q^n_2$ is the union of these squares, joined along their edges.
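A short sketch (illustration only, not package code) enumerates the centers of these squares, one for each ordered pair $(i,j)$, and confirms the count $n(n-1)$:
```{r, echo=TRUE}
#  a sketch: centers of the 2-transition squares of Q^n, one per ordered
#  pair (i,j).  The coordinates at i and j are 1/2, the circular arc
#  from i to j is filled with 1s, and the complementary arc with 0s.
trans2centers <- function( n ) {
    out = NULL
    for( i in 1:n )
    for( j in 1:n ) {
        if( i == j )  next
        x = numeric(n) ;  x[ c(i,j) ] = 1/2
        k = i
        repeat { k = (k %% n) + 1 ; if( k == j ) break ; x[k] = 1 }
        out = rbind( out, x )
    }
    out
}
nrow( trans2centers(5) )    #  5*4 = 20, in agreement with n*(n-1)
```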
It is helpful to visualize points in the unit cube as bar graphs.
Here are four points in $Q^{10}_2$, with 2 transitions:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.1  four points in the 2-transition complex, visualized with bar graphs', out.width="100%", cache=FALSE }
#  draw the point x in the n-cube as a bar graph, one bar per coordinate
mybarplot <- function( x )  {
n = length(x)
plot( c(0,n), c(0,1), type='n', tcl=0, las=1, xaxt='n', xlab='', ylab='', mgp=c(3,0.25,0) )
grid( nx=NA, ny=NULL, lty=1 )
barplot( x, names.arg=1:n, space=0, add=TRUE, yaxt='n', mgp=c(3,0.25,0) )
}
x1 = numeric(10) ; x1[ c(3,8) ] = exp( c(-0.25,-1) ) ; x1[ 4:7 ] = 1
x2 = numeric(10) ; x2[ c(5,6) ] = exp( c(-1,-0.25) )
oldpar = par( mfrow=c(2,2)  , omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
mybarplot( x1 )   ; mybarplot( x2 )     #  row #1
mybarplot( 1-x1 ) ; mybarplot( 1-x2 )   #  row #2
par( oldpar )
```
For the first point, the run of 1s has length 4,
and the (circular) run of 0s also has length 4.
For the second point, the run of 1s has length 0,
and the (circular) run of 0s has length 8.
The points in row #2 are derived by applying the involution to the point above it.
Of course, the involution turns runs of 1s to runs of 0s,
and vice-versa.
The special vertices $(0,...,0)$ and $(1,...,1)$ are in $Q^n_2$
by this definition, although they technically have no transitions.
Also, if all $\alpha_k = 0$ except for a single $\alpha_i$, that point is _also_ in $Q^n_2$,
because it lies in an edge of one of the above-defined squares.
Note that $Q^3_2 = \partial Q^3$,
i.e. every point in the boundary of $Q^3$ is a 2-transition point.
<br>
**Definition 2:**   
This definition views a 2-transition point as a special discrete projection
of a 2-transition function defined on a circle.
Given $n$, define $n+1$ points, $\beta_i := i + 1/2$ for $i = 0,...,n$,
and $n$ intervals
$I_i := [\beta_{i-1},\beta_i]$ = $[i-1/2,i+1/2]$ for $i = 1,...,n$.
These intervals have length 1 and are a partition of $[1/2,n+1/2]$.
Let $J_2$ be the set of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have two transitions or no transitions (jumps).
We identify the endpoints $\beta_0$ and $\beta_n$ to form a circle,
so if the function's values at $\beta_0$ and $\beta_n$ are different,
then this is considered to be a transition.
Equivalently $J_2$ is the set of all indicator functions $\mathbf{1}_A$
where $A$ is an arc in the circle.
We allow the arc to be empty (the function is identically 0),
or the entire circle (the function is identically 1).
Define a function $p()$
\begin{equation}
p : J_2 \twoheadrightarrow Q^n  ~~~  \text{by} ~~~  p(f) := (\alpha_1,\ldots,\alpha_n)  ~~~ \text{where}  ~~~   \alpha_i :=  \int_{I_i} f(\lambda) \, d\lambda
\end{equation}
Note that $\alpha_i$ is the mean of $f$ on $I_i$.
So if $f$ is identically 1 on $I_i$, then $\alpha_i = 1$ in $p(f)$.
And the same is true with 1 replaced by 0.
But if $f$ has a jump in $I_i$, then $\alpha_i$ can be anywhere in $[0,1]$
depending on where the jump occurs.
If there is exactly one jump in $I_i$,
then the location of the jump is uniquely determined by $\alpha_i$.
But if there are two jumps in $I_i$,
then the 2 jump locations are **not** determined by $\alpha_i$.
It only determines the distance between the jumps,
so both can be translated a little bit and not change $\alpha_i$.
Thus, $p$ is _not_ injective.
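This non-injectivity is easy to demonstrate numerically.
The following sketch (illustration only; it assumes the arc $[a,b]$ does not wrap around the identified endpoints) computes $p( \mathbf{1}_{[a,b]} )$ straight from the definition:
```{r, echo=TRUE}
#  a sketch of p() from Definition 2, for the indicator function of the
#  arc [a,b], with 1/2 <= a <= b <= n+1/2  (no wrap-around)
projectarc <- function( a, b, n ) {
    alpha = numeric(n)
    for( i in 1:n )
        alpha[i] = max( 0, min(b,i+1/2) - max(a,i-1/2) )   # mean of f on I_i
    alpha
}
#  two different arcs, both with both jumps inside I_5, and the same image
projectarc( 4.6, 4.9, 10 )
projectarc( 4.7, 5.0, 10 )
```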
Here are plots with 2 examples:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.2', out.width="100%", cache=FALSE }
#  draw a step function f with p(f) = x
mystepplot <- function( x )  {
# assumption: x has exactly 2 interior values, in the pattern 0s, 1s, 0s
n = length(x)
plot( c(1/2,n+1/2), c(0,1), type='n', tcl=0, las=1, xlab='', ylab='', lab=c(n,5,7), mgp=c(3,0.25,0) )
grid( lty=1 )
beta = seq(1/2,n+1/2,by=1) ; segments( beta, 0, beta, -0.02 )   # tick marks at the half-integers beta_i
ij = which( 0<x & x<1)  ;  lambda = ij + c(1/2 - x[ ij[1] ], x[ ij[2] ] - 1/2)   # recover the 2 jump locations
lines( c(0.5,lambda[1]), c(0,0) ) ; lines(lambda,c(1,1)) ; lines( c(lambda[2],n+1/2), c(0,0) )
segments( lambda, c(0,0), lambda, c(1,1), lty=3 )
}
oldpar = par( mfrow=c(2,2), omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
mystepplot( x1 ) ; mystepplot( x2 )     #  row #1
mybarplot( x1 ) ; mybarplot( x2 )       #  row #2
par( oldpar )
```
The 1st plot shows a function $f \in J_2$ with jumps in intervals $I_3$ and $I_8$.
The plot below it shows $p(f) \in Q^{10}_2$.
The 2nd plot shows a function $f \in J_2$ with jumps in intervals $I_5$ and $I_6$.
The plot below it shows $p(f) \in Q^{10}_2$.
In the 1st row, the sequence $\beta_i$ (all of them half-integers)
is marked with small tick marks.
We now define $Q^n_2$ to be the image of $p()$; i.e. $Q^n_2 := p(J_2)$.
It is straightforward to show that **Definition 2** and **Definition 1**
are equivalent.
Now we look at $J_2$ in more detail.
Every function in $J_2$ is integrable, so we can think of
$J_2 \subsetneq L^1(\mathbb{S}^1)$, which is the space of
integrable functions on the circle $\mathbb{S}^1$.
**Theorem**
With the $L^1$ topology, $J_2$ is homeomorphic to the 2-sphere $\mathbb{S}^2$.
**Proof**
Denote the 2 functions in $J_2$ that are identically 0 or 1
by $f_0$ and $f_1$.
For a point in $J_2 - \{f_0, f_1\}$,
the corresponding arc $A$ is non-trivial,
and so the midpoint of the arc is a well-defined point in $\mathbb{S}^1$.
The length of the arc is in the open interval $(0,2\pi)$.
This assignment gives a homeomorphism from $J_2 - \{f_0 , f_1\}$
to the open cylinder $U := \mathbb{S}^1 \times (0,2\pi)$.
Note that as two points in $J_2$ get closer to $f_0$,
they get closer to each other in the $L^1$ metric; and similarly for $f_1$.
So if the bottom and top boundaries of $U$ are each collapsed to a point,
and $f_0$ and $f_1$ are mapped to those two points,
the above homeomorphism extends continuously to all of $J_2$ and
to the cylinder with collapsed boundaries,
which is just a 2-sphere $\mathbb{S}^2$.
$\square$
Later, we will see that $Q^n_2$ is _also_ homeomorphic to $\mathbb{S}^2$.
But the above mapping $p : J_2 \twoheadrightarrow Q^n_2$
is _not_ a homeomorphism,
because we observed above that $p$ is _not_ injective.
<br>
**Definition 3:**   
This definition works directly with vertices and squares of $Q^n$.
A _2-transition vertex_ is one with a single circular run of 0s,
and a single circular run of 1s.
A run is allowed to be empty; this yields the vertices of all 0s and all 1s.
A _2-transition square_ is a square whose 4 vertices are all 2-transition
vertices.
Note that the center of the square has exactly 2 coordinates with value 1/2,
and the other coordinates form a circular arc of 0s and a complementary arc of 1s.
We now define $Q^n_2$ to be the union of all these 2-transition squares.
It is an example of a cubical subcomplex of $Q^n$.
Once again, it is straightforward to show that
**Definition 3** and **Definition 1** are equivalent.
For an $x := (\alpha_1,...,\alpha_n) \in Q^n_2$,
we define the _level_ of $x$ to be the number of $\alpha_i$'s that equal $1$.
The level varies from 0 to $n$.
The level is constant on the interior of an edge and the interior of a square.
We define the _level_ of a square to be the level on the interior.
The level of a square varies from 0 to $n{-}2$.
It is helpful to think of level=0 squares at the "bottom" of the subcomplex,
and level=$n{-}2$ squares at the "top".
So we know that $Q^n_2$ is a union of squares, but what does it "look like",
and what is its topology?
We claim that $Q^n_2$ is a 2-sphere $\mathbb{S}^2$.
In the case of $n{=}3$ this is easy to see, since $Q^3_2$ = $\partial Q^3$.
In general, first consider all the 2-transition squares with level=0.
It is straightforward to show that such a square has 0 as a vertex,
and it has 2 vertices with level=1 and one vertex with level=2.
Each edge from 0 to a level 1 vertex is shared by two of the squares,
and so the squares are arranged in circular fashion around 0.
Their union is a topological disk $\mathbb{D}^2$ at the "bottom".
Similarly, the level=$n{-}2$ squares form a disk at the "top".
Now consider squares with a fixed level $\ell$, where $0 < \ell < n{-}2$.
It is easy to show that in each such square,
each of the two level=$\ell{+}1$ vertices is shared with exactly one other such square,
so the level $\ell$ squares form a
"necklace of $n$ diamonds".
These $n{-}3$ necklaces are stacked on top of each other to form a cylinder,
and the cylinder is capped at the bottom and top to form the
2-sphere $\mathbb{S}^2$.
We now verify the square count in $Q^n_2$:
$n + n(n{-}3) + n = n(n{-}1)$, which is correct.
The following figure is a helpful visualization.
```{r, rgl=TRUE, dev='png', echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.3       [these are interactive WebGL widgets]', fig.keep='none', fig.show='hide', out.width="100%", cache=FALSE }
rgl::par3d( zoom=0.7 )
rgl::mfrow3d( 1, 2 )
zono =  polarzonohedron(9)
plot2trans( zono )
rgl::next3d()
plot2trans( zono, level=c(0,4,7) )
rgl::rglwidget( webgl=TRUE )
```
This figure plots the image of $Q^9_2$ in $\mathbb{R}^3$ under a
suitable linear map.
The squares are distorted into parallelograms.
The figure on the left draws the full subcomplex,
and the one on the right only draws levels 0, 4, and 7.
The "necklace of 9 diamonds" at level=4 is easily visible.
The black dot is the image of $(0,...,0)$,
and the white dot is the image of $(1,...,1)$.
More about these linear maps is given in the next section.
<br><br>
# The 2-Transition Surface $S_2 \subsetneq Z \subsetneq \mathbb{R}^3$
Let the zonohedron $Z := L(Q^n)$,
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^3$ is a surjective linear map. 
From now on we assume that $Z$ is _pointed_,
which means that 0 is a vertex of $Z$.
Let $S_2 := L(Q^n_2)$ be the image of the 2-transition subcomplex $Q^n_2$.
Since the subcomplex is a union of squares glued on the edges to form a 2-sphere $\mathbb{S}^2$,
$S_2$ is a union of parallelograms glued on the edges.
$S_2$ is a tessellated surface,
but it may not be a sphere, because it may have self-intersections.
We are mainly interested in the case that there are _no_ self-intersections,
and there is a precise way to state this.
Let $L_2$ be the restriction of $L$ to $Q^n_2$.
So $L_2 : Q^n_2 \to \mathbb{R}^3$ is the composition of the
inclusion $Q^n_2 \subsetneq \mathbb{R}^n$ followed by $L$.
The surface has no self-intersections iff $L_2$ is injective.
For example, in the previous figure $S_2$ does **not**
have self-intersections and is a topological 2-sphere;
there $L_2$ is injective.
If $L_2$ is injective,
then it is well-known (@Alexander1924, @Moise1977 p. 117, and @Bing1983 p. 161)
that $S_2$ divides $\mathbb{R}^3$ into
an inside region and an outside region whose intersection is $S_2$.
Moreover, the inside region is homeomorphic to a closed ball,
and $S_2$ is the boundary of that ball.
Since $Q^n_2 \subsetneq Q^n$, $S_2 = L(Q^n_2) \subsetneq L(Q^n) = Z$.
We emphasize that $S_2$ is a surface, and $Z$ is a solid.
We also emphasize that $S_2$ depends intimately on the order of the generators,
and $Z$ does not depend on the order at all.
<br><br>
# Polygons
A polygon for us is more than just a subset of the plane.
A _polygon_ is a finite and cyclically ordered set of distinct _vertices_,
plus the line segments that connect the vertices in cyclic order.
The line segments are called the _edges_.
The union of the edges is a subset of the plane,
but two different sets of vertices can generate the same subset -
the vertices matter.
A _simple polygon_ is a polygon whose edges do not intersect,
except at corresponding endpoints;
i.e. the polygon has no self-intersections.
By the _Jordan Curve Theorem_, a simple polygon,
as just a set, has an inside region
and an outside region, and both are connected.
Note that a non-simple polygon may also have a connected inside region;
it might "double-back" on itself within an edge, and then proceed forward again.
A _convex polygon_ is a polygon with an inside region that is convex.
There is an equivalent definition of polygon using functions.
Let $U_n$ be the set of $n$-th roots of unity on the unit circle in $\mathbb{C}$,
plus the edges connecting these vertices in order.
So $U_n$ is sort of a _quintessential_ or _template_ polygon.
A general polygon is a function $f : U_n \to \mathbb{R}^2$ which is
injective _on the vertices_ (the image vertices are distinct) and 
linear _on each edge_.
Since $f$ is injective on the vertices, it is also injective on each edge;
one can think of $f$ as a _piecewise-linear immersion_ of $U_n$.
A polygon defined this way is _simple_ iff $f$ is injective.
This is an unintuitive way to define a polygon,
but it has the advantage that it generalizes easily to one higher dimension,
as we will see in **Section 8**.
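Here is a tiny sketch of this definition for $n{=}5$ (a hypothetical illustration; the linear map and its matrix are arbitrary choices):
```{r, echo=TRUE}
#  vertices of the template polygon U_5, i.e. the 5th roots of unity
theta = 2*pi*(0:4)/5
U5 = cbind( cos(theta), sin(theta) )
#  a linear map of the plane is linear on each edge; this one is injective,
#  so the image vertices are distinct and the image polygon is simple
A  = matrix( c(2,0,1,1), 2, 2 )
P5 = U5 %*% t(A)     # rows are the image vertices, in cyclic order
```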
<br><br>
# The Generator Polygon $P$
Since $Z$ is pointed, there is a plane $K$ in $\mathbb{R}^3$ that has 0
in one open halfspace, and all the generators of $Z$ in the other open halfspace.
This "cutting" hyperplane intersects $S_2$ in a polygon.
Each vertex of the polygon is the intersection of $K$
and the segment from 0 to a generator.
The cyclic order of its vertices is inherited from the order of the generators.
We call this the _generator polygon_ and let $P := K \cap S_2$ denote it.
$P$ may not be a simple polygon in general; it may have self-intersections.
Since there can be many cutting hyperplanes $K$,
$P$ is only defined up to a projective transformation.
In colorimetry for example, there are three chromaticity diagrams,
from 1931, 1960, and 1976.
They are all generator polygons for the same set of generators,
and the 3 polygons differ by projective transformations,
see @Wyszecki&Stiles.
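A sketch of this computation (illustration only; it assumes a vector `kvec` with positive inner product with every generator, so that the plane $\langle x, \mathtt{kvec} \rangle = 1$ is a valid cutting plane $K$):
```{r, echo=TRUE}
#  a sketch: vertices of the generator polygon P for the cutting plane
#  <x,kvec> = 1.  Each vertex is v_i = L(e_i) / <L(e_i),kvec>.
genpolygon <- function( mat, kvec ) {
    d = as.numeric( kvec %*% mat )
    stopifnot( all(d > 0) )     # kvec must separate 0 from the generators
    t(mat) / d                  # one vertex per row, in generator order
}
#  the generators of a polar zonohedron have constant positive height,
#  so kvec = c(0,0,1) works, and P is a regular polygon
head( genpolygon( getmatrix( polarzonohedron(9) ), c(0,0,1) ), 3 )
```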
<br><br>
# Parallelograms in $S_2$ and $\partial Z$
Given $\partial Z$ and $S_2$ as above,
the goal in this section is to show that each is a union
of $n(n{-}1)$ parallelograms, and to set up a 1-1 correspondence
between the parallelograms in $S_2$ and those in $\partial Z$.
Each parallelogram will also be assigned a unit normal
in such a way that corresponding parallelograms have the same normal.
First consider $\partial Z$.
If a facet of $Z$ is not a parallelogram, let it be tiled
with the _standard tiling_ by parallelograms,
see the [Zonotopes](zonotopes.html) vignette.
Given an unordered pair $\{ i,j \}$ with $1 \le i , j \le n$
and $i \ne j$,
there are 2 antipodal parallelograms in $\partial Z$.
The edges of both parallelograms are the generators $L(e_i)$ and $L(e_j)$.
If  $\mathcal{P}$ is one of those parallelograms,
we know that $\mathcal{P}$ is the image of some square in $Q^n$.
The following Lemma algebraically characterizes which squares map to
a parallelogram $\mathcal{P} \subset \partial Z$.
**Lemma:**
Let $\Sigma$ be a square in $Q^n$.
By definition $\Sigma$ is determined by an unordered pair $\{ i,j \}$
and for all $k \notin \{ i,j \}$, an assignment of $\alpha_k$ to either 0 or 1.
The coordinates $\alpha_i$ and $\alpha_j$ are 'free' in [0,1]
and sweep out the square.
Let the parallelogram
$\mathcal{P} := L(\Sigma) \subsetneq Z$ be the image of the square.
Let $w := L(e_i) \times L(e_j)$ be the cross product of the edges of $\mathcal{P}$.
Then $\mathcal{P} \subset \partial Z$
iff
for all $k \notin \{ i,j \}$
\begin{equation}
\alpha_k =
\begin{cases}    
0 & \langle L(e_k),w \rangle < 0  \\
1 & \langle L(e_k),w  \rangle > 0  \\
0 ~\text{or} ~ 1 & \langle L(e_k),w  \rangle = 0
\end{cases}
\hspace{20pt}  \text{or}  \hspace{20pt}
\alpha_k =
\begin{cases}
1 & \langle L(e_k),w \rangle < 0  \\
0 & \langle L(e_k),w  \rangle > 0  \\
0 ~\text{or} ~ 1 & \langle L(e_k),w  \rangle = 0
\end{cases}
\end{equation}
Note that the two conditions are almost the same;
the first 2 cases merely swap 0 and 1,
and the third case is the same in both conditions.
The third case is not really new;
it comes from the definition of the square.
Also note that the order of $i$ and $j$ affects the definition of $w$,
but if $i$ and $j$ are swapped,
$w$ is changed to $-w$ which merely swaps the two conditions.
**Proof:**
Let $\lambda$ be the linear functional $z \mapsto \langle z,w \rangle$.
Since $\langle L(e_i),w \rangle = \langle L(e_j),w \rangle = 0$,
the values of $\alpha_i$ and $\alpha_j$ have no effect on $\lambda( L(x) )$.
Similarly, in the last case where $\langle L(e_k),w  \rangle = 0$,
$\alpha_k$ has no effect.
This case only happens when $L(e_k)$ is a linear combination of $L(e_i)$ and $L(e_j)$,
and the corresponding facet is non-trivial with 6 or more edges.
The facet then requires a selected tiling,
and different tilings yield different assignments of 0 and 1 to $\alpha_k$.
If the square $\Sigma$ satisfies the first condition above,
then any $x \in \Sigma$ maximizes $\lambda( L(x) )$ over _all_ $x \in Q^n$.
Therefore $L(\Sigma) \subset \partial Z$, by the definition of $Z$.
If the square $\Sigma$ satisfies the second condition,
then $\lambda( L(x) )$ is minimized and the conclusion is the same.
Conversely, if the square satisfies neither condition,
then  $\lambda( L(x) )$ for $x \in \Sigma$ is strictly between
the maximum and the minimum.
This means that $L(\Sigma)$ is in an intermediate hyperplane
orthogonal to $w$.
The intersection of an intermediate hyperplane with $\partial Z$
is only a 1-dimensional polygon and cannot contain a parallelogram.
Thus $L(\Sigma) \not\subset \partial Z$.
$\square$
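The Lemma translates directly into a test.
Here is a sketch (illustration only; compare the package function `boundarypgramdata()`, and note that real code would use a tolerance instead of exact-zero tests):
```{r, echo=TRUE}
#  a sketch of the Lemma as a test:  mat is the 3 x n generator matrix,
#  {i,j} is the free pair, and alpha holds 0/1 values for k not in {i,j}
inboundary <- function( mat, i, j, alpha ) {
    gi = mat[ ,i] ;  gj = mat[ ,j]
    w  = c( gi[2]*gj[3]-gi[3]*gj[2],
            gi[3]*gj[1]-gi[1]*gj[3],
            gi[1]*gj[2]-gi[2]*gj[1] )       #  w = L(e_i) x L(e_j)
    d  = as.numeric( w %*% mat )            #  <L(e_k),w>  for all k
    k  = setdiff( 1:ncol(mat), c(i,j) )
    #  the two conditions of the Lemma  (exact zeros; no tolerance here)
    cond1 = all( (d[k]<0 & alpha[k]==0) | (d[k]>0 & alpha[k]==1) | d[k]==0 )
    cond2 = all( (d[k]<0 & alpha[k]==1) | (d[k]>0 & alpha[k]==0) | d[k]==0 )
    cond1 || cond2
}
```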
Define a _slab_ in $\mathbb{R}^3$ to be the region between two parallel planes,
including the planes themselves.
In equation form, a slab $\mathcal{S}$ is given by
\begin{equation}
\mathcal{S} := \{ ~ x : \alpha  \le  \langle x,w \rangle  \le \beta ~ \}
\end{equation}
where $w \in \mathbb{R}^3$ is the non-zero plane normal,
and $\langle x,w \rangle {=} \alpha$ and $\langle x,w \rangle {=} \beta$ are the two planes.
If $\alpha {<} \beta$ then each point in the boundary planes of $\mathcal{S}$
has a unique outward-pointing unit normal.
But if $\alpha {=} \beta$ then the planes coincide,
the slab degenerates to that plane,
and a normal cannot be assigned unambiguously.
Given an unordered pair $\{ i,j \}$ with $1 \le i , j \le n$
and $i \ne j$,
there are 2 antipodal parallelograms in $\partial Z$.
The edges of both parallelograms are the generators $L(e_i)$ and $L(e_j)$.
They define two distinct parallel planes and therefore a non-degenerate slab
denoted by $\mathcal{S}^{ \{i,j\} }$.
The slab has 2 well-defined boundary normals,
which we assign to the 2 parallelograms in the boundary of the slab.
Note that if $u$ is the unit normal for one of the parallelograms $\mathcal{P}$,
then $u$ is a multiple of the cross product $L(e_i) \times L(e_j)$.
Now consider $S_2$.
Given an unordered pair $\{ i,j \}$ as above,
there are 2 antipodal parallelograms in $S_2$
and the edges of both are the generators $L(e_i)$ and $L(e_j)$.
These 2 parallelograms are parallel to each other;
let $\mathcal{S}_2^{ \{i,j\} }$ be the slab defined by them.
The 2 parallelograms in $\partial Z$ given by $\{ i,j \}$ have the same
edges - the generators $L(e_i)$ and $L(e_j)$.
So it is clear that $\mathcal{S}_2^{ \{i,j\} } \subseteq \mathcal{S}^{ \{i,j\} }$.
If this new slab is non-degenerate,
then each of the two parallelograms in $S_2$ can be matched to exactly one
of the two parallelograms in $\partial Z$,
by choosing the one with the same normal vector.
If this new slab is degenerate, its outward-pointing normal is not defined,
and we can choose the assignment arbitrarily.
This completes the goal of this section.
An interesting observation:
since corresponding parallelograms are congruent,
they have the same surface area,
and therefore $S_2$ and $\partial Z$ have the same surface area as well.
<br><br>
# A Theorem about $S_2$ and the Convexity of $P$
We have seen that $S_2 \subsetneq Z$.
It is natural to ask:
<div style="text-align: center">
When is $S_2$ as large as possible, namely the entire boundary of $Z$ ?
</div>
Recall our assumptions that $Z$ has a simple matroid, and is pointed.
**Theorem:**
With $Z$, $S_2$, $L_2$, and $P$ defined as above, the following are equivalent:
<ol type='1'>
<li>$S_2 = \partial Z$</li>
<li>$L_2$ is injective, and the inside region of $S_2$ is convex</li>
<li>$P$ is a simple convex polygon, possibly with collinear vertices</li>
</ol>
The equivalence of properties 1 and 3 is proved in West & Brill @Brill1983,
except the sequence of vector generators in this theorem
is replaced by a continuous path of vectors in @Brill1983.
To our knowledge, property 2 is new.
**Proof:**  
$1. \implies 2.$
$L_2$ is clearly injective on each square.
If a parallelogram of $S_2$ is in $\partial Z$ then it must be the
corresponding parallelogram (or its antipodal) in $\partial Z$
from the previous section.
This mapping of parallelograms is 1-1,
and since the parallelograms of $\partial Z$ are disjoint
(except on the edges)
the parallelograms of $S_2$ are disjoint (except on the edges).
Therefore $L_2$ is injective.
The inside region of $S_2$ is
the inside region of $\partial Z$, which is $Z$, which is convex.
$2. \implies 3.$ (trivial) The polygon $P$ is the
intersection of a hyperplane and the boundary of a convex polyhedron,
and that intersection is a convex polygon.
Since $L_2$ is injective, $P$ is simple.
$3. \implies 1.$ 
For a generator $L(e_i)$, let $v_i$ be the corresponding vertex of $P$.
It is the intersection of the ray generated by $L(e_i)$ and the
cutting hyperplane $K$.
Note that $v_i$ is a _positive_ multiple of $L(e_i)$.
Firstly we show that $S_2 \subseteq \partial Z$.
Let ${ \{i,j\} }$ be an unordered pair of indexes as above.
These indexes divide the remaining indexes into 2 contiguous circular sequences.
Let $\mathcal{P}$ be one of the corresponding parallelograms of $S_2$,
and let $u$ be its unit normal.
By definition, $\mathcal{P}$ is the image under $L$
of a 2-transition square $\Sigma$ in $Q^n_2$.
For $\Sigma$, all $\alpha_k$ in one sequence are 0,
and all $\alpha_k$ in the other sequence are 1.
We want to show that $\mathcal{P}$ is also in $\partial Z$.
Let $\mathcal{L}$ be the line through $v_i$ and $v_j$.
The plane given by $\langle x,u \rangle = 0$ contains both $L(e_i)$ and $L(e_j)$,
and so $\langle v_i,u \rangle = \langle v_j,u \rangle = 0$.
The line $\mathcal{L}$ divides $K$ into a positive side and a negative side.
For another vertex $v_k$,
$\langle L(e_k),u \rangle > 0$ iff $v_k$ is on the positive side of $\mathcal{L}$,
and
$\langle L(e_k),u \rangle < 0$ iff $v_k$ is on the negative side of $\mathcal{L}$.
Consider the relationship between $\mathcal{L}$ and $P$.
**Case i).** $\mathcal{L}$ intersects the interior of $P$
<br>
Since $P$ is convex, all the $v_k$ in one contiguous sequence
are on the positive side of $\mathcal{L}$ and
all the $v_k$ in the other contiguous sequence are on the negative side of $\mathcal{L}$.
This implies that
all the generators $L(e_k)$ in one contiguous sequence,
have $\langle L(e_k),u \rangle > 0$,
and all the generators $L(e_k)$ in the other contiguous sequence
have $\langle L(e_k),u \rangle < 0$.
But we saw earlier that the $\alpha_k$ in one sequence are all 0,
and the $\alpha_k$ in the other sequence are all 1.
This is exactly the condition given by the Lemma in the previous section.
Therefore $\mathcal{P} \subseteq \partial Z$.
**Case ii).**  $\mathcal{L}$ intersects $\partial P$, but not the interior of $P$
<br>
This case is a little more subtle, but basically the same.
Since $P$ is convex, w.l.o.g. we can assume all $v_k$ are
either on $\mathcal{L}$ or on the negative side of $\mathcal{L}$.
Those on the negative side form a contiguous sequence,
so by the Lemma, for all these $k$ the $\alpha_k$ have a common value of 0 or 1.
For the $v_k$ that are **on** the line, $\langle L(e_k),u \rangle = 0$,
so the conditions in the above Lemma do not care whether
$\alpha_k$ is 0 or 1.
Thus all $\alpha_k$ satisfy the conditions of the Lemma
and so $\mathcal{P} \subset \partial Z$.
Secondly we show that $\partial Z \subseteq S_2$.
Let $\mathcal{P} \subset \partial Z$ and let $\Sigma$ be the square
whose image is $\mathcal{P}$.
We want to show that every $x \in \Sigma$ has 2-transitions.
**Case i).** $\mathcal{L}$ intersects the interior of $P$
<br>
Then for $k \not\in \{ i,j \}$,
all the generators $L(e_k)$ in one contiguous sequence
have $\langle L(e_k),u \rangle > 0$,
and all the generators $L(e_k)$ in the other contiguous sequence
have $\langle L(e_k),u \rangle < 0$.
In no case is $\langle L(e_k),u \rangle = 0$.
The Lemma then forces two possibilities for $\alpha_k$,
and both of them have 2 transitions.
**Case ii).** $\mathcal{L}$ intersects $\partial P$, but not the interior of $P$
<br>
We know $v_i$ and $v_j$ are on the line $\mathcal{L}$.
Let $\mathcal{I} := \{ ~ l : v_l \in \mathcal{L} ~ \}$.
Since $P$ is simple, the set of $v_l$ for $l \in \mathcal{I}$ is
contiguous in $P$.
If $i$ and $j$ are the only indexes in $\mathcal{I}$,
then since $P$ is simple and convex,
all the _other_ $v_k$ are contiguous and on one side of $\mathcal{L}$.
So by the Lemma, for all the _other_ $v_k$ either $\alpha_k=0$ or $\alpha_k=1$.
Therefore every $x \in \Sigma$ has 2 transitions.
If $\mathcal{I}$ contains more indexes than just $i$ and $j$, then the generators
$L(e_l)$ for $l \in \mathcal{I}$ generate a non-trivial zonogon facet of $Z$.
Recall that this facet is tiled with the standard tiling.
Since $Z$ is pointed, the zonogon facet is also pointed.
And since $P$ is simple, the generators of the facet are in angular order.
By the property of the standard tiling in the [Zonotopes](zonotopes.html)
vignette,
we know that there are 2 transitions in the sequence of $\alpha_l$
for $l \in \mathcal{I}$.
Assume now that all vertices of the complementary sequence are
on the *negative* side of $\mathcal{L}$,
so all the complementary $\alpha_k = 0$.
When combined with the $\alpha_l$, the result is a 2-transition sequence,
for every $x \in \Sigma$.
If all vertices of the complementary sequence are
on the *positive* side of $\mathcal{L}$, then we can use the central
symmetry of $Z$ to show that the sequence for $x$ is the reflection,
and thus still has 2 transitions.
$\square$
<br><br>
# Strictly Starshaped Surfaces
`raytrace2trans()` is one of the important functions in the package **zonohedra**.
It expects that the 2-transition surface is nice enough
so that a given ray intersects the surface in a unique point.
This section explores the mathematics of this situation.
It is convenient to deal with abstract polyhedral surfaces in $\mathbb{R}^3$.
Let $S$ be a _polyhedral_ 2-sphere,
i.e. a sphere $\mathbb{S}^2$ that is tessellated by polygons,
and let $f: S \to \mathbb{R}^3$ be a continuous map that is injective
and linear on each polygon.
One can think of $f$ as a _piecewise-linear immersion_.
It may not be injective on all $S$, so the surface $f(S)$ may have self-intersections.
$f(S)$ is a _polyhedral surface_ in $\mathbb{R}^3$.
We call the surface polygons of $f(S)$ _facets_.
Since $S$ is orientable, we can choose a normal vector $n_i$ for each facet,
so that these normals are consistent across the edges.
A facet and its normal defines a positive halfspace for the facet,
and a negative halfspace for the facet.
The example we have in mind is the 2-transition surface $S_2$
associated with a zonohedron.
Given the vectors $n_i$ and a point $p \notin f(S)$,
the _linking number_ of $p$ and $f(S)$ is defined as follows.
Choose a ray based at $p$ and not intersecting any edge of $f(S)$;
this is always possible.
Now examine every intersection of the ray with the interior of a facet.
If the ray crosses with the same orientation as $n_i$, assign a +1;
otherwise assign a -1.
The _linking number_ is defined to be the sum of all these +1s and -1s.
It is independent of the chosen ray.
Reversing the sign of every $n_i$ yields a consistent vector field
and changes the sign of the linking number.
The linking number is a straightforward generalization of the
_winding number_ of a closed polygonal curve in the plane,
with respect to a point not on the curve.
For more on this subject, see @Milnor1997.
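To make the crossing rule concrete, here is a sketch (hypothetical code, not from the package) that computes the linking number for an oriented triangle mesh, assuming the ray direction is generic, i.e. the ray from $p$ meets no edge of the mesh:
```{r, echo=TRUE}
#  a sketch: linking number of the point p with an oriented triangle mesh.
#  V is a vertex matrix with 3 columns;  Fc is a matrix of row indexes into
#  V, 3 per facet, oriented so cross(e1,e2) is the facet normal n_i;
#  dir is a generic ray direction
linkingnumber <- function( V, Fc, p, dir=c(1,0,0) ) {
    cross3 <- function( u, v )
        c( u[2]*v[3]-u[3]*v[2], u[3]*v[1]-u[1]*v[3], u[1]*v[2]-u[2]*v[1] )
    total = 0
    for( t in 1:nrow(Fc) ) {
        a  = V[ Fc[t,1], ]
        e1 = V[ Fc[t,2], ] - a  ;  e2 = V[ Fc[t,3], ] - a
        h    = cross3( dir, e2 )
        adet = sum( e1 * h )
        if( abs(adet) < 1.e-12 )  next       # ray parallel to the facet plane
        s = p - a
        u = sum( s * h ) / adet
        q = cross3( s, e1 )
        v    = sum( dir * q ) / adet
        tval = sum( e2  * q ) / adet
        if( 0<=u  &&  0<=v  &&  u+v<=1  &&  0<tval )
            total = total - sign(adet)   # -sign(adet) = sign(<dir,n_i>) = +1 or -1
    }
    total
}
```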
Suppose now that $f: S \to \mathbb{R}^3$ is injective.
It is well-known (@Alexander1924, @Moise1977 p. 117, and @Bing1983 p. 161)
that $f(S)$ divides $\mathbb{R}^3$ into
an inside region and an outside region whose intersection is $f(S)$.
Moreover, the inside region is homeomorphic to a closed ball,
and $f(S)$ is the boundary of that ball.
**Definition:**
Let $B \subseteq \mathbb{R}^3$ be a closed set and $b_0 \in B$.
Then $B$ is _starshaped at_ $b_0$ iff
for any $b \in B$ the segment $[b_0,b] \subseteq B$.
**Definition:**
Let $B \subseteq \mathbb{R}^3$ be a closed set with interior
and $b_0 \in \operatorname{int}(B)$.
Then $B$ is _strictly starshaped at_ $b_0$ iff
for any $b \in B$ the half-open segment
$[b_0,b) \subseteq \operatorname{int}(B)$.
So to be strictly starshaped, the segment $[b_0,b]$
cannot intersect $\partial B$ except possibly at $b$.
We now want to extend the concept of strictly starshaped
from bodies $B$ to surfaces $f(S)$.
**Definition:**
Let $f: S \to \mathbb{R}^3$ be as above, and $p \notin f(S)$.
Then $f(S)$ is _strictly starshaped at_ $p$ iff $f$ is injective
and the inside region $B$ is strictly starshaped at $p$.
Note that this definition forces $p$ to be in the interior of $B$.
**Theorem:** 
Let $f: S \to \mathbb{R}^3$ be as above, and $p \notin f(S)$.
Then these are equivalent:
<ol type='a'>
<li>$f(S)$ is strictly starshaped at $p$</li>
<li>$f$ is injective, and every ray based at $p$ intersects $f(S)$ in
exactly one point</li>
<li>the linking number of $f(S)$ and $p$ is +1 (resp. -1) and $p$
is in the negative (resp. positive) open halfspace of every facet of $f(S)$</li>
</ol>
For `raytrace2trans()` to work well, we want property (b) to be true
for the 2-transition surface $S_2$,
but its truth or falsity is not readily computable.
However, property (c) is easily computed, and if the surface fails the test,
then `raytrace2trans()` issues a warning
that the computed ray intersection may not be unique.
<br><br>
# Higher-Transition Points in the Cube $Q^n$
In Section 2, the 2-transition subcomplex $Q_2^n \subsetneq Q^n$ was defined;
it is the subset of points with 2 transitions.
We now want to define the number of transitions for _any_ point $x \in Q^n$.
When defining the subcomplex $Q_2^n$ above,
**Definition 2** used the set $J_2$ of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have two transitions or no transitions (jumps).
Let $J_\infty$ be the bigger set of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have a finite number of transitions.
The endpoints $\beta_0$ and $\beta_n$ are identified, to form a circle.
Equivalently, $J_\infty$ is the set of all indicator functions
$\mathbf{1}_{A^+}$ where $A^+$ is a finite disjoint union of arcs in the circle.
The symbol $\infty$ does not mean that there can be infinitely many transitions;
it means that the transition count is finite
but can be arbitrarily large.
The transition count is twice the number of arcs,
so for any $f \in J_\infty$
the transition count of $f$ is a well-defined even integer.
We now want to define the transition count of $x \in Q^n$ using
the function $p : J_\infty \twoheadrightarrow Q^n$,
which is defined exactly as in Section 2.
**Definition:** For any $x \in Q^n$ define
\begin{equation}
\text{transition count of} ~ x ~ := \min_{f \in p^{-1}(x)} \{ ~ \text{transition count of} ~ f ~ \}
\end{equation}
By this definition, all $x \in Q_2^n$ have 2 or 0 transitions.
But if $x \not\in Q_2^n$ then $x$ has more than 2 transitions.
Following @Burns2021, we say that $x$ is a _higher-transition point_.
Given an $x \in Q^n$, the transition count of $x$ is easily computable.
In the interval [0,1] the endpoints 0 and 1 are _boundary values_,
and all other values are _interior values_.
First consider the case when all coordinate values
of $x$ are 0 or 1, so $x$ is a vertex.
The maximum transition count is when 0 and 1 alternate.
A little consideration of small $n$ yields the following table of counts.
|               | 3 | 4 | 5 | 6 | 7 | ... | $n$ |
|:-------------:|:-:|:-:|:-:|:-:|:-:|:---:|:-:|
| max transition count for a vertex $x$ | 2 | 4 | 4 | 6 | 6 | ... | $2 \lfloor n/2 \rfloor$  |
At the other extreme is when all coordinate values are interior values,
so $x$ is an interior point.
A little consideration of small $n$ shows that
one can make $x = p(f)$ when $f$ has transition counts in this table:
|               | 3 | 4 | 5 | 6 | 7 | ... | $n$ |
|:-------------:|:-:|:-:|:-:|:-:|:-:|:---:|:-:|
| transition count for an interior point $x$ | 4 | 4 | 6 | 6 | 8 | ... | $2 \lfloor (n+1)/2 \rfloor$  |
Finally, consider the general $x \in Q^n$.
Find all runs of interior values,
and the 2 border values on either side of the run.
These 2 border values are either **equal** or **not equal**.
Let $r$ be the length of a run, and look up the number of transitions
for this specific run in this table:
|               | 1 | 2 | 3 | 4 | ... | $r$ |
|--------------:|:-:|:-:|:-:|:-:|:---:|:-:|
| equal border values | 2 | 2 | 4 | 4 | ... | $2 \lfloor (r+1)/2 \rfloor$ |
| unequal border values | 0 | 2 | 2 | 4 | ... | $2 \lfloor r/2 \rfloor$  |
The numbers in the header row are the possible lengths of the run of interior values.
For example, if the length of the run is 1 and the border values are equal,
the transition count for this run is 2.
Take the sum of these counts over all runs of interior values.
Next, strip out all interior values completely, to leave a circular sequence of 0s and 1s.
Compute the number of transitions of this "stripped" sequence in the usual way,
and add to the previous sum.
This final sum is the transition count for any $x \in Q^n$.
This shows that $p : J_\infty \twoheadrightarrow Q^n$ truly is surjective.
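Here is the whole algorithm as a sketch (illustration only; it assumes each coordinate is exactly 0, exactly 1, or strictly in between, with no floating-point tolerance):
```{r, echo=TRUE}
#  a sketch: transition count of any x in Q^n, following the algorithm above
transitioncount <- function( x ) {
    n = length(x)
    interior = (0 < x) & (x < 1)
    if( all(interior) )  return( 2 * floor( (n+1)/2 ) )   # the all-interior case
    #  rotate so position 1 holds a boundary value; the count is rotation-invariant
    k = which( !interior )[1]
    x = c( x[k:n], x[seq_len(k-1)] )  ;  interior = (0 < x) & (x < 1)
    count = 0
    r = rle( interior )              # runs of interior values
    ends = cumsum( r$lengths )  ;  starts = ends - r$lengths + 1
    for( m in which(r$values) ) {
        left  = x[ starts[m] - 1 ]                        # border value before the run
        right = x[ if( ends[m]==n ) 1 else ends[m]+1 ]    # border value after (circular)
        len   = r$lengths[m]
        count = count + if( left==right ) 2*floor((len+1)/2) else 2*floor(len/2)
    }
    y = x[ !interior ]               # strip interior values, then add the
    count + sum( y != c( y[-1], y[1] ) )   # circular 0/1 transitions
}
transitioncount( x1 )            #  2, as in Figure 2.1
transitioncount( c(0,1,0,1) )    #  4, as in the vertex table above
```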
We are mostly interested in the case when $L(x) \in \partial Z$,
and then $x$ has at most 2 interior values and
the length of a run of interior values is either 1 or 2.
From the above algorithm it is clear that for a fixed parallelogram
$\mathcal{P} \subset \partial Z$,
and for any $x$ that maps to the interior of $\mathcal{P}$,
the transition count of $x$ is a constant.
Thus we can write about the transition count for any
parallelogram $\mathcal{P} \subset \partial Z$.
Given indexes $i$ and $j$ of generators of $Z$, the transition count for
the corresponding parallelogram(s) in $\partial Z$ is fairly easy to compute.
The algorithm is in the proof of the theorem in Section 7.
Let $\mathcal{L}$ be the line through vertices $v_i$ and $v_j$ in
the generator polygon $P$.
Then the transition count is the number of times that $\mathcal{L}$ cuts $P$.
This algorithm is also present in @Brill1983.
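In code, this count can be read off from the circular sign changes of $\langle L(e_k), w \rangle$, where $w = L(e_i) \times L(e_j)$, since $v_k$ lies on the positive side of $\mathcal{L}$ exactly when this inner product is positive.
A sketch (illustration only, with a crude tolerance for floating-point zeros):
```{r, echo=TRUE}
#  a sketch: transition count for the pair {i,j} of column indexes of the
#  3 x n generator matrix mat, via sign changes around the generator polygon
transcount <- function( mat, i, j ) {
    gi = mat[ ,i]  ;  gj = mat[ ,j]
    w  = c( gi[2]*gj[3]-gi[3]*gj[2], gi[3]*gj[1]-gi[1]*gj[3], gi[1]*gj[2]-gi[2]*gj[1] )
    d  = as.numeric( w %*% mat )           # <L(e_k),w>;  0 for k = i and j
    s  = sign(d)
    s[ abs(d) < 1.e-9 * max(abs(d)) ] = 0  # crude tolerance for numerical zeros
    s  = s[ s != 0 ]                       # drop i, j, and coplanar generators
    sum( s != c( s[-1], s[1] ) )           # circular sign changes
}
```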
<br><br>
# Parallelograms in $S_2$ and $\partial Z$, revisited
By the previous theorem, if the generator polygon $P$ is **not** both simple and convex,
then $S_2 \ne \partial Z$.
This means there is a parallelogram in $S_2$ that is in the interior of $Z$.
Let $\{ i,j \}$ be an unordered pair of indexes for such a parallelogram.
Consider the slabs $\mathcal{S}^{ \{i,j\} }$ and $\mathcal{S}_2^{ \{i,j\} }$ defined above.
For simplicity, drop the $\{ i,j \}$ to get just $\mathcal{S}$ and $\mathcal{S}_2$.
Since this parallelogram in $S_2$ is in the _interior_ of the slab,
the functional defining the slabs is not maximized on the parallelogram,
so we call it _deficient_.
The difference between the functional values on the two parallelograms is
called the _deficit_.
The corresponding parallelogram in $\partial Z$
is called _abundant_ because every $z$ in this parallelogram
is the image under $L$ of a higher-transition $x \in Q^n$.
To summarize, each deficient parallelogram in $S_2$ has a matching
abundant parallelogram in $\partial Z$.
This is illustrated in the left side of the next figure.
The bold line segments correspond to the parallelograms and their antipodals.
The outward-pointing normals define the functionals to be maximized.
The 2 slabs are labeled.
Note $\mathcal{S}_2$ is a proper subset of $\mathcal{S}$;
in symbols $\mathcal{S}_2 \subsetneq \mathcal{S}$.
```{r, echo=FALSE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=3, fig.cap='Figure 10.1', out.width="100%", cache=FALSE }
plot_slabs <- function()
    {
    plot.new()
    xlim = c(-10,10)
    ylim = c(-7,7)
    theta   = 20 * pi/180
    rot2x2  = matrix( c(cos(theta),sin(theta),-sin(theta),cos(theta)), 2, 2 )
    plot.window( xlim, ylim, asp=1 )
    #   big slab
    x  = c(-15,15,15,-15)
    y   = c(-5,-5,5,5)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray90' )
    xya = cbind( c(1,5), c(4,5) )
    xyrot   = rot2x2 %*% xya
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "abundant", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "abundant", adj=c(0,3/2) )
    #   small slab
    ytop    = 2.5
    x  = c(-15,15,15,-15)
    y   = c(-ytop,-ytop,ytop,ytop)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray80', lty=2 )
    xyd = cbind( c(-ytop,ytop), c(0,ytop) )
    xyrot   = rot2x2 %*% xyd
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "deficient", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "deficient", adj=c(0,3/2) )
    xya     = cbind( c(-6,5), c(-6,5+2) )
    xyrot   = rot2x2 %*% xya
    arrows( xyrot[1,1], xyrot[2,1],  xyrot[1,2], xyrot[2,2], length=0.1, angle=20 )
    arrows( -xyrot[1,1], -xyrot[2,1],  -xyrot[1,2], -xyrot[2,2], length=0.1, angle=20 )
    
    #   label both slabs
    x0  = 7
    
    xy  = cbind( c( x0, (5+ytop)/2 ),  c( x0, -(5+ytop)/2 ) )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1, ], xyrot[2, ], expression( S ) )
    xy  = c( x0, 0 )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1], xyrot[2], expression( S[2] ) )
    points( 0, 0, pch=20 )
    #return( TRUE )
    }
plot_slab <- function()
    {
    plot.new()
    xlim = c(-10,10)
    ylim = c(-8,8)
    theta   = 20 * pi/180
    rot2x2  = matrix( c(cos(theta),sin(theta),-sin(theta),cos(theta)), 2, 2 )
    plot.window( xlim, ylim, asp=1 )
    #   big slab
    x  = c(-15,15,15,-15)
    y   = c(-5,-5,5,5)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray80' )
    xya = cbind( c(1,5), c(4,5) )
    xyrot   = rot2x2 %*% xya
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "coincident", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "coincident", adj=c(0,3/2) )
    #   arrows
    xya     = cbind( c(-6,5), c(-6,5+2) )
    xyrot   = rot2x2 %*% xya
    arrows( xyrot[1,1], xyrot[2,1],  xyrot[1,2], xyrot[2,2], length=0.1, angle=20 )
    arrows( -xyrot[1,1], -xyrot[2,1],  -xyrot[1,2], -xyrot[2,2], length=0.1, angle=20 )
    
    #   label slab
    xy  = c( 6, 0 )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1], xyrot[2], expression( S[2] == S ) )
    points( 0, 0, pch=20 )
    
    }
    
oldpar = par( mfrow=c(1,2)  , omi=c(0,0,0,0), mai=c(0,0.1,0,0.1) )
plot_slabs() ; plot_slab()
par( oldpar )
```
On the other hand,
if a parallelogram of $S_2$ is in the boundary of the slab $\mathcal{S}$,
then the two parallelograms are equal and we call them _coincident_.
It means that every $z$ in this parallelogram
is the image of an $x \in Q^n$ that has 2 (or 0) transitions.
This is illustrated in the right side of the above figure.
To get data about the abundant and coincident parallelograms in $\partial Z$,
use the function `transitionsdf()`, for example:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE }
matgen = colorimetry.genlist[[2]]   # the CIE 1931 CMFs at 1nm step
matgen = 100 * matgen / sum( matgen[2, ] )   # it's traditional to scale so the center has Y=50
zono =  zonohedron( matgen )
getcenter(zono) ; dim( getmatrix( getsimplified( getmatroid(zono) ) ) )
transitionsdf( zono )
```
The number of (simplified) generators of `zono` is $n{=}340$,
indexed from 360 to 699.
So the total number of parallelograms is $340(340{-}1)=115260$.
Data in the first row are for the coincident parallelograms, with 2 transitions.
These form the majority of $\partial Z$,
with about 33110/34669 = 95.5% of the surface area.
Data in the rows below are for the abundant parallelograms.
Higher transition counts typically have fewer parallelograms.
The last column has an example of a parallelogram with the given
transition count.
For example, there are 1802 parallelograms in $\partial Z$ with 8 transitions,
and the one given by generators $\{ 570,608 \}$ is one of them.
This means that the line through $v_{570}$ and $v_{608}$ cuts $P$ in 8 places.
Here is a plot of the point in the cube that maps to the center
of the parallelogram:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=3, fig.cap='Figure 10.2', out.width="100%", cache=FALSE }
oldpar = par( omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
gnd = getground( getsimplified( getmatroid(zono) ) )
pcube = boundarypgramdata( zono, c(570,608), cube=TRUE )$pcube
xlim = range( gnd[which(0<pcube)] ) + 20*c(-1,1)
plot( xlim, c(0,1), type='n', xlab='', ylab='', las=1, lab=c(5,10,7), cex.axis=0.8 )
grid( col='gray', lty=1 )
lines( gnd, pcube, type='s' )
par( oldpar )
```
Note that the values at 570 and 608 are both 1/2, and all the other values are 0 or 1.
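As a cross-check, the following sketch (reusing `transcount()` from the sketch in Section 9, and the objects `zono` and `gnd` computed above) recomputes the transition count for $\{ 570,608 \}$, and also the half-widths of the slabs $\mathcal{S}$ and $\mathcal{S}_2$ for this pair, whose difference is the deficit:
```{r, echo=TRUE}
matsimp = getmatrix( getsimplified( getmatroid(zono) ) )
i = which( gnd==570 )  ;  j = which( gnd==608 )
transcount( matsimp, i, j )    #  should be 8, matching transitionsdf() above
gi = matsimp[ ,i]  ;  gj = matsimp[ ,j]
w  = c( gi[2]*gj[3]-gi[3]*gj[2], gi[3]*gj[1]-gi[1]*gj[3], gi[1]*gj[2]-gi[2]*gj[1] )
d  = as.numeric( w %*% matsimp )
k  = setdiff( 1:ncol(matsimp), c(i,j) )
arc1 = k[ i < k & k < j ]      # the circular arc of indexes between i and j
arc2 = setdiff( k, arc1 )      # the complementary arc
halfS  = sum( abs(d[k]) ) / 2                     # half-width of the slab S
halfS2 = abs( sum(d[arc1]) - sum(d[arc2]) ) / 2   # half-width of the slab S_2
c( halfS=halfS, halfS2=halfS2, deficit=halfS-halfS2 )
```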
<br>
The following figure is a helpful 3D visualization
of *all* the abundant parallelograms:
```{r, rgl=TRUE, dev='png', echo=TRUE,  message=TRUE,  warning=FALSE, fig.width=6.5, fig.height=4, fig.cap='Figure 10.3', fig.keep='last', fig.show='hold', out.width="100%", cache=FALSE }
library( orientlib )
user3x3 = orientlib::rotmatrix( orientlib::eulerzyx( -0.249417, 0.7116067, 2.324364 ) )@x
dim(user3x3) = c(3,3)
par3d( userMatrix=rotationMatrix(matrix=user3x3), zoom=0.35 )
plothighertrans( zono )
```
In this figure, the abundant parallelograms are color-coded following @Burns2021;
dark red for 4, yellow for 6, blue for 8, and purple for 10 transitions.
The view is looking up the "neutral axis" with a black point at 0,
a white point at the opposite point, and a gray point at the
center of symmetry.
The central symmetry of the abundant parallelograms is clear.
Compare this with Figure 7 in @Burns2021.
<br><br><br>
# References
<div id="refs"></div>
<br><br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/transitions.Rmd 
 | 
					
	## ----setup, include=FALSE---------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
# if( !file.exists("figs") ) dir.create("figs")
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
## ---- echo=TRUE,  message=FALSE---------------------------------------------------------------------------------------
library(zonohedra)
library(rgl)
## ---- echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=4, fig.cap='polar zonohedra with 5 generators (left) and 25 generators (right)    [both of these are interactive WebGL widgets]', fig.keep='none', fig.show='hide', out.width="100%", cache=FALSE----
rgl::mfrow3d( 1, 2 )
pz5 = polarzonohedron( 5 ) ;  plot( pz5, ewd=5 )
rgl::next3d()
plot( polarzonohedron( 25 ), ewd=3 )
rgl::rglwidget( webgl=TRUE )
## ---- echo=TRUE, message=FALSE----------------------------------------------------------------------------------------
getmatrix( pz5 )
## ---- echo=TRUE, message=FALSE----------------------------------------------------------------------------------------
classics.genlist
## ---- echo=TRUE, message=TRUE-----------------------------------------------------------------------------------------
mat = classics.genlist[['TC']] ; mat
## ---- rgl=TRUE, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=5, out.width="100%", fig.align="center", fig.cap='truncated cuboctahedron      [This is an interactive WebGL widget]', fig.keep='last', fig.show='hide', cache=FALSE----
rgl::par3d( userMatrix = rotationMatrix( -20*pi/180, 0, 1, 1) )
zono = zonohedron( mat )
plot( zono, type='f' )
rgl::rglwidget( webgl=TRUE )
## ---- echo=TRUE, message=FALSE, warning=FALSE-------------------------------------------------------------------------
library(gifski)
#   zono        the zonohedron
#   id          unique ID for this animation, a positive integer
#   fps         frames per second
#   duration    of the animation, in seconds
#   revolutions number of revolutions
#   vpsize      viewport size = (width,height)
spinit <- function( zono, index, fps=5, duration=8, revolutions=1, vpsize=c(480,480) ) {
#  enlarge viewport
wr = par3d( "windowRect" ) 
par3d( windowRect = c( wr[1:2], wr[1:2] + vpsize ) )
pathtemp = "./figs" ;   if( ! file.exists(pathtemp) ) dir.create(pathtemp)  # make temp folder
#  make a lot of .PNG files in pathtemp
movie3d( spin3d( getcenter(zono), rpm=revolutions*60/duration ), duration=duration, fps=fps, startTime=1/fps,
           convert=F, movie='junk', dir=pathtemp, verbose=F, webshot=F )
#  combine all the .PNGs into a single .GIF
pathvec = dir( pathtemp, pattern="png$", full=T )
gif_file = sprintf( "./figs/animation%g.gif", index ) 
# if( file.exists(gif_file) )  file.remove( gif_file )
out = gifski::gifski( pathvec, gif_file=gif_file, delay=1/fps, progress=F, width=vpsize[1], height=vpsize[2] )
res = file.remove( pathvec )  # cleanup the .PNG files, leaving just the .GIF
return( out )
}
## ---- echo=TRUE, message=TRUE, warning=TRUE, fig.cap='optimal color solid', fig.keep='last', fig.show='hide', cache=FALSE----
# colorimetry.genlist[[1]] is a 3x81 matrix with the CIE 1931 CMFs at 5nm step
zono5 = zonohedron( colorimetry.genlist[[1]] )
plot( zono5, type='f' )
gif_file = spinit( zono5, 2, vpsize=c(256,256) )
## ---- echo=FALSE, results='asis'----------------------------------------------
options( old_opt )
sessionInfo()
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/zonohedra-guide.R 
 | 
					
	---
title: "zonohedra User Guide"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{zonohedra User Guide}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 750px;     /* make a little wider, default is 700px */
}
/*
div.figure {
 border: 1px;
 border-style: groove;
}
*/
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
# if( !file.exists("figs") ) dir.create("figs")
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
# Introduction
A _zonohedron_, roughly speaking, is the projection of
a high-dimensional cube to $\mathbb{R}^3$.
For a precise definition see the [Zonotopes](zonotopes.html) vignette,
section 1.3.
A zonohedron is a special type of convex polyhedron.
The goal of this package is to construct any zonohedron,
but especially the ones in these 2 families:
<ul>
<li> the classical zonohedra, with high symmetry </li> 
<li> zonohedra that arise naturally from colorimetry, which may contain hundreds of generators, but little symmetry</li> 
</ul>
In the first case, 13 classical zonohedra have been taken from
@wikiZonohedron
and are built into the package.
In the second case, an _optimal color solid_ is viewed as a zonohedron;
this connection was discovered by Paul Centore
and is explained very clearly in @Centore2013.
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
library(rgl)
```
The package dependencies are:
<ul>
<li>**rgl**  @rgl - for 3D plotting</li>
<li>**microbenchmark**  @microbenchmark  - is suggested for its high-precision timer</li>
<li>**logger**  @logger - for event logging</li>
</ul>
Some of the figures below are displayed with **WebGL** -
a JavaScript API for rendering interactive 2D and 3D graphics.
Try using the left mouse button to rotate and the scroll wheel to zoom.
<br><br>
# Polar Zonohedra
The generators of a polar zonohedron are particularly simple -
they are equally distributed on a circle that
is in a plane parallel to the xy-plane and
whose center is on the z-axis.
Construct polar zonohedra with 5 and 25 generators and plot them.
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=4, fig.cap='polar zonohedra with 5 generators (left) and 25 generators (right)    [both of these are interactive WebGL widgets]', fig.keep='none', fig.show='hide', out.width="100%", cache=FALSE }
rgl::mfrow3d( 1, 2 )
pz5 = polarzonohedron( 5 ) ;  plot( pz5, ewd=5 )
rgl::next3d()
plot( polarzonohedron( 25 ), ewd=3 )
rgl::rglwidget( webgl=TRUE )
```
In these 2 plots, the black dot is the origin,
the vertices nearest the origin are the generators
(5 on the left and 25 on the right),
and the white dot is the point (0,0,$\pi$).
Each of the generators is assigned a unique color,
and every other edge with that color is parallel to the generator.
All parallelograms with an edge of that color form the
_zone_ or _belt_ for that generator.
Each belt is a topological annulus.
For more details on these polar zonohedra, see @Chilton1963.
Print the generators of the first zonohedron `pz5`;
they are the columns of this 3x5 matrix.
```{r, echo=TRUE, message=FALSE}
getmatrix( pz5 )
```
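As a cross-check on the geometry just described, here is a small sketch that
rebuilds a candidate generator matrix from scratch.
The scaling is an assumption on my part (height $\pi$ split evenly over the
$n$ generators, consistent with the white dot at $(0,0,\pi)$),
so compare the output with `getmatrix(pz5)` above before relying on it.
```{r, echo=TRUE, message=FALSE}
# hypothetical reconstruction: n generators equally spaced on a circle,
# each at height pi/n, so the columns sum to the white point (0,0,pi)
polarmat <- function( n, height=pi )  {
theta = 2*pi*(0:(n-1))/n
(height/n) * rbind( cos(theta), sin(theta), rep(1,n) )
}
polarmat( 5 )
```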
A function similar to `polarzonohedron()` is `regularprism()`.
<br><br>
<br><br>
# Classic Zonohedra
There are 13 classic zonohedra available in the package,
as a list of 3xN matrices,
where N is the number of generators.
The global data variable is 
`classics.genlist`, with S3 class `'genlist'`.
The 13 matrices in the list are taken from @Eppstein.
```{r, echo=TRUE, message=FALSE}
classics.genlist
```
Extract the matrix of generators for the `truncated cuboctahedron`,
which is abbreviated by `TC`.
```{r, echo=TRUE, message=TRUE}
mat = classics.genlist[['TC']] ; mat
```
Create the truncated cuboctahedron and plot it, with filled faces.
```{r, rgl=TRUE, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=5, out.width="100%", fig.align="center", fig.cap='truncated cuboctahedron      [This is an interactive WebGL widget]', fig.keep='last', fig.show='hide', cache=FALSE }
rgl::par3d( userMatrix = rotationMatrix( -20*pi/180, 0, 1, 1) )
zono = zonohedron( mat )
plot( zono, type='f' )
rgl::rglwidget( webgl=TRUE )
```
<br>
Before continuing, define function `spinit()` used for creating animated GIFs.
```{r, echo=TRUE, message=FALSE, warning=FALSE}
library(gifski)
#   zono        the zonohedron
#   index       unique ID for this animation, a positive integer
#   fps         frames per second
#   duration    of the animation, in seconds
#   revolutions number of revolutions
#   vpsize      viewport size = (width,height)
spinit <- function( zono, index, fps=5, duration=8, revolutions=1, vpsize=c(480,480) ) {
#  enlarge viewport
wr = par3d( "windowRect" ) 
par3d( windowRect = c( wr[1:2], wr[1:2] + vpsize ) )
pathtemp = "./figs" ;   if( ! file.exists(pathtemp) ) dir.create(pathtemp)  # make temp folder
#  make a lot of .PNG files in pathtemp
movie3d( spin3d( getcenter(zono), rpm=revolutions*60/duration ), duration=duration, fps=fps, startTime=1/fps,
           convert=F, movie='junk', dir=pathtemp, verbose=F, webshot=F )
#  combine all the .PNGs into a single .GIF
pathvec = dir( pathtemp, pattern="png$", full=T )
gif_file = sprintf( "./figs/animation%g.gif", index ) 
# if( file.exists(gif_file) )  file.remove( gif_file )
out = gifski::gifski( pathvec, gif_file=gif_file, delay=1/fps, progress=F, width=vpsize[1], height=vpsize[2] )
res = file.remove( pathvec )  # cleanup the .PNG files, leaving just the .GIF
return( out )
}
```
<br><br>
# Colorimetry Zonohedra
In colorimetry, an optimal color solid is a zonohedron.
```{r, echo=TRUE, message=TRUE, warning=TRUE, fig.cap='optimal color solid', fig.keep='last', fig.show='hide', cache=FALSE }
# colorimetry.genlist[[1]] is a 3x81 matrix with the CIE 1931 CMFs at 5nm step
zono5 = zonohedron( colorimetry.genlist[[1]] )
plot( zono5, type='f' )
gif_file = spinit( zono5, 2, vpsize=c(256,256) )
```
![](figs/animation2.gif){width=60%}
In this figure, the black dot is the _black point_ [0,0,0].
The white dot is the _white point_, i.e. the sum of all the generators
(the row sums of the 3xN generating matrix).
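Since the generators are the columns of the generating matrix,
the white point can be checked with one line:
```{r, echo=TRUE, message=FALSE}
rowSums( getmatrix(zono5) )   # the white point: the sum of all the generators
```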
<br><br>
# Future Work
Here are a few possible improvements and additions.
**export**   
There should be a way to export a zonohedron as
a quadrilateral mesh in some standard format(s).
**vignettes**  
There should be more vignettes.
One idea is to show ways
to examine individual hyperplanes and facets of a zonohedron.
Another idea is to display some interesting Minkowski sums of a few
classic zonohedra.
<br><br>
# References
<div id="refs"></div>
<br><br>
\Appendix
<br><br>
## Appendix A - Methods
The constructor `zonohedron()` uses the optimizations in
Paul Heckbert's memo @Heckbert1985.
The key step is sorting points that lie on a great circle on the sphere.
This efficient method is $O(N^2 \log(N))$,
whereas the naive method is $O(N 2^N)$.
The central symmetry is used whenever possible,
and when used this can speed things up by a factor of 2.
To further speed things up, many of the methods use C/C++.
The function `grpDuplicated()` was written by Long Qu,
with a small modification of the return value by myself.
It is written in C/C++ and is implemented with `std::unordered_map`.
The code was taken from the discontinued package **uniqueAtomMat**,
see @uniqueAtomMat.
<br><br>
## Appendix B - Logging
Logging is performed using the package **logger**, see @logger.
This is a powerful package that allows a separate configuration
for logging from within **zonohedra**, and that is what I have done.
During package loading, the logging threshold is changed from `INFO` to `WARN`.
To change it back again, one can execute:  
`log_threshold( INFO, namespace="zonohedra" )`
The layout callback function is customized;
it adds the name of the calling function to the message.
To install your own layout function, you can execute:  
`log_layout( <your function>, namespace="zonohedra" )`
The appender callback function is also customized;
it comes to an immediate stop if the message level is `ERROR` or `FATAL`.
To return to the default behavior, you can execute:  
`log_appender( appender_console, namespace="zonohedra" )`
The formatter callback function is forced to be `formatter_sprintf()`;
this should not be changed.
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/zonohedra-guide.Rmd 
 | 
					
	## ----setup, include=FALSE---------------------------------------------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
## ---- echo=TRUE,  message=FALSE---------------------------------------------------------------------------------------
library(zonohedra)
## ---- echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE----
zono =  polarzonogon( 14, 4 )
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, elabels=T )
par( oldpar )
## ---- echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE----
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, tiling=T, elabels=T, tlabels=T )
par( oldpar )
## ---- echo=FALSE, results='asis'----------------------------------------------
options( old_opt )
sessionInfo()
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/zonotopes.R 
 | 
					
	---
title: "Zonotopes"
author: "Glenn Davis"
date: "`r Sys.Date()`"
header-includes:
#  - \usepackage{amsmath}
#  - \usepackage{amssymb}
#  - \usepackage{amsthm}
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: true
#  includes:
#    in_header: preamble.tex    
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
vignette: >
  %\VignetteIndexEntry{Zonotopes}
  %\VignetteEngine{knitr::rmarkdown}
---
\newcommand{\argmax}{\mathop{\mathrm{argmax}}\limits}
\newcommand{\max}{\mathop{\mathrm{max}}\limits}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{assumption}{Assumption}
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
This vignette is a long-winded mathematical exposition on zonotopes
that concentrates on inversion.
Discussion of software is delayed until the end.
Featured functions are:
`zonoseg()`, `zonogon()`, `zonohedron()`, `invert()`, and `invertboundary()`.
# Basic Concepts
The emphasis in this vignette is on the concepts needed to understand
the inversion functions in the **zonohedra** package.
Much of this is based on @Ziegler2012.
## supporting hyperplanes
A _supporting hyperplane_ of a compact set $C$ in Euclidean space $\mathbb{R}^n$ is a hyperplane $P$ that has these properties:
<ol>
<li>$P$ intersects $C$</li>
<li>$C$ is entirely contained in one of the two closed half-spaces defined by $P$</li>
</ol>
Note that the 2 properties imply that the intersection
$P \cap C$ is a subset of the boundary of $C$.
<br>
If the compact set is a _convex body_, i.e. convex with non-empty interior,
then two equivalent properties are given by the following:
**Theorem:** 
If $B$ is a closed convex body with interior,
then $P$ is a supporting hyperplane of $B$ iff $P$ has these properties:
<ol type='i'>
<li>$P$ intersects $B$</li>
<li>$P$ does **not** intersect the interior of $B$</li>
</ol>
**Proof:**   
not ii. $\implies$ not 2.  
Let ii. be false, so hyperplane $P$ _does_ intersect $int(B)$, at a point $p$.
Then there is an open ball centered at $p$, and the ball is inside $B$.
There are clearly points in the ball in _both_ halfspaces,
and so 2. is false.
not 2. $\implies$ not ii.   
Let 2. be false, so there are points $b^-, b^+ \in B$ that are in
_different_ open halfspaces.
Let $b_i$ be a point in $int(B)$.
If $b_i \in P$ then ii. is false and we are done.
Otherwise, either $b^-$ or $b^+$ is in a different halfspace than $b_i$.
Take it to be $b^+$, w.l.o.g.
Since $b_i$ and $b^+$ are in opposite halfspaces,
the segment $[b_i,b^+]$ intersects $P$;
let $c_i$ be this point of intersection.
There is an open ball in $B$ centered at $b_i$.
Let $C$ denote the convex hull of $b^+$ and this ball - a partial open cone.
By convexity of $B$, $C$ is in $B$.
There is a scaled down open ball centered at $c_i$ and contained in $C$.
Thus $c_i \in P \cap int(B)$ and ii. is false.
$\square$
## faces
**Definition:**   
A (proper) _face_ $F$ of a compact set $C \subset \mathbb{R}^n$
is a subset of $C$ that has these 3 equivalent properties:
<ol>
<li>$F = C \cap P$ for some supporting hyperplane $P$</li>
<li>$F = \argmax_{x \in C}  \langle x,w \rangle$ for some non-zero normal vector $w$</li>
<li>$F = \argmax_{x \in C}  \lambda(x)$ for some non-zero linear functional $\lambda : \mathbb{R}^n \to \mathbb{R}$</li>
</ol>
The equivalence of 1 and 2 is straightforward,
and the equivalence of 2 and 3 is trivial.
The entire set $C$ is considered to be an (improper) face.
The _dimension of a face_ is the dimension of the affine subspace
spanned by the face.
From now on, we always assume that the dimension of $C$ is $n$,
which is equivalent to $C$ having an interior.
A _d-face_ is a face of dimension _d_.
So a _0-face_ is a _vertex_, and a _1-face_ is an _edge_.
A _facet_ is an ($n{-}1$)-face, and a maximal proper face.
Note that every face of the cube $[0,1]^n \subset \mathbb{R}^n$
is a cube of smaller dimension;
in fact the dimension is the number of 0s in the normal vector $w$ from
part 2 of the above definition.
Let $A : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ be a
surjective affine map (so $m \le n$),
and let $C':= A(C)$.
**Theorem:** If $F'$ is a face of $C'$, then $A^{-1}(F')$  is a face of $C$.
Stated in words, the affine preimage of a face is a face.
**Proof:**
Use part 3 of the above definition so $F' = \argmax_{y \in C'} \lambda(y)$,
where $\lambda$ is a non-zero linear functional on $\mathbb{R}^m$.
Let $\mu := \max_{y \in C'} \lambda(y)$.
Now $A^{-1}(F') = A^{-1}( \lambda^{-1}(\mu) ) = (\lambda \circ A)^{-1}(\mu)$.
But $\lambda \circ A$ is a non-zero linear functional
on $\mathbb{R}^n$, plus a constant.
$\square$
See also @Ziegler2012, Lemma 7.10.
## a zonotope and its generators
**Definition:**
A _zonotope_ $Z$ is a set of the form $L([0,1]^n) + z_0$
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ is a surjective linear map. 
Simply stated, a zonotope is a linear image of a cube plus a translation
(an affine image of a cube).
Since the cube is convex, the zonotope is also convex.
The $n$ _generators_ of $Z$ are the images of the $n$ elementary vectors
$L(e_1), ... , L(e_n)$.
A point $z \in Z$ iff
$z = \alpha_1 L(e_1) ~+~ ... ~+~ \alpha_n L(e_n) + z_0$ with all $\alpha_i \in [0,1]$.
A zonotope is centrally symmetric about the point $L(1/2,...,1/2) + z_0$.
By reflecting through the center of symmetry,
each facet of $Z$ has a corresponding _antipodal_ facet.
The facets come in antipodal pairs.
Given a face $F$ of zonotope $Z$, the preimage of $F$ is a face $F'$
of the cube.
But every face of a cube is also a cube, and so $F$ is also a zonotope.
Let the normal vector of the supporting hyperplane of $F'$ be $w$.
Then the vectors $\{ \ L(e_i) | w_i=0 \ \}$ are all parallel
to the face $F$, and in fact they generate the linear subspace parallel to $F$.
We call $\{ \ L(e_i) | w_i=0 \ \}$ the _generators_ of $F$.
An important fact:
the number of generators of $F$ is the dimension of the preimage $F'$.
Note that a face of dimension $d$ may have _more_ than $d$ generators,
because they may be linearly dependent.
For a parallelogram face, even if no generators are 0, the face may have more
than 2 generators because some may be multiples of others.
If the dimension of $Z$ is $m$, we call it an _m-zonotope_.
**Theorem:** If $K$ is the convex hull of a finite set of points in $\mathbb{R}^n$, with $n \ge 3$.
Then $K$ is an $n$-zonotope iff all facets of $K$ are ($n{-}1$)-zonotopes.
For a proof of this hard result, plus much more, see @Bolker1969.
A zonotope of dimensions 1, 2, and 3
is called a _zonoseg_ , _zonogon_, and _zonohedron_, respectively.
The term "zonoseg" is mine, since I could not find a term for it in the literature.  Geometrically a zonoseg is just a line segment.
A zonoseg has only two faces - the endpoints of the segment.
A zonogon is a convex polygon with 0-faces (vertices) and 1-faces (edges).
Since the dimension of an edge is 1 less than the
dimension of the zonogon, an edge of a zonogon is also a facet.
It can be shown that a convex polygon is a zonogon iff it is centrally symmetric.
A zonohedron has 0-faces (vertices), 1-faces (edges), and 2-faces (facets).
All the facets are zonogons.
A parallelogram facet is called _trivial_,
and facets with more than 4 edges are _non-trivial_.
## convex cones and zonotopes
Let $\mathbb{R}^n_{\ge 0} := \{ \ (x_1,x_2,...x_n) \ | \ x_i \ge 0 \ \}$
denote the non-negative orthant in $\mathbb{R}^n$.
A _convex cone_ $K$ is a set of the form $K = L(\mathbb{R}^n_{\ge 0})$,
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$
is a surjective linear map.
The $n$ _generators_ of $K$ are the images of the $n$ elementary vectors
$L(e_1), ... , L(e_n)$.
$K$ is the set of all non-negative linear combinations of the generators.
If $K$ is a subset of a closed linear halfspace, it is called _salient_.
If $K$ is a subset of an open linear halfspace
(except for the vertex 0), it is called _pointed_.
These properties are equivalent to 0 being in the boundary of $K$,
and being a vertex of $K$, respectively.
Obviously, pointed implies salient, but salient does not imply pointed.
Given a zonotope $Z = L([0,1]^n) + z_0$ the map $L$ also defines
a convex cone $K$.
$Z$ is a subset of $K$, after translating $Z$ by $-z_0$.
We carry the two above properties of $K$ over to $Z$.
It is straightforward to show that
$Z$ is _salient_ iff $z_0$ is in the boundary of $Z$,
and 
$Z$ is _pointed_ iff $z_0$ is a vertex of $Z$.
If $Z$ is pointed then there is a "cutting plane" that has $z_0$
on one side, and all the other vertices on the other side.
The intersection of this cutting plane and $Z$ is called the
_vertex figure_ of $Z$ at $z_0$.
The vertex figure is actually more general,
and is defined for any vertex of a convex polyhedron,
see @Ziegler2012, p. 54.
In the case that $Z$ is a zonohedron,
the vertex figure at $z_0$ is a convex polygon that we call the
_generator polygon_.
The polygon is only unique up to a 2D projective transformation.
## matroids and zonotopes
This section assumes some knowledge of _matroids_;
for background on them see the matroids vignette.
Given a zonotope $Z = L([0,1]^n) + z_0$ as above,
the generators define a matroid $M$.
Since $L$ is surjective, $\mathrm{rank}(M) = m$.
A hyperplane of $M$ corresponds to a pair of antipodal facets of $Z$
(the concept of _hyperplane_ here is the one used in matroid theory).
Assume now that $m=3$, so $Z$ is a zonohedron
and all its facets are zonogons.
If $M$ is simple, then the number of sides
of a zonogon facet is twice the number of points in the corresponding hyperplane.
So a parallelogram corresponds to a hyperplane with 2 points,
which is called a _trivial_ hyperplane.
# Inversion
As before,
let $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ be a surjective linear map,
and let $Z := L([0,1]^n) + z_0$ for some $z_0 \in \mathbb{R}^m$.
From this setup, given a point $z \in Z$,
we know that the equation
\begin{equation}\tag{$\star$}
L(x) + z_0 = z  ~~~~ \textrm{for} ~ x \in [0,1]^n
\end{equation}
has a solution $x$.
The rest of this section looks at the solutions of
($\star$) in more depth.
## a uniqueness theorem
In this section we consider the question:
When is the solution of $(\star)$ unique?
In the interior case, the answer is straightforward.
**Lemma:**
If $z$ is in the interior of $Z$, then the solution of $(\star)$ is unique iff $n=m$.
**Proof:**
If $n=m$ then $L$ is invertible so we are done.
If $n>m$ the nullspace of $L$ has positive dimension $n-m$.
By Theorem 4.2 of @Davis2018, we can pick an $x \in \operatorname{int}([0,1]^n)$
that satisfies $(\star)$.
Let $U \subset [0,1]^n$ be an open ball around $x$;
the intersection of $U$ with the affine subspace $x + \operatorname{null}(L)$
is an infinite set, and every point of it solves $(\star)$.
$\square$
For $z \in Z$, let $F_z$ be the smallest face that contains $z$,
i.e. the intersection of all faces that contain $z$.
It is clear that $z$ is in the _relative interior_ of $F_z$,
i.e. $z \in \operatorname{relint}(F_z)$.
At the extremes, if $z$ is a vertex then $F_z$ is $\{ z \}$,
and if $z$ is in the interior of $Z$, then $F_z$ is $Z$
(here we allow $Z$ itself as an improper face).
The relative interiors of the faces form a partition of $Z$,
see @Ziegler2012, p. 61.
**Theorem:** Let $z$ and $Z$ and $F_z$ be as above.
Then the solution of $(\star)$
is unique iff the number of generators of $F_z$ is
equal to the dimension of $F_z$.
**Proof:**
The condition says that the dimension of the preimage of $F_z$
is the dimension of $F_z$.
Now apply the Lemma, with $Z$ replaced by the preimage of $F_z$.
$\square$
It is useful to reformulate the uniqueness theorem for the specific
case when $Z$ is a zonohedron ($m=3$).
**Theorem:** Let $z$ be in a zonohedron $Z$.
If $n>3$, then the solution of $(\star)$ is unique iff
none of the generators of $Z$ are 0 and:
$z$ is a vertex of $Z$  
or
$z$ is in an edge of $Z$, and the edge has one generator  
or
$z$ is in a parallelogram facet of $Z$, and the parallelogram has two generators
If the matroid of $Z$ is simple, i.e. no generators of $Z$
are 0 or multiples of each other, then this simplifies to:
**Theorem:** Let $z$ be in a zonohedron $Z$ whose matroid is simple.
If $n>3$, then the solution of $(\star)$ is unique iff
$z$ is in an edge or a parallelogram facet.
And for a zonogon we have:
**Theorem:** Let $z$ be in a zonogon $Z$ whose matroid is simple.
If $n>2$, then the solution of $(\star)$ is unique iff
$z$ is in the boundary of $Z$  (denoted by $\partial Z$).
## a right inverse on the boundary of a zonogon
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
```
Let $Z$ be a zonogon whose matroid is simple.
By the previous theorem there is a unique function
$\sigma : \partial Z \to [0,1]^n$ that is a
_right inverse_ for $x \mapsto L(x)+z_0$.
We have $L( \sigma(z) ) + z_0 = z$ for all $z \in \partial Z$.
Question: Is $\sigma()$ continuous ?
Well, on each edge it is linear, and so certainly continuous.
Moreover, at each vertex it is uniquely defined, and so the separate
linear maps on the edges must match up at the vertices.
So yes, $\sigma()$ is continuous; in fact it is _piecewise-linear_.
It is instructive to consider a very specific case.
Consider the figure:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE }
zono =  polarzonogon( 14, 4 )
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, elabels=T )
par( oldpar )
```
Denote the 4 generators by $z_1, z_2, z_3, z_4$;
these are labeled in the figure by just the indexes.
Along the bottom edge, $x_1$ increases from 0 to 1,
while the other $x$'s are 0.
When the first vertex $z_1=(1,0)$ is reached, $x_1$ remains at 1,
and on the 2nd edge $x_2$ increases from 0 to 1,
until the next vertex $z_1+z_2$, etc.
At any point on the lower boundary
$x = (1,...,1,\alpha,0, ... ,0)$;
i.e. a run of 1s, then an arbitrary $\alpha \in [0,1]$,
and then a run of 0s.
Both runs are allowed to be empty.
Similarly, along the upper boundary $x = (0,...,0,\alpha,1, ... ,1)$.
These two "low-pass" and "high-pass" filters are analogous
to Goethe's _edge colors_ (Kantenfarben), see @Koenderink p. 17.
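The lower-boundary formula $x = (1,...,1,\alpha,0, ... ,0)$ is easy to
realize in code.
Here is a minimal base-R sketch, independent of the package's own inversion
functions; it assumes that `getmatrix()` also applies to zonogons and that
the columns are in angular order, so the chunk is not evaluated.
```{r, echo=TRUE, eval=FALSE}
# sketch only: sigma on the lower boundary of the pointed zonogon above.
# A parameter t in [0,n] maps to x = (1,...,1, alpha, 0,...,0),
# with floor(t) leading 1s and alpha = t - floor(t)
mat = getmatrix( zono )   # assumed: 2 x 4 generators, ordered by angle
sigma_lower <- function( t, n=ncol(mat) )  {
x = numeric( n )
k = min( floor(t), n )
if( 1 <= k )  x[ 1:k ] = 1
if( k < n )   x[ k+1 ] = t - k
x
}
x = sigma_lower( 1.25 )   # a point on the 2nd edge
mat %*% x                 # the boundary point z = L(x), since z_0 = 0 here
```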
## extending the right inverse, using parallelogram tilings
In the previous section, the right inverse $\sigma()$ was only
defined on $\partial Z$.
Question: Is there a way to extend $\sigma()$ across the interior ?
The answer lies in parallelogram tilings.
Consider the figure:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE }
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, tiling=T, elabels=T, tlabels=T )
par( oldpar )
```
The labels inside the parallelogram tiles are the generators of the tiles.
For points inside the 3 tiles that meet 0, the values of $x_i$ are obvious.
For a point inside tile
<span style="color: red;">1,3</span>,
$x_2=1$ and $x_1$ and $x_3$ vary in [0,1].
The rule for a point $z$ is to locate the tile containing $z$
and then the _origin_ of the tile.
Next, locate a path of tile edges from 0 to the origin,
and that determines the $x$ coordinate values that are 1.
The $x$ coordinate values for the tile generators are the 2 coordinates
of $z$ in the tile relative to the origin, and all other $x$ values are 0.
For tile
<span style="color: red;">1,4</span>
there are 2 different paths to the origin of the tile,
but it doesn't matter since the $x$ indexes
on the different paths are the same: 2 and 3.
In general, two different paths are connected by a homotopy
that "crosses" one parallelogram at a time,
and each time, the 2 associated $x$ indexes are the same.
It is straightforward to verify that the right inverse $\sigma()$
defined by this rule is continuous.
The above tiling is just one of many,
and each different tiling generates a different right inverse.
For the 4-generator zonogon above, the number of different tilings is 8.
This sequence increases very rapidly with $n$;
for $n{=}8$ generators (16 sides) the number of
tilings is already more than $10^6$, see @A006245.
The above tiling is
an example of the _standard tiling_ in the **zonohedra** package,
which is generated by the following recipe.
Each generator is "lifted" to $\mathbb{R}^3$
by the mapping $(x,y) \mapsto (x,y,\sqrt{x^2+y^2})$.
The mapping "lifts" each generator to the cone $x^2 + y^2 = z^2$.
The lifted generators generate a zonohedron,
which has an upper half and a lower half.
The faces in the lower half are the ones that can be _seen_
from a viewpoint far below the $xy$-plane, see @Ziegler2012, p. 130.
The parallelogram facets in the _lower_ half are projected down to
$\mathbb{R}^2$ and these form the _standard tiling_.
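The lifting recipe can be sketched directly in code;
whether `getmatrix()` accepts a zonogon is an assumption here,
so the chunk is not evaluated.
```{r, echo=TRUE, eval=FALSE}
mat2 = getmatrix( zono )                            # assumed: 2 x n generators of the zonogon
lifted = rbind( mat2, sqrt( colSums( mat2^2 ) ) )   # lift onto the cone x^2 + y^2 = z^2
zlift = zonohedron( lifted )    # the lower facets of zlift project to the standard tiling
```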
The standard tiling has the following nice property:
if the zonogon is pointed,
and the generators are in order by angle (clockwise or counterclockwise),
then every point $\sigma(z)$ has 2 _transitions_,
and the run of 1s does not wrap around.
For the definition of a 2-transition point of the cube,
see the
[The 2-Transition Subcomplex and the 2-Transition Surface](transitions.html) vignette.
The 2-transition concept is important for zonohedra coming
from colorimetry.
The standard tiling is denoted by $T_{min}$ in @Henriques2007 p. 13,
where the 2-transition property is also noted in equation (12).
## a right inverse on the boundary of a zonohedron
In this section,
let $Z$ be a zonohedron whose matroid is simple, with $n{>}3$.
Assume for simplicity that all facets are parallelograms.
Then by a previous theorem there is a unique function
$\sigma : \partial Z \to [0,1]^n$ that is a
_right inverse_ for $x \mapsto L(x)+z_0$.
This right inverse is unique on the edges,
which implies that $\sigma()$ is continuous.
Each parallelogram is the image of a square in $[0,1]^n$,
and the squares are "glued" together on the edges
to form a "surface" (in fact a topological sphere)
embedded in $[0,1]^n$.
We see another example of this in the
[The 2-Transition Subcomplex and the 2-Transition Surface](transitions.html) vignette.
Now suppose that a facet of $Z$ is an arbitrary zonogon,
with a high number of generators.
Then by rotating this non-trivial facet to the plane,
and choosing the standard tiling, we can use the construction
in the previous section to extend the right inverse across this facet.
Once again, the right inverse is unique on the edges,
which implies that the extended $\sigma()$ is continuous.
To summarize this section,
a right inverse $\sigma()$ defined on $\partial Z$ always exists
and is continuous,
but is only unique up to the selected tiling of the
non-trivial facets of $Z$.
<br>
# Software
The above sections are mathematical in nature.
This section is about the implementation of the above ideas
in the software package **zonohedra**.
The package only supports zonotopes of dimensions 1, 2, and 3,
which are called _zonosegs_, _zonogons_, and _zonohedra_, respectively.
The extra generality of the translation $z_0$ turned out to be an unnecessary
complication.
Thus, in the package, the constructors
`zonoseg()`, `zonogon()`, and `zonohedron()` only take a matrix argument,
and not $z_0$.
Many of the above theorems require that the matroid associated with
$Z$ is simple.
In the non-simple matroid case, the package ignores all generators that are 0.
And for a multiple group, when all the generators are positive multiples
of each other, it replaces these generators by their sum.
When some generators are negative multiples of each other,
the situation is more complicated and not yet documented.
The zonohedron is then computed from these "simplified" generators,
whose matroid is simple.
In many calculations, the central symmetry is used to reduce storage.
For example, only one facet in a pair of antipodal facets needs to be stored,
and the other can easily be derived by reflection.
In some cases, the symmetry is also used to reduce computation time.
The section **extending the right inverse, using parallelogram tilings**
is implemented in the function `invert()`,
which takes the zonogon as argument.
The section **a right inverse on the boundary of a zonohedron**
is implemented in the function `invertboundary()`,
which takes the zonohedron as argument.
For a pointed zonohedron $Z$, the function `plotpolygon()` plots
the generator polygon for $Z$ at 0.
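To tie this to actual calls, here is a short example;
`raytrace()` is covered in the ray tracing vignette,
and the zonogon version `invert()` is analogous
(its exact signature is not shown here).
```{r, echo=TRUE, message=FALSE}
zono3 = zonohedron( classics.genlist[['TC']] )      # the constructor takes just a matrix
df = raytrace( zono3, getcenter(zono3), c(0,0,1) )  # shoot a ray up from the center
z  = df$point[1, ]                                  # a point on the boundary of Z
invertboundary( zono3, z )                          # recover x with L(x) = z
```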
<br>
# References
<div id="refs"></div>
<br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/doc/zonotopes.Rmd 
 | 
					
	
saveMunsell <- function( pathin= "../inst/extdata/Munsell-Finland-1600.txt", pathout="../inst/extdata/Munsell1600.txt" )
    {
    requireNamespace( "munsellinterpol" )
    requireNamespace( "spacesXYZ" )
    linevec     = readLines( pathin )
    idx = grep( "^ABB", linevec[1:100] )   # ; print(idx)
    if( length(idx)==0 )    return(FALSE)
    linevec = linevec[ idx[1]:length(linevec) ]
    linesexpected   = 1600
    n   = length(linevec)
    if( n != linesexpected )
        {
        cat( "linesdata =", n, "   linesexpected =", linesexpected, '\n' )
        return( FALSE )
        }
    #   hue lookup
    huenum  = c( A=2.5, B=5, C=7.5, D=10, E=1.25, F=3.75, G=6.25, H=8.75 )  # ;  print( huenum )
    huename = c( RR="R", YR="YR", YY="Y", GY="GY", GG="G", BG="BG", BB="B", PB="PB", PP="P", RP="RP" )  #;  print( huename )
    munsell = character( n )
    for( i in 1:n )
        {
        line    = linevec[i]
        if( grepl( "NEUT", line ) )
            {
            value   = as.double( substr(line,5,7) ) / 100
            munsell[i]  = sprintf( "N%g/", value )
            next
            }
        hue1    = huenum[ substr(line,1,1) ]
        hue2    = huename[  substr(line,2,3) ]
        val     = as.double( substr(line,4,5) ) / 10
        chroma  = as.double( substr(line,6,7) )
        munsell[i]  = paste( hue1, hue2, val, '/', chroma, sep='', collapse='' ) #; print( munsell[i] )
        }
    # print( munsell )
    #munsell     = c( "N10/", munsell )
    #   adapt to Illuminant E
    C   = spacesXYZ::XYZfromxyY( c( spacesXYZ::standardxy( "C.NBS" ), 1) )
    CtoE    = spacesXYZ::CAT( C, 'E' )    # adaptation transform from Illuminant C to E
    XYZ     = munsellinterpol::MunsellToXYZ( munsell )
    XYZ     = spacesXYZ::adaptXYZ( CtoE, XYZ )   #; print( XYZ )
    header  = character(0)
    header  = c( header, "#   XYZ data converted from README.txt" )
    header  = c( header, "#  https://sites.uef.fi/spectral/munsell-colors-glossy-all-spectrofotometer-measured/"  )
    header  = c( header, '#  adapted to Illuminant E for simplicity' )    
    header  = c( header, '' )
    
    write( header, file=pathout )
    
    utils::write.table( XYZ, file=pathout, append=TRUE )
    return(TRUE)
    }
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/inst/extdata/saveMunsell.R 
 | 
					
	---
title: "Matroids"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{Matroids}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 750px;     /* make a little wider, default is 700px */
}
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
```
<br><br>
# Introduction
The focus of this vignette is the `zonohedron()` constructor
and specifically its tolerance argument `e2`,
whose default value is `1.e-10`.
One goal of the **zonohedra** package is to handle all possible
zonogon facets, not just the parallelograms in the generic case.
The input to the constructor is a matrix whose columns are the generators
of the zonohedron.
The generators of a specific facet span a plane, and adding another
generator increases the span to all of $\mathbb{R}^3$.
Stated another way, the set of generators of a specific facet has rank 2,
and is maximal with respect to this property.
So a naive way of determining the facets is to examine *all* subsets
of the generators and determine whether each one has this property.
This is hopelessly impractical.
Moreover, although the rank function is well-defined for matrices with
numbers in $\mathbb{R}$,
it is not computationally meaningful for floating-point numbers.
For example, if a set of floating-point vectors spans the xy-plane,
their rank is unambiguously 2; the smallest singular value is 0.
But if the set is given a random rotation,
the smallest singular value will be very small, but non-zero.
Some sort of tolerance is needed.
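To make the role of the tolerance concrete, here is a minimal
numerical-rank sketch; the package itself avoids computing ranks this way,
as explained below.
```{r, echo=TRUE}
# numerical rank: count the singular values above a relative tolerance
numrank <- function( A, tol=1.e-10 )  {
d = svd(A)$d
sum( d > tol * max(d) )
}
set.seed(1)
A = matrix( rnorm(6), 3, 2 )       # 2 random vectors in R^3
B = cbind( A, A %*% c(1,1) )       # a 3rd vector: their sum, so the true rank is 2
numrank( B )                       # 2, despite floating-point roundoff
```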
The central dogma is that there are true vector generators in $\mathbb{R}^3$
that are very close to the given (dyadic rational floating point) vectors,
and for which the generators of each facet have actual rank 2.
The package does a feasibility test that the floating point generators
could have come from true real vectors.
This test comes from the axioms of matroid theory.
The facet-finding method chosen for `zonohedron()` does not use rank,
but it also requires a tolerance - the argument `e2`.
The computational steps in `zonohedron()` are:
<ol>
<li>
Eliminate the zero generators; argument `e0` is used here
</li>
<li>
Unify the non-zero generators that are multiples of each other;
argument `e1` is used here.
Every set of two distinct generators $\{ v_i, v_j \}$ now has rank 2,
so their cross-product $v_i \times v_j \neq 0$.
</li>
<li>
Compute all pairwise cross-products of the generators,
and unitize them to the unit sphere.
For generators $v_i$ and $v_j$, denote the unit vector by
$u_{i,j} := v_i \times v_j / || v_i \times v_j ||$.
</li>
<li>
Perform a cluster analysis for the unitized cross-products,
using `e2` as a "pseudo-angular" threshold.
Special measures are taken so that vector $u_{i,j}$ is considered
identical to $-u_{i,j}$.
</li>
<li>
For each cluster of unit vectors,
take all the generators associated with this cluster and call them
the generators of a pair of antipodal facets.
Most of the clusters have only one unit vector,
and thus only 2 generators of antipodal parallelogram facets.
But some facets may have 3 or even more generators.
Steps 3 to 5 are sketched in code just after this list.
</li>
<li>
Perform a feasibility test on these subsets of generators,
and if the test fails, the zonohedron is invalid and the constructor fails.
This test depends on the hyperplane axioms of matroid theory,
and is outlined in the rest of the vignette.
</li>
</ol>
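The cross-product and clustering steps are easy to prototype in base R.
Here is a minimal sketch of steps 3 to 5, not the package's actual
implementation, which is more careful about the "pseudo-angular" metric
and the antipodal identification.
```{r, echo=TRUE}
# cross product in R^3
crossprod3 <- function( a, b )
c( a[2]*b[3] - a[3]*b[2],  a[3]*b[1] - a[1]*b[3],  a[1]*b[2] - a[2]*b[1] )
# group the generator pairs by their unitized cross-products
facetsets <- function( mat, e2=1.e-10 )  {
pair = combn( ncol(mat), 2 )        # 2 x choose(n,2), all pairs i < j
u = apply( pair, 2, function(ij)  {
w = crossprod3( mat[ , ij[1] ], mat[ , ij[2] ] )
w = w / sqrt( sum(w*w) )        # unitize to the sphere
# identify w and -w by forcing a canonical sign
if( w[3]<0 || (w[3]==0 && (w[2]<0 || (w[2]==0 && w[1]<0))) )  w = -w
w  } )
cl = cutree( hclust( dist( t(u) ), method="single" ), h=e2 )
# the generators associated with each cluster of unit normals
lapply( split( seq_len(ncol(pair)), cl ), function(k) sort( unique( c(pair[ ,k]) ) ) )
}
mat = cbind( c(1,0,0), c(0,1,0), c(1,1,0), c(0,0,1), c(1,1,1) )
facetsets( mat )   # two non-trivial hyperplanes {1,2,3} and {3,4,5}, plus 4 trivial ones
```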
<br><br>
# Rank Functions
Let $E$ be a finite set of vectors in $\mathbb{R}^n$.
For any $A \subseteq E$ the _rank function_
$r(A) := \operatorname{dim}( \operatorname{span}(A) )$
has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(R1)  $0 \le r(A) \le |A|$  (_cardinality bound_)
</li>
<li style="list-style: none">
(R2)  If $A \subseteq B$, then $r(A) \le r(B)$  (_monotonicity_)
</li>
<li style="list-style: none">
(R3)  $r(A \cup B) + r(A \cap B) \le r(A) + r(B)$  (_submodularity_)
</li>
</ul>
If $E$ is changed to be just a set of abstract _points_,
then an integer-valued function defined on subsets of
$E$ that satisfies the axioms
(R1), (R2), and (R3) defines a _matroid_ on the _ground set_ $E$.
The _rank_ of the matroid is defined to be $r(E)$.
We mostly follow references @Welsh1976 and @White1986.
A given matroid $M$ may not be represented by a set of vectors in $\mathbb{R}^n$.
But if it _is_, we say that $M$ is _representable over_ $\mathbb{R}$.
We also say that $M$ is a _vector matroid_.
From (R1) it follows that a point has rank 0 or 1.
A point of rank 0 is called a _loop_;
in a vector matroid a loop corresponds to the 0 vector.
A _multiple group_ is a maximal subset of size 2 or more
that has rank 1, and in which every point has rank 1.
In a vector matroid a multiple group is a maximal set of
2 or more non-zero vectors
that are all multiples of each other.
A _simple matroid_ is a matroid with no loops or multiple groups.
The rank function is defined on every subset of $E$,
so it is far too large to deal with directly.
Matroid theory provides more efficient alternatives.
<br><br>
# Matroid Hyperplanes
In a matroid $M$ on a ground set $E$, a _hyperplane_ is a maximal subset
$H \subseteq E$ with $r(H)=r(E)-1$.
One can show that the set of hyperplanes has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(H1)
$E$ is not a hyperplane (_nontriviality_)
</li>
<li style="list-style: none">
(H2)
if $H_1$ and $H_2$ are hyperplanes and $H_1 \subseteq  H_2$,
then $H_1 = H_2$  (_incomparability_)
</li>
<li style="list-style: none">
(H3)
if $H_1$ and $H_2$ are distinct hyperplanes and $x \in E$,
then there is a hyperplane $H_3$ with
$(H_1 \cap H_2) \cup x \subseteq H_3$  (_covering_)
</li>
</ul>
For a proof see @Welsh1976 p. 39.
Conversely,
if a collection of subsets of $E$ satisfies the axioms (H1), (H2) and (H3),
then the collection defines a valid rank function and a matroid on $E$.
To do this, first define the _corank_ function $c()$ by:
\begin{equation}
c(A) := \max \Bigl\{ k : \text{there are hyperplanes } H_1,..., H_k
\text{ where for all } j,
A \subseteq H_j \text{ and } H_1 \cap ... \cap H_{j-1} \nsubseteq H_j \Bigr\}
\end{equation}
And now define $r(A) := c(\varnothing) - c(A)$.
This function $r()$ satisfies the axioms (R1), (R2), and (R3).
The above formula appears in @White1986 p. 306, without a proof.
Given a collection of hyperplanes, checking the hyperplane axioms
(H1), (H2), and (H3)
is more efficient than checking the rank function axioms
(R1), (R2), and (R3),
but _still_ too time-consuming in practice.
<br><br>
# Matroid Circuits
In a matroid $M$ on a ground set $E$, a _circuit_ is a subset
$C \subseteq E$ with
$r(C)=|C|-1$ and $r(C - x) = r(C)$ for all $x \in C$.
One can show that the set of circuits has these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(C1)
$\varnothing$ is not a circuit (_nontriviality_)
</li>
<li style="list-style: none">
(C2)
if $C_1$ and $C_2$ are circuits and $C_1 \subseteq  C_2$,
then $C_1 = C_2$  (_incomparability_)
</li>
<li style="list-style: none">
(C3)
if $C_1$ and $C_2$ are distinct circuits and $x \in E$,
then there is a circuit
$C_3 \subseteq(C_1 \cup C_2) - x$  (_weak elimination_)
</li>
</ul>
For a proof see @Welsh1976 p. 9.
Conversely,
if a collection of subsets of $E$ satisfies the axioms (C1), (C2) and (C3),
then the collection defines a valid rank function and a matroid on $E$.
\begin{equation}
r(A) := |A| - \max \Bigl\{ k : \text{there are circuits } C_1,..., C_k
\text{ where for all } j,
C_j \subseteq A \text{ and } C_j \nsubseteq C_1 \cup ... \cup C_{j-1}  \Bigr\}
\end{equation}
This formula appears in @White1986 p. 306, without a proof.
A circuit of size 1 is a loop.
A circuit of size 2 is a pair of points in a multiple group.
Recall that _simple matroid_ is a matroid with no loops or multiple groups.
Thus, a simple matroid is a matroid with no circuits of size 1 or 2.
<br><br>
# Efficient Checking of Hyperplane Axioms
In this section we derive an efficient way to check
the hyperplane axioms, but only in the case when the matroid rank is 3.
Given an integer $d \ge 1$ a $d$-_partition of_ $E$ is a collection
of subsets of $E$, called _blocks_, with these properties:
<ul style="line-height: 2em; margin-bottom: 15px">
<li style="list-style: none">
(D1) there are 2 or more blocks
</li>
<li style="list-style: none">
(D2)
each block has $d$ or more points
</li>
<li style="list-style: none">
(D3)
every $d$-element subset of $E$ is a subset of exactly one block
</li>
</ul>
One can show that the blocks of a $d$-partition satisfy the hyperplane axioms
(H1), (H2), and (H3).
For a proof see @Welsh1976 p. 40.
The resulting matroid on $E$ is called a _paving matroid_
and has rank $d{+}1$.
Note that the 3 properties of a $d$-partition can be checked efficiently.
**Theorem**
A matroid of rank $r \ge 2$ is a paving matroid
if and only if
every circuit has size $r$ or greater.
**Proof** See @Welsh1976, p. 40.
<br>
**Theorem**
A simple matroid $M$ of rank 3 is a paving matroid.
**Proof** (trivial)
Since $M$ is simple no circuit has size 1 or 2.
Therefore every circuit has size 3 or greater.
By the previous theorem, $M$ is paving.  $\square$
Given a set of proposed hyperplanes for a matroid of rank 3,
we finally have an efficient way to check the hyperplane axioms,
by checking the $d$-partition block axioms instead.
<ol>
<li>simplify the hyperplanes</li>
<li>verify (D1) and (D2), which are linear in the number of hyperplanes</li>
<li>verify (D3), which is quadratic in the number of generators</li>
</ol>
For the hyperplane simplification in item 1,
the number of hyperplanes is preserved,
but all loops are removed,
and all but one generator from each multiple group is removed. 
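Checking the $d$-partition axioms for $d{=}2$ takes only a few lines of code.
Here is a self-contained sketch (the package's internal check may differ),
applied to the hyperplanes of the small 5-generator example used earlier.
```{r, echo=TRUE}
# check the d-partition axioms for d=2 (rank-3 paving matroids);
# blocks is a list of integer vectors: the simplified hyperplanes on ground set 1..n
is2partition <- function( blocks, n )  {
if( length(blocks) < 2 )  return(FALSE)           # (D1)  two or more blocks
if( any( lengths(blocks) < 2 ) )  return(FALSE)   # (D2)  each block has >= 2 points
count = matrix( 0L, n, n )
for( B in blocks )  {                             # (D3)  every pair in exactly one block
ij = t( combn( sort(B), 2 ) )
count[ij] = count[ij] + 1L
}
all( count[ upper.tri(count) ] == 1L )
}
blocks = list( c(1,2,3), c(3,4,5), c(1,4), c(1,5), c(2,4), c(2,5) )
is2partition( blocks, 5 )   # TRUE: these hyperplanes define a valid rank-3 matroid
```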
<br><br>
# Conclusion and Conjecture
To summarize, let $E$ be a finite set of floating point 3D vectors,
with no vector equal to 0 and no vector a multiple of another (with tolerances).
The vectors generate a zonohedron.
A collection of subsets of $E$ is then computed, with each subset coplanar,
or very close to coplanar using the tolerance parameter `e2` discussed above.
Each subset is the proposed set of generators of a facet of the
generated zonohedron, and all facets are represented.
These subsets are proposed as the hyperplanes of a matroid.
We have shown that:
<blockquote>
If $E$ can be slightly perturbed to a set of actual real
vectors $E' \subset \mathbb{R}^3$, so that the rank of each real hyperplane
is 2, and is maximal w.r.t. this property,
then these hyperplanes satisfy properties (D1), (D2), and (D3).
</blockquote>
In the software package, we use the contrapositive form:
<blockquote>
If these proposed hyperplanes do not satisfy (D1), (D2), and (D3),
then the hyperplanes do not form a valid matroid,
and $E$ _cannot_ be slightly perturbed to satisfy the desired rank=2 property.
</blockquote>
Even if the matroid is valid, the perturbation $E'$ may not exist,
because the matroid might not be representable over the real numbers
$\mathbb{R}$.
A classical example is the _Fano plane_ matroid on 7 points with 7 hyperplanes.
It has just too many hyperplanes, see @FanoWiki.
Nevertheless, we conjecture that such
non-representable matroids cannot occur in practice.
<blockquote>
**Conjecture**
If the hyperplanes for the floating point set $E$ are computed
following the procedure in the **Introduction**, and the tolerance `e2`
(depending on $E$) is sufficiently small,
then a perturbation $E' \subset \mathbb{R}^3$ representing the matroid exists.
</blockquote>
This statement is theoretical in nature,
since real numbers in $\mathbb{R}$ cannot be represented exactly.
The conjecture is true in some simple cases.
Before exploring this, call the hyperplanes of size 2 the _trivial hyperplanes_.
Note that for the Fano plane matroid, all 7 hyperplanes are
size 3 and non-trivial.
Suppose that _all_ the hyperplanes are trivial, so the matroid is uniform
and all the facets of the zonohedron are parallelograms.
Then no perturbation is needed at all; the given vectors
(with dyadic rational coordinates) already represent the matroid.
This is the case for 7 of the 13 classical zonohedra in `classics.genlist`.
And it is also the case for the generators in `colorimetry.genlist[[3]]`.
Now suppose that the matroid has only 1 non-trivial hyperplane.
Then there are 3 or more generators that (approximately) span a plane,
and all the other generators are far from the plane.
Take the "best fit" linear plane $P \subset \mathbb{R}^3$ for these
generators, and perturb them by projecting them onto $P$.
If this perturbation accidentally creates non-trivial hyperplanes
with the _other_ generators, then just perturb the other generators
to get the original matroid.
An example is the matroid generated by `colorimetry.genlist[[2]]`,
which has 1 non-trivial hyperplane with 50 generators.
Now suppose that all the non-trivial hyperplanes are disjoint.
Then we can repeat the procedure in the previous paragraph
for each hyperplane.
Since the hyperplanes are disjoint, there is no "interaction" between them.
An example is the matroid generated by `colorimetry.genlist[[1]]`,
which has 2 disjoint non-trivial hyperplanes with sizes 3 and 26.
Now suppose that the non-trivial hyperplanes intersect in a single generator.
We can perform a "constrained best fit" perturbation for each plane $P$,
where the constraint is that the shared generator lies in the plane.
An example is the matroid generated by `classics.genlist[[5]]`,
which has 2 non-trivial hyperplanes: $\{1, 3, 4\}$ and $\{2, 3, 5\}$.
The generated zonohedron is the _rhombo-hexagonal dodecahedron_.
More simple cases can be listed by mixing the above,
but we cannot find a general proof of the conjecture.
<br><br>
# References
<div id="refs"></div>
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options(old_opt)
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/vignettes/matroids.Rmd 
 | 
					
	---
title: "Ray Tracing the Zonohedron Boundary and the 2-Transition Surface"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{Ray Tracing the Zonohedron Boundary and the 2-Transition Surface}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 870px;     /* make wider, default is 700px */
}
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=144 )
```
<br><br>
# Introduction
The focus of this vignette is the two
functions `raytrace()` and `raytrace2trans()`.
The former is for the boundary of the zonohedron 
and the latter is for the associated 2-transition surface.
We revisit the example at the end of section 6
in Scott Burns' paper @Burns2021,
which is also illustrated in the 1nm plot from Figure 8.
His example is from colorimetry, where the boundary of the zonohedron
is the set of optimal colors
and the 2-transition surface is the set of Schrödinger colors
(both for Illuminant E).
The correspondence for the optimal colors was discovered by
Paul Centore, see @Centore2013.
Other featured functions are `invertboundary()`, `inside()` and `inside2trans()`.
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
```
<br><br>
# A Ray Tracing Example
In Burns' example, the base of the ray is the center of the zonohedron $Z$:
```{r, echo=TRUE,  message=FALSE}
matgen = colorimetry.genlist[[2]]   # the CIE 1931 CMFs at 1nm step
matgen = 100 * matgen / sum( matgen[2, ] )   # it is traditional to scale so the center has Y=50, recall we use Illuminant E
zono =  zonohedron( matgen )
base = getcenter(zono) ; base
```
The vector `base` corresponds to Burns' vector $XYZ_{\text{50%}}$.
The direction of the ray is given by spherical angles,
which define a unit vector `u`:
```{r, echo=TRUE,  message=FALSE}
theta = 1.478858 ; phi = 0.371322
u = c( sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi) ) ; u
```
Calculate the intersection of the ray with the boundary of $Z$.
```{r, echo=TRUE,  message=TRUE}
df_opt = raytrace( zono, base, u ) ; df_opt
xyz_opt = df_opt$point[1, ] ; xyz_opt
```
This matches Burns' value of $XYZ_{\text{LPsoln}}$.
From Figure 8 of @Burns2021 we see that this point
(and every point in the same parallelogram)
comes from a reflectance spectrum with 4 transitions.
This can be verified by inverting:
```{r, echo=TRUE,  message=TRUE}
invertboundary( zono, xyz_opt )$transitions
```
Now calculate the intersection of the ray with the 2-transition surface
associated with $Z$.
```{r, echo=TRUE,  message=TRUE}
df_2trans = raytrace2trans( zono, base, u ) ; df_2trans
xyz_2trans = df_2trans$point[1, ] ; xyz_2trans
```
This matches Burns' value of $XYZ_{\text{two-trans}}$ to 4 decimal places.
The transition wavelengths 629 and 575nm,
and the parallelogram coordinates 0.2246808 and 0.4459951
(these are the corresponding reflectances),
are clearly visible in Figure 8.
Now consider the distance between these 2 points
$XYZ_{\text{LPsoln}}$ and $XYZ_{\text{two-trans}}$.
The parameter `tmax` in both data frames is the parameter on the ray
where it intersects the boundary or the surface.
Since `u` is a unit vector, the difference between the parameters is this distance.
```{r, echo=TRUE,  message=FALSE}
df_opt$tmax - df_2trans$tmax
```
This matches Burns' value of $1.29 \times 10^{-3}$,
which is very tiny especially compared to the two $XYZ$s.
What is the maximum that this distance can be over the entire $\partial Z$?
To get a rough estimate, a search was made over the rays
passing through the centers of all the 21900 deficient parallelograms,
and with the same basepoint as before.
The largest distance over these rays was $2.47 \times 10^{-3}$.
This distance is for the parallelogram with generators corresponding
to 592 and 608 nm; the generating 'spectrum' has 8 transitions.
The actual maximum distance between the boundary of the color solid
and the 2-transition surface is likely not much larger than this sampled maximum.
This confirms Burns' statement from @Burns2021 that the distance between
these surfaces has
"... no practical impact on typical colorimetric calculations".
If the zonohedron $Z$ is called the _Optimal Color Solid_ (OCS),
and the inside of the 2-transition surface is called the 
_Schrödinger Color Solid_ (SCS),
we see that the OCS is obtained by adding a very thin "skin"
on some regions of the SCS.
<br><br>
# Inside or Outside ?
Consider the midpoint of $XYZ_{\text{LPsoln}}$ and $XYZ_{\text{two-trans}}$.
It lies on the same ray as these 2 points,
so it must be *inside* the zonohedron, but *outside* the 2-transition surface.
We can verify this easily:
```{r, echo=TRUE,  message=FALSE}
xyz_mid = (xyz_opt + xyz_2trans) / 2
inside( zono, xyz_mid )
inside2trans( zono, xyz_mid )
```
<br><br>
# References
<div id="refs"></div>
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options(old_opt)
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/vignettes/raytrace.Rmd 
 | 
					
	---
title: "The 2-Transition Subcomplex and the 2-Transition Surface"
author: "Glenn Davis"
date: "`r Sys.Date()`"
header-includes:
  - \usepackage{textcomp}
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: true
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{The 2-Transition Subcomplex and the 2-Transition Surface}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 750px;     /* make a little wider, default is 700px */
}
```
\newcommand{\argmax}{\mathop{\mathrm{argmax}}\limits}
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=144 )
# if( !file.exists("figs") ) dir.create("figs")
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
Throughout this vignette,
$Z$ is a zonohedron such that none of its generators
$L(e_1),...,L(e_n)$ are 0,
and no generator is a multiple of another.
Equivalently, we assume that the matroid of $Z$ is simple.
For more discussion of zonohedra,
see the [Zonotopes](zonotopes.html) vignette.
Given such a zonohedron $Z$, and a cyclic ordering of the generators of $Z$,
there is a surface contained in $Z$, which may coincide with $\partial Z$,
but in general does not.
In this vignette we give a careful definition of this
_2-transition surface_,
give some examples, and explain some of the relevant functions
in the **zonohedra** package for processing this surface.
Points in the 2-transition surface are analogous to the reflectance spectra
of _Schrödinger colors_.
Points in $\partial Z$ are analogous to the reflectance spectra of
_optimal colors_, see @ANDP:ANDP19203671504, @Logvinenko2009,
and especially @Brill1983.
Featured functions in this vignette are:
`raytrace2trans()`, `transitionsdf()`, and `plothighertrans()`.
Given $x_1,x_2 \in \mathbb{R}^n$, $[x_1,x_2]$ denotes the line segment
from $x_1$ to $x_2$.
<br>
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
```
<br><br>
# The Unit Cube $Q^n$
We abbreviate $Q^n := [0,1]^n$, i.e. the $n$-cube.
So $Q^n$ is all points
$x = (\alpha_1, ... , \alpha_n)$ where all $\alpha_i \in [0,1]$.
For this vignette we assume $n \ge 3$.
A _vertex_ of $Q^n$ is a point where all $\alpha_i = 0 ~ \textrm{or} ~ 1$;
there are $2^n$ vertices.
If two vertices differ by exactly one coordinate, they are the endpoints
of an _edge_, and the "free" coordinate parameterizes the edge.
There are $n 2^{n-1}$ edges.
If four vertices differ by two coordinates, they are the vertices
of a _square_, and the two "free" coordinates parameterize the square.
There are $\binom{n}{2} 2^{n-2}$ squares.
In the familiar case when $n{=}3$, there are 8 vertices, 12 edges, and 6 squares.
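These counts are easy to verify in R:
```{r, echo=TRUE}
n = 3
c( vertices = 2^n, edges = n*2^(n-1), squares = choose(n,2)*2^(n-2) )   # 8, 12, 6
```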
Note that $(\alpha_1, ... , \alpha_n) \in \partial Q^n$ 
iff some $\alpha_i =$ 0 or 1.
Thus vertices, edges, and squares are all subsets of $\partial Q^n$.
The _standard involution_ $\rho: Q^n \to Q^n$ is the map
$(\alpha_1, ... , \alpha_n) ~ \mapsto ~ (1-\alpha_1, ... , 1-\alpha_n)$.
It is clear that $\rho$ maps a vertex to an _antipodal vertex_.
Similarly, there are pairs of _antipodal edges_ and _antipodal squares_.
$\rho$ has a unique fixed point $(1/2, ... ,1/2)$,
which is the center of symmetry.
<br><br>
# The 2-Transition Subcomplex $Q_2^n \subsetneq Q^n$
This section treats the _2-transition subcomplex_ of $Q^n$ with $n \ge 3$,
which we denote by $Q^n_2$.
We define it in three ways.
**Definition 1:**  
Visualize the indexes $1, ... ,n$ as points on the circle,
like beads in a necklace, or the $n$ roots of unity.
Two distinct points $i \ne j$ divide the other
points into 2 contiguous "arcs" (one of which may be empty).
Define $(\alpha_1, ... , \alpha_n) \in Q^n_2$
iff 
there are $i \ne j$, so that $\alpha_k = 0$ for $k$ in one arc
and $\alpha_k = 1$ for $k$ in the other arc.
There are 2 ways to choose 0 and 1,
so as $\alpha_i$ and $\alpha_j$ vary,
they "sweep out" 2 disjoint and antipodal squares in $Q^n$.
The total number of these _2-transition squares_ is $n(n-1)$.
$Q^n_2$ is the union of these squares, joined along their edges.
It is helpful to visualize points in the unit cube as bar graphs.
Here are four points in $Q^{10}_2$, with 2 transitions:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.1  four points in the 2-transition complex, visualized with bar graphs', out.width="100%", cache=FALSE }
mybarplot <- function( x )  {
n = length(x)
plot( c(0,n), c(0,1), type='n', tcl=0, las=1, xaxt='n', xlab='', ylab='', mgp=c(3,0.25,0) )
grid( nx=NA, ny=NULL, lty=1 )
barplot( x, names.arg=1:n, space=0, add=T, yaxt='n', mgp=c(3,0.25,0) )
}
x1 = numeric(10) ; x1[ c(3,8) ] = exp( c(-0.25,-1) ) ; x1[ 4:7 ] = 1
x2 = numeric(10) ; x2[ c(5,6) ] = exp( c(-1,-0.25) )
oldpar = par( mfrow=c(2,2)  , omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
mybarplot( x1 )   ; mybarplot( x2 )     #  row #1
mybarplot( 1-x1 ) ; mybarplot( 1-x2 )   #  row #2
par( oldpar )
```
For the first point, the run of 1s has length 4,
and the (circular) run of 0s also has length 4.
For the second point, the run of 1s has length 0,
and the (circular) run of 0s has length 8.
The points in row #2 are derived by applying the involution to the point above it.
Of course, the involution turns runs of 1s to runs of 0s,
and vice-versa.
The special vertices $(0,...,0)$ and $(1,...,1)$ are in $Q^n_2$
by this definition, although they technically have no transitions.
Also, if all $\alpha_i = 0$ except for one $i$, that point is _also_ in $Q^n_2$
because it is in an edge of one of the above-defined squares.
Note that $Q^3_2 = \partial Q^3$,
i.e. every point in the boundary of $Q^3$ is a 2-transition point.
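As a numerical check of this definition, we can count the vertices of $Q^n$
with at most 2 circular transitions.
Definition 1 implies that there are $n(n{-}1)+2$ of them:
one for each nonempty proper arc, plus the two special vertices.
The following helper is for illustration only, and is not a function in the package:
```{r, echo=TRUE, message=FALSE}
count2trans <- function( n )
    {
    vert  = as.matrix( expand.grid( rep( list(0:1), n ) ) )    # all 2^n vertices
    trans = apply( vert, 1, function(x) sum( x != c( x[-1], x[1] ) ) )  # circular transitions
    sum( trans <= 2 )
    }
sapply( 3:7, count2trans )    #  should be n*(n-1) + 2,  i.e.  8 14 22 32 44
```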
<br>
**Definition 2:**   
This definition views a 2-transition point as a special discrete projection
of a 2-transition function defined on a circle.
Given $n$, define $n+1$ points, $\beta_i := i + 1/2$ for $i = 0,...,n$,
and $n$ intervals
$I_i := [\beta_{i-1},\beta_i]$ = $[i-1/2,i+1/2]$ for $i = 1,...,n$.
These intervals have length 1 and are a partition of $[1/2,n+1/2]$.
Let $J_2$ be the set of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have two transitions or no transitions (jumps).
We identify the endpoints $\beta_0$ and $\beta_n$ to form a circle,
so if the function's values at $\beta_0$ and $\beta_n$ are different,
then this is considered to be a transition.
Equivalently, $J_2$ is the set of all indicator functions $\mathbf{1}_A$
where $A$ is an arc in the circle.
We allow the arc to be empty (the function is identically 0),
or the entire circle (the function is identically 1).
Define a function $p()$
\begin{equation}
p : J_2 \twoheadrightarrow Q^n  ~~~  \text{by} ~~~  p(f) := (\alpha_1,\ldots,\alpha_n)  ~~~ \text{where}  ~~~   \alpha_i :=  \int_{I_i} f(\lambda) \, d\lambda
\end{equation}
Note that $\alpha_i$ is the mean of $f$ on $I_i$.
So if $f$ is identically 1 on $I_i$, then $\alpha_i = 1$ in $p(f)$.
And the same is true with 1 replaced by 0.
But if $f$ has a jump in $I_i$, then $\alpha_i$ can be anywhere in $[0,1]$
depending on where the jump occurs.
If there is exactly one jump in $I_i$,
then the location of the jump is uniquely determined by $\alpha_i$.
But if there are two jumps in $I_i$,
then the 2 jump locations are **not** determined by $\alpha_i$;
it determines only the distance between the jumps,
so both jumps can be translated slightly without changing $\alpha_i$.
Thus, $p$ is _not_ injective.
Here are plots with 2 examples:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.2', out.width="100%", cache=FALSE }
mystepplot <- function( x )  {
# assumption: x is Type I
n = length(x)
plot( c(1/2,n+1/2), c(0,1), type='n', tcl=0, las=1, xlab='', ylab='', lab=c(n,5,7), mgp=c(3,0.25,0) )
grid( lty=1 )
beta = seq(1/2,n+1/2,by=1) ; segments( beta, 0, beta, -0.02 )
ij = which( 0<x & x<1)  ;  lambda = ij + c(1/2 - x[ ij[1] ], x[ ij[2] ] - 1/2)
lines( c(0.5,lambda[1]), c(0,0) ) ; lines(lambda,c(1,1)) ; lines( c(lambda[2],n+1/2), c(0,0) )
segments( lambda, c(0,0), lambda, c(1,1), lty=3 )
}
oldpar = par( mfrow=c(2,2), omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
mystepplot( x1 ) ; mystepplot( x2 )     #  row #1
mybarplot( x1 ) ; mybarplot( x2 )       #  row #2
par( oldpar )
```
The 1st plot shows a function $f \in J_2$ with jumps in intervals $I_3$ and $I_8$.
The plot below it shows $p(f) \in Q^{10}_2$.
The 2nd plot shows a function $f \in J_2$ with jumps in intervals $I_5$ and $I_6$.
The plot below it shows $p(f) \in Q^{10}_2$.
In the 1st row, the sequence $\beta_i$ (all of them half-integers)
is marked with small tick marks.
We now define $Q^n_2$ to be the image of $p()$; i.e. $Q^n_2 := p(J_2)$.
It is straightforward to show that **Definition 2** and **Definition 1**
are equivalent.
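Here is a minimal sketch of $p()$, for the special case of a non-wrapping arc
$A = [a,b]$ with $1/2 \le a \le b \le n{+}1/2$;
`p_of_arc()` is for illustration only, and is not a function in the package:
```{r, echo=TRUE, message=FALSE}
p_of_arc <- function( a, b, n )
    {
    alpha = numeric( n )
    for( i in 1:n )
        {
        #  overlap of [a,b] with I_i = [i-1/2,i+1/2]; since I_i has length 1,
        #  the overlap length equals the mean of the indicator function over I_i
        alpha[i] = max( min(b,i+1/2) - max(a,i-1/2), 0 )
        }
    alpha
    }
p_of_arc( 2.75, 7.9, 10 )    #  a point in Q^10_2 with jumps in I_3 and I_8
```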
Now we look at $J_2$ in more detail.
Every function in $J_2$ is integrable, so we can think of
$J_2 \subsetneq L^1(\mathbb{S}^1)$, which is the space of
integrable functions on the circle $\mathbb{S}^1$.
**Theorem**
With the $L^1$ topology, $J_2$ is homeomorphic to the 2-sphere $\mathbb{S}^2$.
**Proof**
Denote the 2 functions in $J_2$ that are identically 0 or 1
by $f_0$ and $f_1$.
For a point in $J_2 - \{f_0, f_1\}$,
the corresponding arc $A$ is non-trivial,
and so the midpoint of the arc is a well-defined point in $\mathbb{S}^1$.
The length of the arc is in the open interval $(0,2\pi)$.
This assignment gives a homeomorphism from $J_2 - \{f_0 , f_1\}$
to the open cylinder $U := \mathbb{S}^1 \times (0,2\pi)$.
Note that as two points in $J_2$ get closer to $f_0$,
they get closer to each other in the $L^1$ metric; and similarly for $f_1$.
So if the bottom and top boundaries of $U$ are each collapsed to a point,
and $f_0$ and $f_1$ are mapped to those two points,
the above homeomorphism extends continuously to all of $J_2$ and
to the cylinder with collapsed boundaries,
which is just a 2-sphere $\mathbb{S}^2$.
$\square$
Later, we will see that $Q^n_2$ is _also_ homeomorphic to $\mathbb{S}^2$.
But the above mapping $p : J_2 \twoheadrightarrow Q^n_2$
is _not_ a homeomorphism,
because we observed above that $p$ is _not_ injective.
<br>
**Definition 3:**   
This definition works directly with vertices and squares of $Q^n$.
A _2-transition vertex_ is one with a single circular run of 0s,
and a single circular run of 1s.
A run is allowed to be empty, and this yields the vertices of all 0s or all 1s.
A _2-transition square_ is a square whose 4 vertices are all 2-transition
vertices.
Note that the center of the square has exactly 2 coordinates with value 1/2,
and the other values are a circular arc of 0s and an arc of 1s.
We now define $Q^n_2$ to be the union of all these 2-transition squares.
It is an example of a cubical subcomplex of $Q^n$.
Once again, it is straightforward to show that
**Definition 3** and **Definition 1** are equivalent.
For an $x := (\alpha_1,...,\alpha_n) \in Q^n_2$,
we define the _level_ of $x$ to be the number of $\alpha_i$'s that equal $1$.
The level varies from 0 to $n$.
The level is constant on the interior of an edge and the interior of a square.
We define the _level_ of a square to be the level on the interior.
The level of a square varies from 0 to $n{-}2$.
It is helpful to think of level=0 squares at the "bottom" of the subcomplex,
and level=$n{-}2$ squares at the "top".
So we know that $Q^n_2$ is a union of squares, but what does it "look like",
and what is its topology?
We claim that $Q^n_2$ is a 2-sphere $\mathbb{S}^2$.
In the case of $n{=}3$ this is easy to see, since $Q^3_2$ = $\partial Q^3$.
In general, first consider all the 2-transition squares with level=0.
It is straightforward to show that such a square has 0 as a vertex,
and it has 2 vertices with level=1 and one vertex with level=2.
Each edge from 0 to a level 1 vertex is shared by two of the squares,
and so the squares are arranged in circular fashion around 0.
Their union is a topological disk $\mathbb{D}^2$ at the "bottom".
Similarly, the level=$n{-}2$ squares form a disk at the "top".
Now consider squares with fixed level $\ell$, where $0 < \ell < n{-}2$.
It is easy to show that each of the two level=$\ell{+}1$ vertices
of such a square lies in one other square,
so the level $\ell$ squares form a
"necklace of $n$ diamonds".
These $n{-}3$ necklaces are stacked on top of each other to form a cylinder,
and the cylinder is capped at bottom and top to form the
2-sphere $\mathbb{S}^2$.
We now verify the square count in $Q^n_2$;
$n + n(n{-}3) + n = n(n-1)$ which is correct.
The following figure is a helpful visualization.
```{r, rgl=TRUE, dev='png', echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=4, fig.cap='Figure 2.3       [these are interactive WebGL widgets]', fig.keep='none', fig.show='hide', out.width="100%", cache=FALSE }
rgl::par3d( zoom=0.7 )
rgl::mfrow3d( 1, 2 )
zono =  polarzonohedron(9)
plot2trans( zono )
rgl::next3d()
plot2trans( zono, level=c(0,4,7) )
rgl::rglwidget( webgl=TRUE )
```
This figure plots the image of $Q^9_2$ in $\mathbb{R}^3$ under a
suitable linear map.
The squares are distorted into parallelograms.
The figure on the left draws the full subcomplex,
and the one on the right only draws levels 0, 4, and 7.
The "necklace of 9 diamonds" at level=4 is easily visible.
The black dot is the image of $(0,...,0)$,
and the white dot is the image of $(1,...,1)$.
More about these linear maps is given in the next section.
<br><br>
# The 2-Transition Surface $S_2 \subsetneq Z \subsetneq \mathbb{R}^3$
Let the zonohedron $Z := L(Q^n)$,
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^3$ is a surjective linear map. 
From now on we assume that $Z$ is _pointed_,
which means that 0 is a vertex of $Z$.
Let $S_2 := L(Q^n_2)$ be the image of the 2-transition subcomplex $Q^n_2$.
Since the subcomplex is a union of squares glued on the edges to form a 2-sphere $\mathbb{S}^2$,
$S_2$ is a union of parallelograms glued on the edges.
$S_2$ is a tessellated surface,
but may not be a sphere because it may have self-intersections.
We are mainly interested in the case that there are _no_ self-intersections,
and there is a precise way to state this.
Let $L_2$ be the restriction of $L$ to $Q^n_2$.
So $L_2 : Q^n_2 \to \mathbb{R}^3$ is the composition of the
inclusion $Q^n_2 \subsetneq \mathbb{R}^n$ followed by $L$.
The surface has no self-intersections iff $L_2$ is injective.
For example, in the previous figure $S_2$ does **not**
have self-intersections and is a topological 2-sphere;
$L_2$ is injective.
If $L_2$ is injective,
then it is well-known (@Alexander1924, @Moise1977 p. 117, and @Bing1983 p. 161)
that $S_2$ divides $\mathbb{R}^3$ into
an inside region and an outside region whose intersection is $S_2$.
Moreover, the inside region is homeomorphic to a closed ball,
and $S_2$ is the boundary of that ball.
Since $Q^n_2 \subsetneq Q^n$, $S_2 = L(Q^n_2) \subsetneq L(Q^n) = Z$.
We emphasize that $S_2$ is a surface, and $Z$ is a solid.
We also emphasize that $S_2$ depends intimately on the order of the generators,
and $Z$ does not depend on the order at all.
<br><br>
# Polygons
A polygon for us is more than just a subset of the plane.
A _polygon_ is a finite and cyclically ordered set of distinct _vertices_,
plus the line segments that connect the vertices in cyclic order.
The line segments are called the _edges_.
The union of the edges is a subset of the plane,
but two different sets of vertices can generate the same subset -
the vertices matter.
A _simple polygon_ is a polygon whose edges do not intersect,
except at corresponding endpoints;
i.e. the polygon has no self-intersections.
By the _Jordan Curve Theorem_, a simple polygon,
as just a set, has an inside region
and an outside region, and both are connected.
Note that a non-simple polygon may also have a connected inside region;
it might "double-back" on itself within an edge, and then proceed forward again.
A _convex polygon_ is a polygon with an inside region that is convex.
There is an equivalent definition of polygon using functions.
Let $U_n$ be the set of $n$'th roots of unity on the unit circle in $\mathbb{C}$,
plus the edges connecting these vertices in order.
So $U_n$ is sort of a _quintessential_ or _template_ polygon.
A general polygon is a function $f : U_n \to \mathbb{R}^2$ which is
injective _on the vertices_ (the image vertices are distinct) and 
linear _on each edge_.
Since $f$ is injective on the vertices, it is also injective on each edge;
one can think of $f$ as a _piecewise-linear immersion_ of $U_n$.
A polygon defined this way is _simple_ iff $f$ is injective.
This is an unintuitive way to define a polygon,
but has the advantage that it generalizes easily to one higher dimension,
as we see later in **Section 9**.
<br><br>
# The Generator Polygon $P$
Since $Z$ is pointed, there is a plane $K$ in $\mathbb{R}^3$ that has 0
in one open halfspace, and all the generators of $Z$ in the other open halfspace.
This "cutting" hyperplane intersects $S_2$ in a polygon.
Each vertex of the polygon is the intersection of $K$
and the segment from 0 to a generator.
The cyclic order of its vertices is inherited from the order of the generators.
We call this the _generator polygon_ and let $P := K \cap S_2$ denote it.
$P$ may not be a simple polygon in general; it may have self-intersections.
Since there can be many cutting hyperplanes $K$,
$P$ is only defined up to a projective transformation.
In colorimetry for example, there are three chromaticity diagrams,
from 1931, 1960, and 1976.
They are all generator polygons for the same set of generators,
and the 3 polygons differ by projective transformations,
see @Wyszecki&Stiles.
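Here is a minimal sketch of computing the vertices of a generator polygon.
`genpolygon()` is for illustration only, and the choice of cutting plane
$\langle x,k \rangle = 1$ is an assumption that requires
$\langle L(e_i),k \rangle > 0$ for every generator:
```{r, echo=TRUE, message=FALSE}
genpolygon <- function( M, k )
    {
    s = as.numeric( crossprod( k, M ) )    #  <L(e_i),k> for all i
    stopifnot( all( s > 0 ) )
    sweep( M, 2, s, '/' )                  #  column i is the vertex v_i
    }
#  all generators of a polar zonohedron have positive z, so k=(0,0,1) works
P = genpolygon( getmatrix( polarzonohedron(9) ), c(0,0,1) )
```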
<br><br>
# Parallelograms in $S_2$ and $\partial Z$
Given $\partial Z$ and $S_2$ as above,
the goal in this section is to show that each is a union
of $n(n{-}1)$ parallelograms, and to set up a 1-1 correspondence
between the parallelograms in $S_2$ and those in $\partial Z$.
Each parallelogram will also be assigned a unit normal
in such a way that corresponding parallelograms have the same normal.
First consider $\partial Z$.
If a facet of $Z$ is not a parallelogram, let it be tiled
with the _standard tiling_ by parallelograms,
see the [Zonotopes](zonotopes.html) vignette.
Given an unordered pair $\{ i,j \}$ with $1 \le i , j \le n$
and $i \ne j$,
there are 2 antipodal parallelograms in $\partial Z$.
The edges of both parallelograms are the generators $L(e_i)$ and $L(e_j)$.
If  $\mathcal{P}$ is one of those parallelograms,
we know that $\mathcal{P}$ is the image of some square in $Q^n$.
The following Lemma algebraically characterizes which squares map to
a parallelogram $\mathcal{P} \subset \partial Z$.
**Lemma:**
Let $\Sigma$ be a square in $Q^n$.
By definition $\Sigma$ is determined by an unordered pair $\{ i,j \}$
and for all $k \notin \{ i,j \}$, an assignment of $\alpha_k$ to either 0 or 1.
The coordinates $\alpha_i$ and $\alpha_j$ are 'free' in $[0,1]$
and sweep out the square.
Let the parallelogram
$\mathcal{P} := L(\Sigma) \subsetneq Z$ be the image of the square.
Let $w := L(e_i) \times L(e_j)$ be the cross product of the edges of $\mathcal{P}$.
Then $\mathcal{P} \subset \partial Z$
iff
for all $k \notin \{ i,j \}$
\begin{equation}
\alpha_k =
\begin{cases}    
0 & \langle L(e_k),w \rangle < 0  \\
1 & \langle L(e_k),w  \rangle > 0  \\
0 ~\text{or} ~ 1 & \langle L(e_k),w  \rangle = 0
\end{cases}
\hspace{20pt}  \text{or}  \hspace{20pt}
\alpha_k =
\begin{cases}
1 & \langle L(e_k),w \rangle < 0  \\
0 & \langle L(e_k),w  \rangle > 0  \\
0 ~\text{or} ~ 1 & \langle L(e_k),w  \rangle = 0
\end{cases}
\end{equation}
Note that the two conditions are almost the same;
the first 2 cases merely swap 0 and 1,
and the third case is the same in both conditions.
The third case is not really new;
it comes from the definition of the square.
Also note that the order of $i$ and $j$ affects the definition of $w$,
but if $i$ and $j$ are swapped,
$w$ is changed to $-w$ which merely swaps the two conditions.
**Proof:**
Let $\lambda$ be the linear functional $z \mapsto \langle z,w \rangle$.
Since $\langle L(e_i),w \rangle = \langle L(e_j),w \rangle = 0$,
the values of $\alpha_i$ and $\alpha_j$ have no effect on $\lambda( L(x) )$.
Similarly, in the last case where $\langle L(e_k),w  \rangle = 0$,
$\alpha_k$ has no effect.
This case only happens when $L(e_k)$ is a linear combination of $L(e_i)$ and $L(e_j)$,
and the corresponding facet is non-trivial with 6 or more edges.
The facet then requires a selected tiling,
and different tilings yield different assignments of 0 and 1 to $\alpha_k$.
If the square $\Sigma$ satisfies the first condition above,
then any $x \in \Sigma$ maximizes $\lambda( L(x) )$ over _all_ $x \in Q^n$.
Therefore $L(\Sigma) \subset \partial Z$, by the definition of $Z$.
If the square $\Sigma$ satisfies the second condition,
then $\lambda( L(x) )$ is minimized and the conclusion is the same.
Conversely, if the square satisfies neither condition,
then  $\lambda( L(x) )$ for $x \in \Sigma$ is strictly between
the maximum and the minimum.
This means that $L(\Sigma)$ is in an intermediate hyperplane
orthogonal to $w$.
The intersection of an intermediate hyperplane with $\partial Z$
is only a 1-dimensional polygon and cannot contain a parallelogram.
Thus $L(\Sigma) \not\subset \partial Z$.
$\square$
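The Lemma translates directly into a computation.
The following sketch is for illustration only, with hypothetical helpers
that are not package functions; compare the package function
`boundarypgramdata()` used in Section 10:
```{r, echo=TRUE, message=FALSE}
cross3 <- function( u, v )    #  cross product in R^3
    { c( u[2]*v[3] - u[3]*v[2], u[3]*v[1] - u[1]*v[3], u[1]*v[2] - u[2]*v[1] ) }

boundarysquares <- function( M, i, j )
    {
    w = cross3( M[ ,i], M[ ,j] )
    s = as.numeric( crossprod( w, M ) )   #  <L(e_k),w> for all k
    alpha = ifelse( s > 0, 1, 0 )         #  a tie s==0 is resolved arbitrarily to 0
    alpha[ c(i,j) ] = NA                  #  the 'free' coordinates of the square
    rbind( alpha, 1 - alpha )             #  the second condition swaps 0 and 1
    }
boundarysquares( getmatrix( polarzonohedron(5) ), 1, 3 )
```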
Define a _slab_ in $\mathbb{R}^3$ to be the region between two parallel planes,
including the planes themselves.
In equation form, a slab $\mathcal{S}$ is given by
\begin{equation}
\mathcal{S} := \{ ~ x : \alpha  \le  \langle x,w \rangle  \le \beta ~ \}
\end{equation}
where $w \in \mathbb{R}^3$ is the non-zero plane normal,
and $\langle x,w \rangle {=} \alpha$ and $\langle x,w \rangle {=} \beta$ are the two planes.
If $\alpha {<} \beta$ then each point in the boundary planes of $\mathcal{S}$
has a unique outward-pointing unit normal.
But if $\alpha {=} \beta$ then the planes coincide,
the slab degenerates to that plane,
and a normal cannot be assigned unambiguously.
Given an unordered pair $\{ i,j \}$ with $1 \le i , j \le n$
and $i \ne j$,
there are 2 antipodal parallelograms in $\partial Z$.
The edges of both parallelograms are the generators $L(e_i)$ and $L(e_j)$.
They define two distinct parallel planes and therefore a non-degenerate slab
denoted by $\mathcal{S}^{ \{i,j\} }$.
The slab has 2 well defined boundary normals,
which we assign to the 2 parallelograms in the boundary of the slab.
Note that if $u$ is the unit normal for one of the parallelograms $\mathcal{P}$,
then $u$ is a multiple of the cross product $L(e_i) \times L(e_j)$.
Now consider $S_2$.
Given an unordered pair $\{ i,j \}$ as above,
there are 2 antipodal parallelograms in $S_2$
and the edges of both are the generators $L(e_i)$ and $L(e_j)$.
These 2 parallelograms are parallel to each other;
let $\mathcal{S}_2^{ \{i,j\} }$ be the slab defined by them.
The 2 parallelograms in $\partial Z$ given by $\{ i,j \}$ have the same
edges - the generators $L(e_i)$ and $L(e_j)$.
So it is clear that $\mathcal{S}_2^{ \{i,j\} } \subseteq \mathcal{S}^{ \{i,j\} }$.
If this new slab is non-degenerate,
then each of the two parallelograms in $S_2$ can be matched to exactly one
of the two parallelograms in $\partial Z$
by choosing the one with the same normal vector.
If this new slab is degenerate, its outward-pointing normal is not defined,
and we can pick an assignment at random.
This completes the goal of this section.
An interesting observation:
since corresponding parallelograms are congruent,
they have the same surface area,
and therefore $S_2$ and $\partial Z$ have the same surface area as well.
<br><br>
# A Theorem about $S_2$ and the Convexity of $P$
We have seen that $S_2 \subsetneq Z$.
It is natural to ask:
<div style="text-align: center">
When is $S_2$ as large as possible, namely the entire boundary of $Z$ ?
</div>
Recall our assumptions that $Z$ has a simple matroid, and is pointed.
**Theorem:**
With $Z$, $S_2$, $L_2$, and $P$ defined as above, the following are equivalent:
<ol type='1'>
<li>$S_2 = \partial Z$</li>
<li>$L_2$ is injective, and the inside region of $S_2$ is convex</li>
<li>$P$ is a simple convex polygon, possibly with collinear vertices</li>
</ol>
The equivalence of properties 1 and 3 is proved in West & Brill @Brill1983,
except that the sequence of vector generators in this theorem
is replaced in @Brill1983 by a continuous path of vectors.
To our knowledge, property 2 is new.
**Proof:**  
$1. \implies 2.$
$L_2$ is clearly injective on each square.
If a parallelogram of $S_2$ is in $\partial Z$ then it must be the
corresponding parallelogram (or its antipodal) in $\partial Z$
from the previous section.
This mapping of parallelograms is 1-1,
and since the parallelograms of $\partial Z$ are disjoint
(except on the edges)
the parallelograms of $S_2$ are disjoint (except on the edges).
Therefore $L_2$ is injective.
The inside region of $S_2$ is
the inside region of $\partial Z$, which is $Z$, which is convex.
$2. \implies 3.$ (trivial) The polygon $P$ is the
intersection of a hyperplane and the boundary of a convex polyhedron,
and that intersection is a convex polygon.
Since $L_2$ is injective, $P$ is simple.
$3. \implies 1.$ 
For a generator $L(e_i)$, let $v_i$ be the corresponding vertex of $P$.
It is the intersection of the ray generated by $L(e_i)$ and the
cutting hyperplane $K$.
Note that $v_i$ is a _positive_ multiple of $L(e_i)$.
Firstly we show that $S_2 \subseteq \partial Z$.
Let ${ \{i,j\} }$ be an unordered pair of indexes as above.
These indexes divide the remaining indexes into 2 contiguous circular sequences.
Let $\mathcal{P}$ be one of the corresponding parallelograms of $S_2$,
and let $u$ be its unit normal.
By definition, $\mathcal{P}$ is the image under $L$
of a 2-transition square $\Sigma$ in $Q^n_2$.
For $\Sigma$, all $\alpha_k$ in one sequence are 0,
and all $\alpha_k$ in the other sequence are 1.
We want to show that $\mathcal{P}$ is also in $\partial Z$.
Let $\mathcal{L}$ be the line through $v_i$ and $v_j$.
The plane given by $\langle x,u \rangle = 0$ contains both $L(e_i)$ and $L(e_j)$,
and so $\langle v_i,u \rangle = \langle v_j,u \rangle = 0$.
The line $\mathcal{L}$ divides $K$ into a positive side and a negative side.
For another vertex $v_k$,
$\langle L(e_k),u \rangle > 0$ iff $v_k$ is on the positive side of $\mathcal{L}$,
and
$\langle L(e_k),u \rangle < 0$ iff $v_k$ is on the negative side of $\mathcal{L}$.
Consider the relationship between $\mathcal{L}$ and $P$.
**Case i).** $\mathcal{L}$ intersects the interior of $P$
<br>
Since $P$ is convex, all the $v_k$ in one contiguous sequence
are on the positive side of $\mathcal{L}$ and
all the $v_k$ in the other contiguous sequence are on the negative side of $\mathcal{L}$.
This implies that
all the generators $L(e_k)$ in one contiguous sequence,
have $\langle L(e_k),u \rangle > 0$,
and all the generators $L(e_k)$ in the other contiguous sequence
have $\langle L(e_k),u \rangle < 0$.
But we saw earlier that the $\alpha_k$ in one sequence are all 0,
and the $\alpha_k$ in the other sequence are all 1.
This is exactly the condition given by the Lemma in the previous section.
Therefore $\mathcal{P} \subseteq \partial Z$.
**Case ii).**  $\mathcal{L}$ intersects $\partial P$, but not the interior of $P$
<br>
This case is a little more subtle, but basically the same.
Since $P$ is convex, w.l.o.g. we can assume all $v_k$ are
either on $\mathcal{L}$ or on the negative side of $\mathcal{L}$.
Those on the negative side are part of a contiguous sequence,
so for all these $k$, $\alpha_k$ is either 0 or 1.
For the $v_k$ that are **on** the line, $\langle L(e_k),u \rangle = 0$,
so the conditions in the above Lemma do not care whether
$\alpha_k$ is 0 or 1.
Thus all $\alpha_k$ satisfy the conditions of the Lemma
and so $\mathcal{P} \subset \partial Z$.
Secondly we show that $\partial Z \subseteq S_2$.
Let $\mathcal{P} \subset \partial Z$ and let $\Sigma$ be the square
whose image is $\mathcal{P}$.
We want to show that every $x \in \Sigma$ has 2-transitions.
**Case i).** $\mathcal{L}$ intersects the interior of $P$
<br>
Then for $k \not\in \{ i,j \}$,
all the generators $L(e_k)$ in one contiguous sequence
have $\langle L(e_k),u \rangle > 0$,
and all the generators $L(e_k)$ in the other contiguous sequence
have $\langle L(e_k),u \rangle < 0$.
In no case is $\langle L(e_k),u \rangle = 0$.
The Lemma then forces two possibilities for $\alpha_k$,
and both of them have 2 transitions.
**Case ii).** $\mathcal{L}$ intersects $\partial P$, but not the interior of $P$
<br>
We know $v_i$ and $v_j$ are on the line $\mathcal{L}$.
Let $\mathcal{I} := \{ ~ l : v_l \in \mathcal{L} ~ \}$.
Since $P$ is simple, the set of $v_l$ for $l \in \mathcal{I}$ is
contiguous in $P$.
If $i$ and $j$ are the only indexes in $\mathcal{I}$,
then since $P$ is simple and convex,
all the _other_ $v_k$ are contiguous and on one side of $\mathcal{L}$.
So by the Lemma, for all the _other_ $v_k$ either $\alpha_k=0$ or $\alpha_k=1$.
Therefore every $x \in \Sigma$ has 2 transitions.
If $\mathcal{I}$ contains more than just $i$ and $j$, then the generators
$L(e_l)$ for $l \in \mathcal{I}$ generate a non-trivial zonogon facet of $Z$.
Recall that this facet is tiled with the standard tiling.
Since $Z$ is pointed, the zonogon facet is also pointed.
And since $P$ is simple, the generators of the facet are in angular order.
By the property of the standard tiling in the [Zonotopes](zonotopes.html)
vignette,
we know that there are 2 transitions in the sequence of $\alpha_l$
for $l \in \mathcal{I}$.
Assume now that all vertices of the complementary sequence are
on the *negative* side of $\mathcal{L}$,
so all the complementary $\alpha_k = 0$.
When combined with the $\alpha_l$, the result is a 2-transition sequence,
for every $x \in \Sigma$.
If all vertices of the complementary sequence are
on the *positive* side of $\mathcal{L}$, then we can use the central
symmetry of $Z$ to show that the sequence for $x$ is the reflection,
and thus still has 2 transitions.
$\square$
<br><br>
# Strictly Starshaped Surfaces
`raytrace2trans()` is one of the important functions in the package **zonohedra**.
It expects that the 2-transition surface is nice enough
so that a given ray intersects the surface in a unique point.
This section explores the mathematics of this situation.
It is convenient to deal with abstract polyhedral surfaces in $\mathbb{R}^3$.
Let $S$ be a _polyhedral_ 2-sphere,
i.e. a sphere $\mathbb{S}^2$ that is tessellated by polygons,
and let $f: S \to \mathbb{R}^3$ be a continuous map that is injective
and linear on each polygon.
One can think of $f$ as a _piecewise-linear immersion_.
It may not be injective on all $S$, so the surface $f(S)$ may have self-intersections.
$f(S)$ is a _polyhedral surface_ in $\mathbb{R}^3$.
We call the surface polygons of $f(S)$ _facets_.
Since $S$ is orientable, we can choose a normal vector $n_i$ for each facet,
so that these normals are consistent across the edges.
A facet and its normal defines a positive halfspace for the facet,
and a negative halfspace for the facet.
The example we have in mind is the 2-transition surface $S_2$
associated with a zonohedron.
Given the vectors $n_i$ and a point $p \notin f(S)$
the _linking number_ of $p$ and $f(S)$ is defined as follows.
Choose a ray based at $p$ and not intersecting any edge of $f(S)$;
this is always possible.
Now examine every intersection of the ray with the interior of a facet.
If the ray crosses with the same orientation as $n_i$, assign a +1;
otherwise assign a -1.
The _linking number_ is defined to be the sum of all these +1s and -1s.
It is independent of the chosen ray.
Reversing the sign of every $n_i$ yields a consistent vector field
and changes the sign of the linking number.
The linking number is a straightforward generalization of the
_winding number_ of a closed polygonal curve in the plane,
with respect to a point not on the curve.
For more on this subject, see @Milnor1997.
Suppose now that $f: S \to \mathbb{R}^3$ is injective.
It is well-known (@Alexander1924, @Moise1977 p. 117, and @Bing1983 p. 161)
that $f(S)$ divides $\mathbb{R}^3$ into
an inside region and an outside region whose intersection is $f(S)$.
Moreover, the inside region is homeomorphic to a closed ball,
and $f(S)$ is the boundary of that ball.
**Definition:**
Let $B \subseteq \mathbb{R}^3$ be a closed set and $b_0 \in B$.
Then $B$ is _starshaped at_ $b_0$ iff
for any $b \in B$ the segment $[b_0,b] \subseteq B$.
**Definition:**
Let $B \subseteq \mathbb{R}^3$ be a closed set with interior
and $b_0 \in \operatorname{int}(B)$.
Then $B$ is _strictly starshaped at_ $b_0$ iff
for any $b \in B$ the half-open segment
$[b_0,b) \subseteq \operatorname{int}(B)$.
So to be strictly starshaped, the segment $[b_0,b]$
cannot intersect $\partial B$ except possibly at $b$.
We now want to extend the concept of strictly starshaped
from bodies $B$ to surfaces $f(S)$.
**Definition:**
Let $f: S \to \mathbb{R}^3$ be as above, and $p \notin f(S)$.
Then $f(S)$ is _strictly starshaped at_ $p$ iff $f$ is injective
and the inside region $B$ is strictly starshaped at $p$.
Note that this definition forces $p$ to be in the interior of $B$.
**Theorem:** 
Let $f: S \to \mathbb{R}^3$ be as above, and $p \notin f(S)$.
Then these are equivalent:
<ol type='a'>
<li>$f(S)$ is strictly starshaped at $p$</li>
<li>$f$ is injective, and every ray based at $p$ intersects $f(S)$ in
exactly one point</li>
<li>the linking number of $f(S)$ and $p$ is +1 (resp. -1) and $p$
is in the negative (resp. positive) open halfspace of every facet of $f(S)$</li>
</ol>
For `raytrace2trans()` to work well, we want Property b. to be true
for the 2-transition surface $S_2$,
but its truth or falsity is not readily computable.
However, Property c. is easily computed, and if the surface fails the test,
then `raytrace2trans()` issues a warning
that the computed ray intersection may not be unique.
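Here is a hedged usage sketch.
We assume that the first three arguments of `raytrace2trans()` are the zonohedron,
the basepoint of the ray, and the direction of the ray, in parallel with `raytrace()`;
the basepoint should be inside the surface:
```{r, echo=TRUE, eval=FALSE}
zono = polarzonohedron( 9 )
#  the center of symmetry is inside S_2, so a ray from it should hit the surface once
raytrace2trans( zono, getcenter(zono), c(0,1,1) )
```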
<br><br>
# Higher-Transition Points in the Cube $Q^n$
In Section 2, the 2-transition subcomplex $Q_2^n \subsetneq Q^n$ was defined;
it is the subset of points with 2 transitions.
We now want to define the number of transitions for _any_ point $x \in Q^n$.
When defining the subcomplex $Q_2^n$ above,
**Definition 2** used the set $J_2$ of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have two transitions or no transitions (jumps).
Let $J_\infty$ be the bigger set of all (step) functions on $[\beta_0,\beta_n]$
that take the values 0 or 1 and have a finite number of transitions.
The endpoints $\beta_0$ and $\beta_n$ are identified, to form a circle.
Equivalently, $J_\infty$ is the set of all indicator functions
$\mathbf{1}_{A^+}$ where $A^+$ is a finite disjoint union of arcs in the circle.
The symbol $\infty$ does not mean that there can be infinitely many transitions;
it means that the transition count is finite
but can be arbitrarily large.
The transition count is twice the number of arcs,
so for any $f \in J_\infty$
the transition count of $f$ is a well-defined even integer.
We now want to define the transition count of $x \in Q^n$ using
the function $p : J_\infty \twoheadrightarrow Q^n$,
which is defined exactly as in Section 2.
**Definition:** For any $x \in Q^n$ define
\begin{equation}
\text{transition count of} ~ x ~ := \min_{f \in p^{-1}(x)} \{ ~ \text{transition count of} ~ f ~ \}
\end{equation}
By this definition, all $x \in Q_2^n$ have 2 or 0 transitions.
But if $x \not\in Q_2^n$ then $x$ has more than 2 transitions.
Following @Burns2021, we say that $x$ is a _higher-transition point_.
Given an $x \in Q^n$, the transition count of $x$ is easily computable.
In the interval [0,1] the endpoints 0 and 1 are _boundary values_,
and all other values are _interior values_.
First consider the case when all coordinate values
of $x$ are 0 or 1, so $x$ is a vertex.
The maximum transition count occurs when 0s and 1s alternate.
A little consideration of small $n$ yields the following table of counts.
| $n$ | 3 | 4 | 5 | 6 | 7 | ... | $n$ |
|:---------------------------------------|:-:|:-:|:-:|:-:|:-:|:---:|:-----------------------:|
| max transition count for a vertex $x$  | 2 | 4 | 4 | 6 | 6 | ... | $2 \lfloor n/2 \rfloor$ |
At the other extreme is when all coordinate values are interior values,
so $x$ is an interior point.
A little consideration of small $n$ shows that
one can make $x = p(f)$ when $f$ has transition counts in this table:
| $n$ | 3 | 4 | 5 | 6 | 7 | ... | $n$ |
|:--------------------------------------------|:-:|:-:|:-:|:-:|:-:|:---:|:---------------------------:|
| transition count for an interior point $x$  | 4 | 4 | 6 | 6 | 8 | ... | $2 \lfloor (n+1)/2 \rfloor$ |
Finally, consider the general $x \in Q^n$.
Find all runs of interior values,
and the 2 border values on either side of the run.
These 2 border values are either **equal** or **not equal**.
Let $r$ be the length of a run and look up the number of transitions
for this specific run in this table:
| length of run $r$      | 1 | 2 | 3 | 4 | ... | $r$ |
|:-----------------------|:-:|:-:|:-:|:-:|:---:|:---------------------------:|
| equal border values    | 2 | 2 | 4 | 4 | ... | $2 \lfloor (r+1)/2 \rfloor$ |
| unequal border values  | 0 | 2 | 2 | 4 | ... | $2 \lfloor r/2 \rfloor$     |
The numbers in the header row are the possible lengths of the run of interior values.
For example, if the length of the run is 1, and the border values are equal
the transition count for this sequence is 2.
Take the sum of these counts over all runs of interior values.
Next, strip out all interior values completely, to leave a circular sequence of 0s and 1s.
Compute the number of transitions of this "stripped" sequence in the usual way,
and add to the previous sum.
This final sum is the transition count for any $x \in Q^n$.
This shows that $p : J_\infty \twoheadrightarrow Q^n$ truly is surjective.
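The algorithm just described is easy to implement.
The following sketch is for illustration only;
`transitioncount()` is hypothetical, and is not the package's internal code:
```{r, echo=TRUE, message=FALSE}
transitioncount <- function( x )
    {
    n = length(x)
    interior = (0 < x) & (x < 1)
    if( all(interior) )  return( 2 * floor( (n+1)/2 ) )    #  see the interior-point table
    #  rotate so position 1 holds a boundary value; runs of interior values then do not wrap
    k = which( ! interior )[1]
    idx = c( k:n, seq_len(k-1) )
    x = x[idx] ; interior = interior[idx]
    count = 0
    r = rle( interior ) ;  ends = cumsum( r$lengths ) ;  starts = ends - r$lengths + 1
    for( i in which( r$values ) )       #  each run of interior values
        {
        left  = x[ starts[i] - 1 ]                          #  border value before the run
        right = x[ if( ends[i] < n ) ends[i] + 1 else 1 ]   #  border value after the run, circularly
        len   = r$lengths[i]
        count = count + ifelse( left == right, 2*floor( (len+1)/2 ), 2*floor( len/2 ) )
        }
    #  strip the interior values, and add the jumps of the remaining circular 0/1 sequence
    b = x[ ! interior ]
    count + sum( b != c( b[-1], b[1] ) )
    }
transitioncount( x1 )                #  2, for the point x1 from Figure 2.1
transitioncount( c(0,1,0,1,0) )      #  4, an alternating vertex with n=5
```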
We are mostly interested in the case when $L(x) \in \partial Z$,
and then $x$ has at most 2 interior values and
the length of a run of interior values is either 1 or 2.
From the above algorithm it is clear that for a fixed parallelogram
$\mathcal{P} \subset \partial Z$,
and for any $x$ that maps to the interior of $\mathcal{P}$,
the transition count of $x$ is a constant.
Thus we can write about the transition count for any
parallelogram $\mathcal{P} \subset \partial Z$.
Given indexes $i$ and $j$ of generators of $Z$, the transition count for
the corresponding parallelogram(s) in $\partial Z$ is fairly easy to compute.
The algorithm is in the proof of the theorem in section 7.
Let $\mathcal{L}$ be the line through vertices $v_i$ and $v_j$ in
the generator polygon $P$.
Then the transition count is the number of times that $\mathcal{L}$ cuts $P$.
This algorithm is also present in @Brill1983.
<br><br>
# Parallelograms in $S_2$ and $\partial Z$, revisited
By the previous theorem, if the generator polygon $P$ is **not**  simple and convex,
then $S_2 \ne \partial Z$.
This means there is a parallelogram in $S_2$ that is in the interior of $Z$.
Let $\{ i,j \}$ be an unordered pair of indexes for such a parallelogram.
Consider the slabs $\mathcal{S}^{ \{i,j\} }$ and $\mathcal{S}_2^{ \{i,j\} }$ defined above.
For simplicity, drop the $\{ i,j \}$ to get just $\mathcal{S}$ and $\mathcal{S}_2$.
Since this parallelogram in $S_2$ is in the _interior_ of the slab,
the functional defining the slabs is not maximized on the parallelogram,
so we call it _deficient_.
The difference between the functional values on the two parallelograms is
called the _deficit_.
The corresponding parallelogram in $\partial Z$
is called _abundant_ because every $z$ in this parallelogram
is the image under $L$ of a higher-transition $x \in Q^n$.
To summarize, each deficient parallelogram in $S_2$ has a matching
abundant parallelogram in $\partial Z$.
This is illustrated in the left side of the next figure.
The bold line segments correspond to the parallelograms and their antipodals.
The outward-pointing normals define the functionals to be maximized.
The 2 slabs are labeled.
Note $\mathcal{S}_2$ is a proper subset of $\mathcal{S}$;
in symbols $\mathcal{S}_2 \subsetneq \mathcal{S}$.
```{r, echo=FALSE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=3, fig.cap='Figure 10.1', out.width="100%", cache=FALSE }
plot_slabs <- function()
    {
    plot.new()
    xlim = c(-10,10)
    ylim = c(-7,7)
    theta   = 20 * pi/180
    rot2x2  = matrix( c(cos(theta),sin(theta),-sin(theta),cos(theta)), 2, 2 )
    plot.window( xlim, ylim, asp=1 )
    #   big slab
    x  = c(-15,15,15,-15)
    y   = c(-5,-5,5,5)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray90' )
    xya = cbind( c(1,5), c(4,5) )
    xyrot   = rot2x2 %*% xya
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "abundant", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "abundant", adj=c(0,3/2) )
    #   small slab
    ytop    = 2.5
    x  = c(-15,15,15,-15)
    y   = c(-ytop,-ytop,ytop,ytop)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray80', lty=2 )
    xyd = cbind( c(-ytop,ytop), c(0,ytop) )
    xyrot   = rot2x2 %*% xyd
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "deficient", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "deficient", adj=c(0,3/2) )
    xya     = cbind( c(-6,5), c(-6,5+2) )
    xyrot   = rot2x2 %*% xya
    arrows( xyrot[1,1], xyrot[2,1],  xyrot[1,2], xyrot[2,2], length=0.1, angle=20 )
    arrows( -xyrot[1,1], -xyrot[2,1],  -xyrot[1,2], -xyrot[2,2], length=0.1, angle=20 )
    
    #   label both slabs
    x0  = 7
    
    xy  = cbind( c( x0, (5+ytop)/2 ),  c( x0, -(5+ytop)/2 ) )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1, ], xyrot[2, ], expression( S ) )
    xy  = c( x0, 0 )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1], xyrot[2], expression( S[2] ) )
    points( 0, 0, pch=20 )
    #return( TRUE )
    }
plot_slab <- function()
    {
    plot.new()
    xlim = c(-10,10)
    ylim = c(-8,8)
    theta   = 20 * pi/180
    rot2x2  = matrix( c(cos(theta),sin(theta),-sin(theta),cos(theta)), 2, 2 )
    plot.window( xlim, ylim, asp=1 )
    #   big slab
    x  = c(-15,15,15,-15)
    y   = c(-5,-5,5,5)
    xy  = rbind( x, y )
    xyrot   = rot2x2 %*% xy
    polygon( xyrot[1, ], xyrot[2, ], col='gray80' )
    xya = cbind( c(1,5), c(4,5) )
    xyrot   = rot2x2 %*% xya
    xymid   = rowMeans(xyrot)
    lines( xyrot[1, ], xyrot[2, ], lwd=5 )
    text( xymid[1], xymid[2], "coincident", adj=c(1,-1/2) )
    lines( -xyrot[1, ], -xyrot[2, ], lwd=5 )
    text( -xymid[1], -xymid[2], "coincident", adj=c(0,3/2) )
    #   arrows
    xya     = cbind( c(-6,5), c(-6,5+2) )
    xyrot   = rot2x2 %*% xya
    arrows( xyrot[1,1], xyrot[2,1],  xyrot[1,2], xyrot[2,2], length=0.1, angle=20 )
    arrows( -xyrot[1,1], -xyrot[2,1],  -xyrot[1,2], -xyrot[2,2], length=0.1, angle=20 )
    
    #   label slab
    xy  = c( 6, 0 )
    xyrot   = rot2x2 %*% xy
    text( xyrot[1], xyrot[2], expression( S[2] == S ) )
    points( 0, 0, pch=20 )
    
    }
    
oldpar = par( mfrow=c(1,2)  , omi=c(0,0,0,0), mai=c(0,0.1,0,0.1) )
plot_slabs() ; plot_slab()
par( oldpar )
```
On the other hand,
if a parallelogram of $S_2$ is in the boundary of the slab $\mathcal{S}$,
then the two parallelograms are equal and we call them _coincident_.
It means that every $z$ in this parallelogram
is the image of an $x \in Q^n$ that has 2 (or 0) transitions.
This is illustrated in the right side of the above figure.
To get data about the abundant and coincident parallelograms in $\partial Z$,
use the functions `transitionsdf()`, for example:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE }
matgen = colorimetry.genlist[[2]]   # the CIE 1931 CMFs at 1nm step
matgen = 100 * matgen / sum( matgen[2, ] )   # it's traditional to scale so the center has Y=50
zono =  zonohedron( matgen )
getcenter(zono) ; dim( getmatrix( getsimplified( getmatroid(zono) ) ) )
transitionsdf( zono )
```
The number of (simplified) generators of `zono` is $n{=}340$,
indexed from 360 to 699.
So the total number of parallelograms is $340(340{-}1)=115260$.
Data in the first row are for the coincident parallelograms, with 2 transitions.
These form the majority of $\partial Z$,
with about 33110/34669 = 95.5% of the surface area.
Data in the rows below are for the abundant parallelograms.
Higher transition counts generally have fewer parallelograms.
The last column has an example of a parallelogram with the given
transition count.
For example, there are 1802 parallelograms in $\partial Z$ with 8 transitions,
and the one given by generators $\{ 570,608 \}$ is one of them.
This means that the line through $v_{570}$ and $v_{608}$ cuts $P$ in 8 places.
Here is a plot of the point in the cube that maps to the center
of the parallelogram:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6.5, fig.height=3, fig.cap='Figure 10.2', out.width="100%", cache=FALSE }
oldpar = par( omi=c(0,0,0,0), mai=c(0.45,0.5,0.1,0) )
gnd = getground( getsimplified( getmatroid(zono) ) )
pcube = boundarypgramdata( zono, c(570,608), cube=TRUE )$pcube
xlim = range( gnd[which(0<pcube)] ) + 20*c(-1,1)
plot( xlim, c(0,1), type='n', xlab='', ylab='', las=1, lab=c(5,10,7), cex.axis=0.8 )
grid( col='gray', lty=1 )
lines( gnd, pcube, type='s' )
par( oldpar )
```
Note that the values at 570 and 608 are both 1/2, and all the other values are 0 or 1.
<br>
The following figure is a helpful 3D visualization
of *all* the abundant parallelograms:
```{r, rgl=TRUE, dev='png', echo=TRUE,  message=TRUE,  warning=FALSE, fig.width=6.5, fig.height=4, fig.cap='Figure 10.3', fig.keep='last', fig.show='hold', out.width="100%", cache=FALSE }
library( orientlib )
user3x3 = orientlib::rotmatrix( orientlib::eulerzyx( -0.249417, 0.7116067, 2.324364 ) )@x
dim(user3x3) = c(3,3)
par3d( userMatrix=rotationMatrix(matrix=user3x3), zoom=0.35 )
plothighertrans( zono )
```
In this figure, the abundant parallelograms are color-coded following @Burns2021;
dark red for 4, yellow for 6, blue for 8, and purple for 10 transitions.
The view is looking up the "neutral axis" with a black point at 0,
a white point at the opposite point, and a gray point at the
center of symmetry.
The central symmetry of the abundant parallelograms is clear.
Compare this with Figure 7 in @Burns2021.
<br><br><br>
# References
<div id="refs"></div>
<br><br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
/scratch/gouwar.j/cran-all/cranData/zonohedra/vignettes/transitions.Rmd

---
title: "zonohedra User Guide"
author: "Glenn Davis"
date: "`r Sys.Date()`"
output: 
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: false
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
# csl: institute-of-mathematical-statistics.csl
# csl: transactions-on-mathematical-software.csl
vignette: >
  %\VignetteIndexEntry{zonohedra User Guide}
  %\VignetteEngine{knitr::rmarkdown}
---
```{css, echo=FALSE}
body {
  max-width: 750px;     /* make a little wider, default is 700px */
}
/*
div.figure {
 border: 1px;
 border-style: groove;
}
*/
```
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
# if( !file.exists("figs") ) dir.create("figs")
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
# Introduction
A _zonohedron_, roughly speaking, is the projection of
a high-dimensional cube to $\mathbb{R}^3$.
For a precise definition see the [Zonotopes](zonotopes.html) vignette,
section 1.3.
A zonohedron is a special type of convex polyhedron.
The goal of this package is to construct any zonohedron,
but especially the ones in these 2 families:
<ul>
<li> the classical zonohedra, with high symmetry </li> 
<li> zonohedra that arise naturally from colorimetry, which may contain hundreds of generators, but little symmetry</li> 
</ul>
In the first case, 13 classical zonohedra have been taken from
@wikiZonohedron
and are built in to the package.
In the second case, an _optimal color solid_ is viewed as a zonohedron;
this connection was discovered by Paul Centore
and is explained very clearly in @Centore2013.
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
library(rgl)
```
The package dependencies are:
<ul>
<li>**rgl**  @rgl - for 3D plotting</li>
<li>**microbenchmark**  @microbenchmark  - is suggested for its high-precision timer</li>
<li>**logger**  @logger - for event logging</li>
</ul>
Some of the figures below are displayed with **WebGL** -
a JavaScript API for rendering interactive 2D and 3D graphics.
Try using the left mouse button to rotate and the scroll wheel to zoom.
<br><br>
# Polar Zonohedra
The generators for a polar zonohedron are particularly simple -
they are equally distributed on a circle that
is in a plane parallel to the xy-plane and
whose center is on the z-axis.
Construct polar zonohedra with 5 and 25 generators and plot them.
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=4, fig.cap='polar zonohedra with 5 generators (left) and 25 generators (right)    [both of these are interactive WebGL widgets]', fig.keep='none', fig.show='hide', out.width="100%", cache=FALSE }
rgl::mfrow3d( 1, 2 )
pz5 = polarzonohedron( 5 ) ;  plot( pz5, ewd=5 )
rgl::next3d()
plot( polarzonohedron( 25 ), ewd=3 )
rgl::rglwidget( webgl=TRUE )
```
In these 2 plots, the black dot is the origin,
the vertices nearest the origin are the generators,
and the white dot is the point $(0,0,\pi)$.
Each of the generators is assigned a unique color,
and every other edge with that color is parallel to the generator.
All parallelograms with an edge of that color form the
_zone_ or _belt_ for that generator.
Each belt is a topological annulus.
For more details on these polar zonohedra, see @Chilton1963.
Print the generators of the first zonohedron `pz5`;
they are the columns of this 3x5 matrix.
```{r, echo=TRUE, message=FALSE}
getmatrix( pz5 )
```
A function similar to `polarzonohedron()` is `regularprism()`.
<br><br>
# Classic Zonohedra
There are 13 classic zonohedra available in the package,
as a list of 3xN matrices,
where N is the number of generators.
The global data variable is 
`classics.genlist`, with S3 class `'genlist'`.
The 13 matrices in the list are taken from @Eppstein.
```{r, echo=TRUE, message=FALSE}
classics.genlist
```
Extract the matrix of generators for the `truncated cuboctahedron`,
which is abbreviated by `TC`.
```{r, echo=TRUE, message=TRUE}
mat = classics.genlist[['TC']] ; mat
```
Create the truncated cuboctahedron and plot it, with filled faces.
```{r, rgl=TRUE, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=8, fig.height=5, out.width="100%", fig.align="center", fig.cap='truncated cuboctahedron      [This is an interactive WebGL widget]', fig.keep='last', fig.show='hide', cache=FALSE }
rgl::par3d( userMatrix = rotationMatrix( -20*pi/180, 0, 1, 1) )
zono = zonohedron( mat )
plot( zono, type='f' )
rgl::rglwidget( webgl=TRUE )
```
<br>
Before continuing, define function `spinit()` used for creating animated GIFs.
```{r, echo=TRUE, message=FALSE, warning=FALSE}
library(gifski)
#   zono        the zonohedron
#   index       unique ID for this animation, a positive integer
#   fps         frames per second
#   duration    of the animation, in seconds
#   revolutions number of revolutions
#   vpsize      viewport size = (width,height)
spinit <- function( zono, index, fps=5, duration=8, revolutions=1, vpsize=c(480,480) ) {
#  enlarge viewport
wr = par3d( "windowRect" ) 
par3d( windowRect = c( wr[1:2], wr[1:2] + vpsize ) )
pathtemp = "./figs" ;   if( ! file.exists(pathtemp) ) dir.create(pathtemp)  # make temp folder
#  make a lot of .PNG files in pathtemp
movie3d( spin3d( getcenter(zono), rpm=revolutions*60/duration ), duration=duration, fps=fps, startTime=1/fps,
           convert=F, movie='junk', dir=pathtemp, verbose=F, webshot=F )
#  combine all the .PNGs into a single .GIF
pathvec = dir( pathtemp, pattern="png$", full=T )
gif_file = sprintf( "./figs/animation%g.gif", index ) 
# if( file.exists(gif_file) )  file.remove( gif_file )
out = gifski::gifski( pathvec, gif_file=gif_file, delay=1/fps, progress=F, width=vpsize[1], height=vpsize[2] )
res = file.remove( pathvec )  # cleanup the .PNG files, leaving just the .GIF
return( out )
}
```
<br><br>
# Colorimetry Zonohedra
In colorimetry, an optimal color solid is a zonohedron.
```{r, echo=TRUE, message=TRUE, warning=TRUE, fig.cap='optimal color solid', fig.keep='last', fig.show='hide', cache=FALSE }
# colorimetry.genlist[[1]] is a 3x81 matrix with the CIE 1931 CMFs at 5nm interval
zono5 = zonohedron( colorimetry.genlist[[1]] )
plot( zono5, type='f' )
gif_file = spinit( zono5, 2, vpsize=c(256,256) )
```
{width=60%}
In this figure, the black dot is the _black point_ [0,0,0].
The white dot is the _white point_, i.e. the column sums of the
generating matrix.
<br><br>
# Future Work
Here are a few possible improvements and additions.
**export**   
There should be a way to export a zonohedron as
a quadrilateral mesh in some standard format(s).
**vignettes**  
There should be more vignettes.
One idea is to show ways
to examine individual hyperplanes and facets of a zonohedron.
Another idea is to display some interesting Minkowski sums of a few
classic zonohedra.
<br><br>
# References
<div id="refs"></div>
<br><br>
\Appendix
<br><br>
## Appendix A - Methods
The constructor `zonohedron()` uses the optimizations in
Paul Heckbert's memo @Heckbert1985.
The key step is sorting points that lie on a great circle on the sphere.
This efficient method is $O(N^2\log(N))$,
whereas the naive method is $O(N 2^N)$.
The central symmetry is used whenever possible,
and when used this can speed things up by a factor of 2.
To further speed things up, many of the methods use C/C++.
The function `grpDuplicated()` was written by Long Qu,
with a small modification of the return value by myself.
It is written in C/C++ and is implemented with `std::unordered_map`.
The code was taken from the discontinued package **uniqueAtomMat**,
see @uniqueAtomMat.
<br><br>
## Appendix B - Logging
Logging is performed using the package **logger**, see @logger.
This is a powerful package that allows a separate configuration
for logging from within **zonohedra**, and that is what I have done.
During package loading, the logging threshold is changed from `INFO` to `WARN`.
To change it back again, one can execute:  
`log_threshold( INFO, namespace="zonohedra" )`
The layout callback function is customized;
it adds the name of the calling function to the message.
To install your own layout function, you can execute:  
`log_layout( <your function>, namespace="zonohedra" )`
The appender callback function is also customized;
it comes to an immediate stop if the message level is `ERROR` or `FATAL`.
To return to the default behavior, you can execute:  
`log_appender( appender_console, namespace="zonohedra" )`
The formatter callback function is forced to be `formatter_sprintf()`;
this should not be changed.
<br><br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
/scratch/gouwar.j/cran-all/cranData/zonohedra/vignettes/zonohedra-guide.Rmd

---
title: "Zonotopes"
author: "Glenn Davis"
date: "`r Sys.Date()`"
header-includes:
#  - \usepackage{amsmath}
#  - \usepackage{amssymb}
#  - \usepackage{amsthm}
output:
  rmarkdown::html_vignette:
    toc: true
    toc_depth: 2
    number_sections: true
#  includes:
#    in_header: preamble.tex    
bibliography: bibliography.bib
# csl: iso690-numeric-brackets-cs.csl
csl: personal.csl
vignette: >
  %\VignetteIndexEntry{Zonotopes}
  %\VignetteEngine{knitr::rmarkdown}
---
\newcommand{\argmax}{\mathop{\mathrm{argmax}}\limits}
\newcommand{\max}{\mathop{\mathrm{max}}\limits}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{assumption}{Assumption}
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
old_opt = options( width=120 )
require("rgl",quietly=TRUE)
rgl::setupKnitr(autoprint = TRUE)
```
This vignette is a long-winded mathematical exposition on zonotopes
that concentrates on inversion.
Discussion of software is delayed until the end.
Featured functions are:
`zonoseg()`, `zonogon()`, `zonohedron()`, `invert()`, and `invertboundary()`.
# Basic Concepts
The emphasis in this vignette is on the concepts needed to understand
the inversion functions in the **zonohedra** package.
Much of this is based on @Ziegler2012.
## supporting hyperplanes
A _supporting hyperplane_ of a compact set $C$ in Euclidean space $\mathbb{R}^n$ is a hyperplane $P$ that has these properties:
<ol>
<li>$P$ intersects $C$</li>
<li>$C$ is entirely contained in one of the two closed half-spaces defined by $P$</li>
</ol>
Note that the 2 properties imply that the intersection
$P \cap C$ is a subset of the boundary of $C$.
<br>
If the compact set is a _convex body_, i.e. is convex with interior,
then 2 equivalent properties are given by this:  
**Theorem:** 
If $B$ is a closed convex body with interior,
then $P$ is a supporting hyperplane of $B$ iff $P$ has these properties:
<ol type='i'>
<li>$P$ intersects $B$</li>
<li>$P$ does **not** intersect the interior of $B$</li>
</ol>
**Proof:**   
not ii. $\implies$ not 2.  
Let ii. be false, so hyperplane $P$ _does_ intersect $int(B)$, at a point $p$.
Then there is an open ball centered at $p$, and the ball is inside $B$.
There are clearly points in the ball in _both_ halfspaces,
and so 2. is false.
not 2. $\implies$ not ii.   
Let 2. be false, so there are points $b^-, b^+ \in B$ that are in
_different_ open halfspaces.
Let $b_i$ be a point in $int(B)$.
If $b_i \in P$ then ii. is false and we are done.
Otherwise, either $b^-$ or $b^+$ is in a different halfspace than $b_i$.
Take it to be $b^+$, w.l.o.g.
Since $b_i$ and $b^+$ are in opposite halfspaces,
the segment $[b_i,b^+]$ intersects $P$;
let $c_i$ be this point of intersection.
There is an open ball in $B$ centered at $b_i$.
Let $C$ denote the convex hull of $b^+$ and this ball - a partial open cone.
By convexity of $B$, $C$ is in $B$.
There is a scaled down open ball centered at $c_i$ and contained in $C$.
Thus $c_i \in P \cap int(B)$ and ii. is false.
$\square$
## faces
**Definition:**   
A (proper) _face_ $F$ of a compact set $C \subset \mathbb{R}^n$
is a subset of $C$ that has these 3 equivalent properties:
<ol>
<li>$F = C \cap P$ for some supporting hyperplane $P$</li>
<li>$F = \argmax_{x \in C}  \langle x,w \rangle$ for some non-zero normal vector $w$</li>
<li>$F = \argmax_{x \in C}  \lambda(x)$ for some non-zero linear functional $\lambda : \mathbb{R}^n \to \mathbb{R}$</li>
</ol>
The equivalence 1 and 2 is straightforward,
and the equivalence of 2 and 3 is trivial.
The entire set $C$ is considered to be an (improper) face.
The _dimension of a face_ is the dimension of the affine subspace
spanned by the face.
From now on, we always assume that the dimension of $C$ is $n$,
which is equivalent to $C$ having an interior.
A _d-face_ is a face of dimension _d_.
So a _0-face_ is a _vertex_, and a _1-face_ is an _edge_.
A _facet_ is an ($n{-}1$)-face, and a maximal proper face.
Note that every face of the cube $[0,1]^n \subset \mathbb{R}^n$
is a cube of smaller dimension;
in fact the dimension is the number of 0s in the normal vector $w$ from
part 2 of the above definition.
Let $A : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ be a
surjective affine map (so $m \le n$),
and let $C':= A(C)$.
**Theorem:** If $F'$ is a face of $C'$, then $A^{-1}(F')$  is a face of $C$.
Stated in words, the affine preimage of a face is a face.
**Proof:**
Use part 3 of the above definition, so $F' = \argmax_{y \in C'} \lambda(y)$,
where $\lambda$ is a non-zero linear functional on $\mathbb{R}^m$.
Let $\mu := \max_{y \in C'} \lambda(y)$.
Since $A(C) = C'$, we have $\max_{x \in C} (\lambda \circ A)(x) = \mu$,
and so $A^{-1}(F') \cap C = \argmax_{x \in C} (\lambda \circ A)(x)$.
But $\lambda \circ A$ is a non-zero linear functional
on $\mathbb{R}^n$, plus a constant, which does not affect the $\argmax$.
$\square$
See also @Ziegler2012, Lemma 7.10.
## a zonotope and its generators
**Definition:**
A _zonotope_ $Z$ is a set of the form $L([0,1]^n) + z_0$
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ is a surjective linear map. 
Simply stated, a zonotope is a linear image of a cube plus a translation
(an affine image of a cube).
Since the cube is convex, the zonotope is also convex.
The $n$ _generators_ of $Z$ are the images of the $n$ elementary vectors
$L(e_1), ... , L(e_n)$.
A point $z$ is in $Z$ iff
$z = \alpha_1 L(e_1) ~+~ ... ~+~ \alpha_n L(e_n) + z_0$ with all $\alpha_i \in [0,1]$.
A zonotope is centrally symmetric about the point $L(1/2,...,1/2) + z_0$.
By reflecting through the center of symmetry,
each facet of $Z$ has a corresponding _antipodal_ facet.
The facets come in antipodal pairs.
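The definition is easy to explore numerically.
Here is a minimal base-R sketch, independent of the package
(the map `Lmat` below is an arbitrary example),
that samples points of a zonogon as the image of the cube $[0,1]^3$:
```r
# illustrative sketch: a zonogon as the linear image of the cube [0,1]^3
set.seed(1)
Lmat  <- matrix( c(1,0,  0.5,1,  -0.3,0.8), nrow=2 )   # 3 generators, as columns
z0    <- c(0, 0)
alpha <- matrix( runif(3 * 2000), nrow=3 )    # 2000 random points of the cube
pts   <- t( Lmat %*% alpha + z0 )             # their images in the plane
plot( pts, pch='.', asp=1, xlab='', ylab='' )
```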
Given a face $F$ of zonotope $Z$, the preimage of $F$ is a face $F'$
of the cube.
But every face of a cube is also a cube, and so $F$ is also a zonotope.
Let the normal vector of the supporting hyperplane of $F'$ be $w$.
Then the vectors $\{ \ L(e_i) | w_i=0 \ \}$ are all parallel
to the face $F$, and in fact they generate the linear subspace parallel to $F$.
We call $\{ \ L(e_i) | w_i=0 \ \}$ the _generators_ of $F$.
An important fact:
the number of generators of $F$ is the dimension of the preimage $F'$.
Note that a face of dimension $d$ may have _more_ than $d$ generators,
because they may be linearly dependent.
For a parallelogram face, even if no generators are 0, the face may have more
than 2 generators because some may be multiples of others.
If the dimension of $Z$ is $m$, we call it an _m-zonotope_.
**Theorem:** Let $K$ be the convex hull of a finite set of points in $\mathbb{R}^n$, with $n \ge 3$.
Then $K$ is an $n$-zonotope iff all facets of $K$ are ($n{-}1$)-zonotopes.
For a proof of this hard result, plus much more, see @Bolker1969.
A zonotope of dimension 1, 2, or 3
is called a _zonoseg_, _zonogon_, or _zonohedron_, respectively.
The term "zonoseg" is mine, since I could not find a term for it in the literature.  Geometrically a zonoseg is just a line segment.
A zonoseg has only two faces - the endpoints of the segment.
A zonogon is a convex polygon with 0-faces (vertices) and 1-faces (edges).
Since the dimension of an edge is 1 less than the
dimension of the zonogon, an edge of a zonogon is also a facet.
It can be shown that a convex polygon is a zonogon iff it is centrally symmetric.
A zonohedron has 0-faces (vertices), 1-faces (edges), and 2-faces (facets).
All the facets are zonogons.
A parallelogram facet is called _trivial_,
and facets with more than 4 edges are _non-trivial_.
## convex cones and zonotopes
Let $\mathbb{R}^n_{\ge 0} := \{ \ (x_1,x_2,...x_n) \ | \ x_i \ge 0 \ \}$
denote the non-negative orthant in $\mathbb{R}^n$.
A _convex cone_ $K$ is a set of the form $K = L(\mathbb{R}^n_{\ge 0})$,
where $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$
is a surjective linear map.
The $n$ _generators_ of $K$ are the images of the $n$ elementary vectors
$L(e_1), ... , L(e_n)$.
$K$ is the set of all non-negative linear combinations of the generators.
If $K$ is a subset of a closed linear halfspace, it is called _salient_.
If $K$ is a subset of an open linear halfspace
(except for the vertex 0), it is called _pointed_.
These properties are equivalent to 0 being in the boundary of $K$,
and to 0 being a vertex of $K$, respectively.
Obviously, pointed implies salient, but salient does not imply pointed.
Given a zonotope $Z = L([0,1]^n) + z_0$, the map $L$ also defines
a convex cone $K$.
After translating by $-z_0$, $Z$ is a subset of $K$.
We carry the two above properties of $K$ over to $Z$.
It is straightforward to show that
$Z$ is _salient_ iff $z_0$ is in the boundary of $Z$,
and 
$Z$ is _pointed_ iff $z_0$ is a vertex of $Z$.
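In the plane these two properties are easy to test directly.
Here is a hedged base-R sketch (not a package function):
a zonogon with nonzero generators is pointed iff all generator directions
fit in an open halfplane, i.e. some circular gap between consecutive
direction angles exceeds $\pi$.
```r
# illustrative sketch: test whether a zonogon (generators in the rows of gen,
# all nonzero) is pointed, by checking for an angular gap greater than pi
is_pointed_zonogon <- function( gen ) {
  ang  <- sort( atan2(gen[ ,2], gen[ ,1]) )
  gaps <- diff( c(ang, ang[1] + 2*pi) )    # circular gaps between directions
  any( gaps > pi )
}
is_pointed_zonogon( rbind( c(1,0), c(0.7,0.7), c(0,1) ) )    # TRUE
is_pointed_zonogon( rbind( c(1,0), c(-1,0.2), c(0,-1) ) )    # FALSE
```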
If $Z$ is pointed then there is a "cutting plane" that has $z_0$
on one side, and all the other vertices on the other side.
The intersection of this cutting plane and $Z$ is called the
_vertex figure_ of $Z$ at $z_0$.
The vertex figure is actually more general,
and is defined for any vertex of a convex polyhedron,
see @Ziegler2012, p. 54.
In the case that $Z$ is a zonohedron,
the vertex figure at $z_0$ is a convex polygon that we call the
_generator polygon_.
The polygon is only unique up to a 2D projective transformation.
## matroids and zonotopes
This section assumes some knowledge of _matroids_;
for background on them see the matroids vignette.
Given a zonotope $Z = L([0,1]^n) + z_0$ as above,
the generators define a matroid $M$.
Since $L$ is surjective, $\mathrm{rank}(M) = m$.
A hyperplane of $M$ corresponds to a pair of antipodal facets of $Z$
(the concept of _hyperplane_ here is the one used in matroid theory).
Assume now that $m=3$, so $Z$ is a zonohedron
and all its facets are zonogons.
If $M$ is simple, then the number of sides
of a zonogon facet is twice the number of points in the corresponding hyperplane.
So a parallelogram corresponds to a hyperplane with 2 points,
which is called a _trivial_ hyperplane.
# Inversion
As before,
let $L : \mathbb{R}^n \twoheadrightarrow \mathbb{R}^m$ be a surjective linear map,
and let $Z := L([0,1]^n) + z_0$ for some $z_0 \in \mathbb{R}^m$.
From this setup, given a point $z \in Z$,
we know that the equation
\begin{equation}\tag{$\star$}
L(x) + z_0 = z  ~~~~ \textrm{for} ~ x \in [0,1]^n
\end{equation}
has a solution $x$.
The rest of this section looks at the solutions of
($\star$) in more depth.
## a uniqueness theorem
In this section we consider the question:
When is the solution of $(\star)$ unique?
In the interior case, the answer is straightforward.
**Lemma:**
If $z$ is in the interior of $Z$, then the solution of $(\star)$ is unique iff $n=m$.
**Proof:**
If $n=m$ then $L$ is invertible so we are done.
If $n>m$ the nullspace of $L$ has positive dimension $n-m$.
By Theorem 4.2 of @Davis2018, we can pick an $x \in \operatorname{int}([0,1]^n)$
that satisfies $(\star)$.
Let $U \subset [0,1]^n$ be an open ball around $x$;
the intersection of $U$ with the affine subspace $x + \operatorname{null}(L)$
is an infinite set, and every point in it satisfies $(\star)$.
$\square$
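The non-uniqueness in the Lemma is easy to exhibit numerically.
A tiny sketch with $n=3$ and $m=2$ (the map `Lmat` is an arbitrary example):
```r
# illustrative sketch: two different preimages in the cube for the same z
Lmat <- matrix( c(1,0,  0,1,  1,1), nrow=2 )   # surjective map R^3 ->> R^2
x1 <- c(0.5, 0.5, 0.5)            # an interior point of the cube
x2 <- x1 + 0.2 * c(1, 1, -1)      # moved along the nullspace of Lmat
Lmat %*% x1                       # (1, 1)
Lmat %*% x2                       # (1, 1)  -- the same image
```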
For $z \in Z$, let $F_z$ be the smallest face that contains $z$,
i.e. the intersection of all faces that contain $z$.
It is clear that $z$ is in the _relative interior_ of $F_z$,
i.e. $z \in \operatorname{relint}(F_z)$.
At the extremes, if $z$ is a vertex then $F_z$ is $\{ z \}$,
and if $z$ is in the interior of $Z$, then $F_z$ is $Z$
(here we allow $Z$ itself as an improper face).
The relative interiors of the faces form a partition of $Z$,
see @Ziegler2012, p. 61.
**Theorem:** Let $z$ and $Z$ and $F_z$ be as above.
Then the solution of $(\star)$
is unique iff the number of generators of $F_z$ is
equal to the dimension of $F_z$.
**Proof:**
By the important fact above, the condition says that the dimension of the
preimage of $F_z$ equals the dimension of $F_z$.
Now apply the Lemma, with $Z$ replaced by the face $F_z$
and the cube replaced by the preimage of $F_z$.
$\square$
It is useful to reformulate the uniqueness theorem for the specific
case when $Z$ is a zonohedron ($m=3$).
**Theorem:** Let $z$ be in a zonohedron $Z$.
If $n>3$, then the solution of $(\star)$ is unique iff
none of the generators of $Z$ are 0, and one of the following holds:
<ol>
<li>$z$ is a vertex of $Z$</li>
<li>$z$ is in an edge of $Z$, and the edge has one generator</li>
<li>$z$ is in a parallelogram facet of $Z$, and the parallelogram has two generators</li>
</ol>
If the matroid of $Z$ is simple, i.e. no generators of $Z$
are 0 or multiples of each other, then this simplifies to:
**Theorem:** Let $z$ be in a zonohedron $Z$ whose matroid is simple.
If $n>3$, then the solution of $(\star)$ is unique iff
$z$ is in an edge or a parallelogram facet.
And for a zonogon we have:
**Theorem:** Let $z$ be in a zonogon $Z$ whose matroid is simple.
If $n>2$, then the solution of $(\star)$ is unique iff
$z$ is in the boundary of $Z$  (denoted by $\partial Z$).
## a right inverse on the boundary of a zonogon
```{r, echo=TRUE,  message=FALSE}
library(zonohedra)
```
Let $Z$ be a zonogon whose matroid is simple.
By the previous theorem there is a unique function
$\sigma : \partial Z \to [0,1]^n$ that is a
_right inverse_ for $x \mapsto L(x)+z_0$.
We have $L( \sigma(z) ) + z_0 = z$ for all $z \in \partial Z$.
Question: Is $\sigma()$ continuous?
Well, on each edge it is linear, and so certainly continuous.
Moreover, at each vertex it is uniquely defined, and so the separate
linear maps on the edges must match up at the vertices.
So yes, $\sigma()$ is continuous; in fact it is _piecewise-linear_.
It is instructive to consider a very specific case.
Consider the figure:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE }
zono =  polarzonogon( 14, 4 )
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, elabels=T )
par( oldpar )
```
Denote the 4 generators by $z_1, z_2, z_3, z_4$;
these are labeled in the figure by just the indexes.
Along the bottom edge, $x_1$ increases from 0 to 1,
while the other $x$'s are 0.
When the first vertex $z_1=(1,0)$ is reached, $x_1$ remains at 1,
and on the 2nd edge $x_2$ increases from 0 to 1,
until the next vertex $z_1+z_2$, etc.
At any point on the lower boundary
$x = (1,...,1,\alpha,0, ... ,0)$;
i.e. a run of 1s, then an arbitrary $\alpha \in [0,1]$,
and then a run of 0s.
Both runs are allowed to be empty.
Similarly, along the upper boundary $x = (0,...,0,\alpha,1, ... ,1)$.
These two "low-pass" and "high-pass" filters are analogous
to Goethe's _edge colors_ (Kantenfarben), see @Koenderink p. 17.
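The run-of-1s structure makes the lower boundary easy to invert directly.
Here is a hedged base-R sketch; the generators `gen` are 4 hypothetical unit
vectors sorted by angle, all with positive $x$-component, so the lower
boundary is monotone in $x$.  This is an illustration, not the package's
own inversion code.
```r
# illustrative sketch: invert a point on the lower boundary of a pointed
# zonogon by walking the cumulative sums of the angle-sorted generators
gen <- t( sapply( 0:3, function(k) c(cos(k*pi/14), sin(k*pi/14)) ) )
lower_inverse <- function( z, gen ) {
  n <- nrow(gen) ; x <- numeric(n) ; pos <- c(0,0)
  for( i in seq_len(n) ) {
    a <- (z[1] - pos[1]) / gen[i,1]     # fraction of generator i still needed
    if( a <= 1 ) { x[i] <- a ; return(x) }
    x[i] <- 1 ; pos <- pos + gen[i, ]
  }
  x
}
z <- gen[1, ] + 0.25 * gen[2, ]    # a point on the second lower edge
lower_inverse( z, gen )            # 1.00 0.25 0.00 0.00
```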
## extending the right inverse, using parallelogram tilings
In the previous section, the right inverse $\sigma()$ was only
defined on $\partial Z$.
Question: Is there a way to extend $\sigma()$ across the interior?
The answer lies in parallelogram tilings.
Consider the figure:
```{r, echo=TRUE,  message=TRUE,  warning=TRUE, fig.width=6, fig.height=4, fig.cap='', out.width="80%", cache=FALSE }
oldpar = par( omi=c(0,0,0,0), mai=c(0.8,0.7,0.7,0.2) )
plot( zono, tiling=T, elabels=T, tlabels=T )
par( oldpar )
```
The labels inside the parallelogram tiles are the generators of the tiles.
For points inside the 3 tiles that meet 0, the values of $x_i$ are obvious.
For a point inside tile
<span style="color: red;">1,3</span>,
$x_2=1$ and $x_1$ and $x_3$ vary in [0,1].
The rule for a point $z$ is to locate the tile containing $z$,
and then the _origin_ of the tile.
Next, locate a path of tile edges from 0 to the origin of the tile;
that path determines the $x$ coordinate values that are 1.
The $x$ coordinate values for the tile generators are the 2 coordinates
of $z$ in the tile relative to the origin, and all other $x$ values are 0.
For tile
<span style="color: red;">1,4</span>
there are 2 different paths to the origin of the tile,
but it doesn't matter since the $x$ indexes
on the different paths are the same: 2 and 3.
In general, two different paths are connected by a homotopy
that "crosses" one parallelogram at a time,
and each time, the 2 associated $x$ indexes are the same.
It is straightforward to verify that the right inverse $\sigma()$
defined by this rule is continuous.
The above tiling is just one of many,
and each different tiling generates a different right inverse.
For the 4-generator zonogon above, the number of different tilings is 8.
This sequence increases very rapidly with $n$;
for $n{=}8$ generators (16 sides) the number of
tilings is already more than $10^6$, see @A006245.
The above tiling is
an example of the _standard tiling_ in the **zonohedra** package,
which is generated by the following recipe.
Each generator is "lifted" to $\mathbb{R}^3$
by the mapping $(x,y) \mapsto (x,y,\sqrt{x^2+y^2})$.
The mapping "lifts" each generator to the cone $x^2 + y^2 = z^2$.
The lifted generators generate a zonohedron,
which has an upper half and a lower half.
The faces in the lower half are the ones that can be _seen_
from a viewpoint far below the $xy$-plane, see @Ziegler2012, p. 130.
The parallelogram facets in the _lower_ half are projected down to
$\mathbb{R}^2$ and these form the _standard tiling_.
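The lifting step of this recipe is a one-liner;
here is a minimal sketch (the 2D generators below are arbitrary examples):
```r
# illustrative sketch: "lift" 2D generators onto the cone x^2 + y^2 = z^2
lift  <- function(v) c( v, sqrt(sum(v^2)) )
gen2d <- rbind( c(1,0), c(0.9,0.44), c(0.62,0.78), c(0.22,0.97) )
gen3d <- t( apply( gen2d, 1, lift ) )
gen3d    # the rows are generators of the lifted zonohedron in R^3
```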
The standard tiling has the following nice property:
if the zonogon is pointed,
and the generators are in order by angle (clockwise or counterclockwise),
then every point $\sigma(z)$ has 2 _transitions_,
and the run of 1s does not wrap around.
For the definition of a 2-transition point of the cube,
see the
[The 2-Transition Subcomplex and the 2-Transition Surface](transitions.html) vignette.
The 2-transition concept is important for zonohedra coming
from colorimetry.
The standard tiling is denoted by $T_{min}$ in @Henriques2007 p. 13,
where the 2-transition property is also noted in equation (12).
## a right inverse on the boundary of a zonohedron
In this section,
let $Z$ be a zonohedron whose matroid is simple, with $n{>}3$.
Assume for simplicity that all facets are parallelograms.
Then by a previous theorem there is a unique function
$\sigma : \partial Z \to [0,1]^n$ that is a
_right inverse_ for $x \mapsto L(x)+z_0$.
This right inverse is unique on the edges,
which implies that $\sigma()$ is continuous.
Each parallelogram is the image of a square in $[0,1]^n$,
and the squares are "glued" together on the edges
to form a "surface" (in fact a topological sphere)
embedded in $[0,1]^n$.
We see another example of this in the
[The 2-Transition Subcomplex and the 2-Transition Surface](transitions.html) vignette.
Now suppose that a facet of $Z$ is an arbitrary zonogon,
with a high number of generators.
Then by rotating this non-trivial facet to the plane,
and choosing the standard tiling, we can use the construction
in the previous section to extend the right inverse across this facet.
Once again, the right inverse is unique on the edges,
which implies that the extended $\sigma()$ is continuous.
To summarize this section,
a right inverse $\sigma()$ defined on $\partial Z$ always exists
and is continuous,
but is only unique up to the selected tiling of the
non-trivial facets of $Z$.
<br>
# Software
The above sections are mathematical in nature.
This section is about the implementation of the above
in the software package **zonohedra**.
The package only supports zonotopes of dimensions 1, 2, and 3,
which are called _zonosegs_, _zonogons_, and _zonohedra_, respectively.
The extra generality of the translation $z_0$ turned out to be an unnecessary
complication.
Thus, in the package, the constructors
`zonoseg()`, `zonogon()`, and `zonohedron()` only take a matrix argument,
and not $z_0$.
Many of the above theorems require that the matroid associated with
$Z$ is simple.
In the non-simple matroid case, the package ignores all generators that are 0.
For a multiple group, i.e. a group of generators that are all positive multiples
of each other, it replaces the group by the sum of its generators.
When some generators are negative multiples of each other,
the situation is more complicated and not yet documented.
The zonohedron is then computed from these "simplified" generators,
whose matroid is simple.
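A rough base-R sketch of this simplification
(assumed behavior for illustration only, not the package's actual code;
it handles only the zero and positive-multiple cases described above):
```r
# illustrative sketch: drop zero generators, then sum each group of
# generators that are positive multiples of each other
simplify_generators <- function( G, tol=1e-12 ) {   # generators are the columns of G
  G   <- G[ , colSums(abs(G)) > tol, drop=FALSE]          # drop zero generators
  dir <- apply( G, 2, function(v) v / sqrt(sum(v^2)) )    # unit directions
  key <- apply( round(dir,9), 2, paste, collapse="," )    # group equal directions
  unname( sapply( split( seq_len(ncol(G)), key ),
                  function(ix) rowSums( G[ , ix, drop=FALSE] ) ) )
}
G <- cbind( c(1,0,0), c(2,0,0), c(0,0,0), c(0,1,1) )
simplify_generators( G )    # 4 generators collapse to 2
```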
In many calculations, the central symmetry is used to reduce storage.
For example, only one facet in a pair of antipodal facets needs to be stored,
and the other can easily be derived by reflection.
In some cases, the symmetry is also used to reduce computation time.
The section **extending the right inverse, using parallelogram tilings**
is implemented in the function `invert()`,
which takes the zonogon as argument.
The section **a right inverse on the boundary of a zonohedron**
is implemented in the function `invertboundary()`,
which takes the zonohedron as argument.
For a pointed zonohedron $Z$, the function `plotpolygon()` plots
the generator polygon for $Z$ at 0.
<br>
# References
<div id="refs"></div>
<br>
# Session Information
This document was prepared
`r format(Sys.Date(), "%a %b %d, %Y")`
with the following configuration:
<pre>
```{r, echo=FALSE, results='asis'}
options( old_opt )
sessionInfo()
```
</pre>
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zonohedra/vignettes/zonotopes.Rmd 
 | 
					
	MATCH <- function(x, table, nomatch = NA, ...)
  UseMethod("MATCH")
  
MATCH.default <- function(x, table, nomatch = NA, ...) {
  if(is.atomic(x) && !is.object(x)) {
    if(inherits(table, "Date")) {
      x <- unclass(as.Date(x, origin = "1970-01-01"))
      table <- unclass(table)
    } else if(inherits(table, "POSIXt")) {
      x <- unclass(as.POSIXct(x, origin = "1970-01-01"))
      table <- unclass(as.POSIXct(table))
    }
  }
  match(x, table, nomatch = nomatch, ...)
}
MATCH.timeDate <- function(x, table, nomatch = NA, ...) {
  match(as.POSIXct(x), as.POSIXct(table), nomatch = nomatch, ...)
}
MATCH.times <- function(x, table, nomatch = NA, units = "sec", eps = 1e-10, ...) {
 match(trunc(x, units, eps), trunc(table, units, eps), nomatch = nomatch, ...)
}
MATCH.Date <- function(x, table, nomatch = NA, ...) {
  if(!inherits(table, "Date")) table <- as.Date(table)
  match(unclass(x), unclass(table), nomatch = nomatch, ...)
}
MATCH.POSIXct <- function(x, table, nomatch = NA, ...) {
  if(!inherits(table, "POSIXct")) table <- as.POSIXct(table)
  match(unclass(x), unclass(table), nomatch = nomatch, ...)
}
MATCH.POSIXlt <- function(x, table, nomatch = NA, ...) {
  x <- as.POSIXct(x)
  if(!inherits(table, "POSIXct")) table <- as.POSIXct(table)
  match(unclass(x), unclass(table), nomatch = nomatch, ...)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/MATCH.R 
 | 
					
	ORDER <- function(x, ...)
  UseMethod("ORDER")
ORDER.default <- function(x, ..., na.last = TRUE, decreasing = FALSE)
  order(x, ..., na.last = na.last, decreasing = decreasing)
ORDER.timeDate <- function(x, ...) {
  order(as.POSIXct(x), ...)
}
ORDER.chron <- ORDER.dates <- ORDER.times <- function(x, ...) {
  order(as.numeric(x), ...)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/ORDER.R 
 | 
					
	Ops.zoo <- function (e1, e2) 
{
    e <- if (missing(e2)) {
        NextMethod(.Generic)
    }
    else if (any(nchar(.Method) == 0L)) {
        NextMethod(.Generic)
    }
    else {
	merge(e1, e2, all = FALSE, retclass = NULL)
        NextMethod(.Generic)
    }
    if (is.null(attr(e, "index"))) {
        if(!missing(e2) && nchar(.Method)[1L] == 0L) {
	  out <- zoo(e, index(e2), attr(e2, "frequency"))
	} else {
	  out <- zoo(e, index(e1), attr(e1, "frequency"))
	}
    } else {
	out <- e
    }
    # the next statement is a workaround for a bug in R
    structure(out, class = class(out))
}
t.zoo <- function(x)
	t(as.matrix.zoo(x))
 
cumsum.zoo <- function(x) 
{
	if (length(dim(x)) == 0) x[] <- cumsum(coredata(x))
	  else x[] <- apply(coredata(x), 2, cumsum)
	return(x)
}
cumprod.zoo <- function(x) 
{
	if (length(dim(x)) == 0) x[] <- cumprod(coredata(x))
	  else x[] <- apply(coredata(x), 2, cumprod)
	return(x)
}
cummin.zoo <- function(x) 
{
	if (length(dim(x)) == 0) x[] <- cummin(coredata(x))
	  else x[] <- apply(coredata(x), 2, cummin)
	return(x)
}
cummax.zoo <- function(x) 
{
	if (length(dim(x)) == 0) x[] <- cummax(coredata(x))
	  else x[] <- apply(coredata(x), 2, cummax)
	return(x)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/Ops.zoo.R 
 | 
					
	aggregate.zoo <- function(x, by, FUN = sum, ..., regular = NULL, frequency = NULL, coredata = TRUE)
{
  ## index processing
  my.unique <- function(x) {
    ix <- MATCH(x, x) == seq_len(length(x))
    x[ix]
  }
  if(is.function(by)) by <- by(index(x))
  if(!is.list(by)) by <- list(by)
  ## sanity checks and option processing
  stopifnot(length(time(x)) == length(by[[1]]))
  if(is.null(frequency)) {
    if(is.null(regular)) regular <- inherits(x, "zooreg")
  } else {
    if(identical(regular, FALSE)) warning(paste(sQuote("regular"), "is ignored"))
    regular <- TRUE
  }
  ## aggregate data
  by_integer <- list(MATCH(by[[1]], by[[1]]))
  if(coredata) {
    df <- coredata(x)
  } else {
    df <- as.data.frame(x)
    if(ncol(df) > 1L) {
      for(i in 1L:ncol(df)) df[[i]] <- x[, i]
    } else {
      df[[1L]] <- x
    }
  }
  df <- aggregate(df, by_integer, match.fun(FUN), ...)
  if(length(unique(as.character(df[,1]))) == length(df[,1]))
      row.names(df) <- df[, 1]
  df <- df[, -1]
  if(is.matrix(x)) df <- as.matrix(df)
  
  ## regularity processing, set up return value
  ix <- my.unique(by[[1]])
  rval <- zoo(df, ix[!is.na(ix)])
  
  if(regular) {
    freq <- ifelse(is.null(frequency), frequency(rval), frequency)
    rval <- zoo(df, ix, freq)
  }
  
  return(rval)
}
# works even if zoo series has duplicates among its times
split.zoo <- function(x, f, drop = FALSE, ...) {
    ix <- time(x)
	xc <- coredata(x)
	if (length(dim(xc)) < 2) {
		lapply(split(seq_along(xc), f, drop = drop, ...), 
			function(ind) zoo(xc[ind], ix[ind]))
	} else {
		lapply(split(seq_len(nrow(xc)), f, drop = drop, ...), 
			function(ind) zoo(xc[ind, , drop = drop], ix[ind]))
	}
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/aggregate.zoo.R 
 | 
					
	as.Date <- function (x, ...) {
  ## for plain numeric input, call zoo:::as.Date.numeric
  if (!is.object(x) && is.numeric(x)) as.Date.numeric(x, ...)
  else UseMethod("as.Date")
}
as.Date.numeric <- function (x, origin, ...) {
  if (missing(origin)) origin <- "1970-01-01"
  if (identical(origin, "0000-00-00")) origin <- as.Date("0000-01-01", ...) - 1
  as.Date(origin, ...) + x
}
.as_Date_default <- function(x, ...) base::as.Date(x, ...)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/as.Date.R 
 | 
					
	# as.Date.numeric <- function (x, origin = "1970-01-01", ...) 
#  as.Date(origin, ...) + x
as.Date.ts <- function(x, offset = 0, ...) {
   time.x <- unclass(time(x)) + offset
   if (frequency(x) == 1)
	as.Date(paste(time.x, 1, 1, sep = "-"))
   else if (frequency(x) == 4)
	as.Date(paste((time.x + .001) %/% 1, 3*(cycle(x)-1)+1, 1, sep = "-"))
   else if (frequency(x) == 12)
	as.Date(paste((time.x + .001) %/% 1, cycle(x), 1, sep = "-"))
   else
	stop("unable to convert ts time to Date class")
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/as.Date.ts.R 
 | 
					
	as.zoo <- function(x, ...)
{
  UseMethod("as.zoo")
}
as.zoo.default <- function(x, ...)
{
  zoo(structure(x, dim = dim(x)), index(x), ...)
}
as.zoo.factor <- function(x, ...) 
{
  zoo(x, ...)
}
as.zoo.matrix <- function(x, ...) 
{
  zoo(x, ...)
}
as.zoo.data.frame <- function(x, ...) 
{
  zoo(as.matrix(x), ...)
}
as.zoo.fts <- function(x, ...) 
{
	zoo(as.matrix(x), attr(x, "dates"))
}
as.zoo.irts <- function(x, ...)
{
  zoo(x$value, x$time, ...)
}
as.zoo.its <- function(x, ...) 
{
	index <- attr(x, "dates")
	class(x) <- attr(x, "dates") <- NULL
	zoo(x, index, ...)
}
# as.mcmc.default can handle other direction
as.zoo.mcmc <- function(x, ...)
{
	as.zoo(as.ts(x, ...))
}
as.zoo.timeSeries <- function(x, ...) {
  zoo(as.matrix(x), timeSeries::time(x), ...)  
}
as.zoo.xts <- function(x, ...) {
  y <- coredata(x)
  if (is.null(dim(y)) && length(y) == 0) {
    y <- NULL
  }
  zoo(y, order.by = index(x), ...)
}
as.zooreg.xts <- function(x, frequency = NULL, ...) {
  as.zooreg(as.zoo(x, ...), frequency = frequency)
}
as.zoo.zoo <- function(x, ...) x
as.vector.zoo <- function(x, mode = "any")
	as.vector(as.matrix(x), mode = mode)
as.matrix.zoo <- function(x, ...) 
{
    y <- as.matrix(coredata(x), ...)
    if (identical(coredata(x), numeric(0))) dim (y) <- c(0, 0)
    if (length(y) > 0) {
	    colnames(y) <- if (length(colnames(x)) > 0) 
		colnames(x)
	    else {
		lab <- deparse(substitute(x), width.cutoff = 100L, nlines = 1L)
		if (NCOL(x) == 1) 
		    lab
		else paste(lab, 1:NCOL(x), sep = ".")
	    }
	} else if (nrow(y) != length(index(x))) {
		dim(y) <- c(length(index(x)), 0)
	}
    if (!is.null(y) && nrow(y) > 0 && is.null(row.names(y))) 
		row.names(y) <- index2char(index(x), frequency = attr(x, "frequency"))
    return(y)
}
as.data.frame.zoo <- function(x, row.names = NULL, optional = FALSE, ...)
{
	y <- as.data.frame(coredata(x), optional = optional, ...)
        if(NCOL(x) > 0 && !optional) {
		colnames(y) <- if (length(colnames(x)) > 0) 
			colnames(x)
		else {
			lab <- deparse(substitute(x), width.cutoff = 100L, nlines = 1L)
			if (NCOL(x) == 1) lab
	                  else paste(lab, 1:NCOL(x), sep = ".")
		}
	}
	if (!is.null(row.names)) row.names(y) <- row.names 
	  else {
	    tmp <- index2char(index(x), frequency = attr(x, "frequency"))
	    if (NROW(y) > 0 && !any(duplicated(tmp))) row.names(y) <- tmp
        }
	return(y)
}
as.list.zoo <- function(x, ...) {
	if (length(dim(x)) == 0) list(x)
  		else lapply(as.data.frame(x), zoo, index(x),  attr(x, "frequency"))
}
as.list.ts <- function(x, ...) {
	if (is.matrix(x))
		lapply(as.data.frame(x), ts, 
			start = start(x), end = end(x), freq = frequency(x))
	else
		list(x)
}
## regular series coercions
as.zooreg <- function(x, frequency = NULL, ...)
{
  UseMethod("as.zooreg")
}
as.zooreg.default <- function(x, frequency = NULL, ...)
{
  as.zooreg(as.zoo(x, ...), frequency = frequency)
}
as.zooreg.ts <- as.zoo.ts <- function(x, frequency = NULL, ...)
{
  xtsp <- tsp(x)
  if(is.null(frequency)) frequency <- xtsp[3]
  zooreg(coredata(x), start = xtsp[1], end = xtsp[2], frequency = frequency, ...)
} 
as.ts.zooreg <- function(x, ...)
{
  freq <- frequency(x)
  deltat <- 1/freq
  tt <- as.numeric(time(x))
  round. <- if(isTRUE(all.equal(c(deltat, tt), round(c(deltat, tt))))) {
    function(x) floor(x + 0.5)
  } else {
    function(x) deltat * floor(x/deltat + 0.5)
  }
  tt <- round.(tt)
  tt2 <- round.(seq(head(tt,1), tail(tt,1), deltat))
  fill <- list(...)$fill
  if(is.null(fill)) fill <- NA
  xx <- merge(zoo(coredata(x), tt), zoo(, tt2), fill = fill)
  ts(coredata(xx), start = tt[1], frequency = freq)
}
as.ts.zoo <- function(x, ...)
{
  if(is.regular(x)) {
    attr(x, "frequency") <- frequency(x)
    return(as.ts.zooreg(x, ...))
  } else {
    warning(paste(sQuote("x"), "does not have an underlying regularity"))
    return(ts(coredata(x)))
  }
}
as.zoo.zooreg <- function(x, ...) {
  attr(x, "frequency") <- NULL
  class(x) <- "zoo"
  return(x)
}
as.zooreg.zoo <- function(x, frequency = NULL, ...)
{
  if(!is.null(frequency)) {
    frequency(x) <- frequency
  } else {
    freq <- frequency(x)
    if(!is.null(freq)) {
      attr(x, "frequency") <- freq
      class(x) <- c("zooreg", "zoo")
    } else {
      warning(paste(sQuote("x"), "does not have an underlying regularity"))
      x <- zooreg(coredata(x))
    }
  }
  return(x)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/as.zoo.R 
 | 
					
	as.zoo.tis <- function(x, class = "ti", ...) {
  if (class == "ti") {
    as.zoo(as.zooreg(x, class = "ti", ...))
  } else if (class == "numeric") {
    zoo(tis::stripTis(x), time(tis::ti(x), offset = 0))
  } else {
    asFun <- paste("as", class, sep = ".")
    zoo(tis::stripTis(x), do.call(asFun, list(tis::POSIXct(tis::ti(x), offset = 0, tz = "GMT"))), ...)
  }
}
as.zooreg.tis <- function(x, frequency = NULL, class = "ti", ...) {
  if (class == "ti") zooreg(tis::stripTis(x), start = start(x), ...)
    else as.zooreg(as.zoo(x, class = class, ...), frequency = frequency)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/as.zoo.tis.R 
 | 
					
	barplot.zoo <- function(height, names.arg = NULL, ...)
{
  x <- coredata(height)
  if(!is.null(dim(x))) x <- t(x)
  if(is.null(names.arg)) names.arg <- index2char(index(height))
  barplot(x, names.arg = names.arg, ...)
}
boxplot.zoo <- function(x, ...) boxplot(coredata(x), ...)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/barplot.zoo.R 
 | 
					
	coredata <- function(x, ...)
  UseMethod("coredata")
coredata.default <- function(x, ...) x
coredata.zoo <- function(x, ...)
{
  attr(x, "class") <- attr(x, "oclass")
  attr(x, "index") <- attr(x, "oclass") <- attr(x, "frequency") <- NULL
  return(x)
}
## # experimental coredata.zoo to take advantage of new C code contributed from xts
## .coredata.zoo <- function(x, ...) {
##   if(length(x) == 0)
##     return(vector(storage.mode(x)))
##   .Call("zoo_coredata", x, TRUE, PACKAGE = "zoo")  # second arg is to copy most attr, for compat with xts
## }
coredata.ts <- function(x, ...)
{
  x <- unclass(x)
  attr(x, "tsp") <- NULL
  return(x)
}
coredata.irts <- function(x, ...)
{
  return(x$value)
}
coredata.its <- function(x, ...)
{
  return(x@.Data)
}
"coredata<-" <- function(x, value)
{
  UseMethod("coredata<-")
}
"coredata<-.zoo" <- function(x, value)
{
  stopifnot(length(x) == length(value))
  if(!(is.vector(value) || is.factor(value) || is.matrix(value) || is.data.frame(value)))
    stop(paste(dQuote("value"), ": attempt to assign invalid coredata to zoo object"))
  if(is.matrix(value) || is.data.frame(value)) value <- as.matrix(value)
    
  x[] <- value  
  attr(x, "oclass") <- attr(value, "class")
  return(x)
}
"coredata<-.ts" <- function(x, value)
{
  stopifnot(length(x) == length(value))
  dim(value) <- dim(x)
  x[] <- value
  return(x)
}
"coredata<-.irts" <- function(x, value)
{
  stopifnot(length(x$value) == length(value))
  dim(value) <- dim(x$value)
  x$value[] <- value
  return(x)
}
"coredata<-.its" <- function(x, value)
{
  stopifnot(length(x@.Data) == length(value))
  dim(value) <- dim(x@.Data)
  x@.Data[] <- as.matrix(value)
  return(x)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/coredata.R 
 | 
					
	fortify.zoo <- function(model, data, names = c("Index", "Series", "Value"),
		melt = FALSE, sep = NULL, ...)
{
  if (!is.null(sep) && !melt) stop("Cannot specify sep if melt = FALSE")
  ## dimensions
  n <- NROW(model)
  k <- NCOL(model)
  ## dots only named data.frame arguments
  dots <- list(...)
  dots <- dots[names(dots) %in%
    c("row.names", "check.rows", "check.names", "fix.empty.names", "stringsAsFactors")]
  
  ## series labels
  lab <- colnames(model)
  if(is.null(lab)) lab <- rep.int(deparse(substitute(model)), k)
  lab <- make.unique(lab)
  
  ## return data names
  nm <- c("Index", "Series", "Value")
  if(!is.null(names(names))) names <- names[nm]
  if(is.list(names)) {
    names(names) <- nm
    for(i in 1L:3L) if(is.null(names[[i]]) || anyNA(names[[i]])) names[[i]] <- nm[i]
    nm <- unlist(names)
  } else {
    names <- rep_len(names, 3L)
    nm[!is.na(names)] <- names[!is.na(names)]
  }
  
  ## either long format (melt = TRUE) or wide format (melt = FALSE)
  if(melt) {
    df <- if(k == 1L) {    
      do.call("data.frame", c(
        list(index(model), factor(rep.int(1, n), labels = lab), coredata(model)),
	dots))
    } else {
      do.call("data.frame", c(
        list(index(model)[rep.int(1:n, k)],
          factor(rep(1:k, each = n), levels = 1:k, labels = lab),
	  as.vector(coredata(model))),
	dots))
    }
    if (!is.null(sep)) {
      df <- data.frame(df[1L], 
        do.call("rbind", strsplit(as.character(df[[2L]]), ".", fixed = TRUE)),
	df[3L])
    }
    nl <- length(nm)
    names(df) <- c(nm[1L], make.unique(rep_len(nm[-c(1L, nl)], ncol(df) - 2L)), nm[nl])
  } else {
    df <- cbind(
      do.call("data.frame", c(list(index(model)), dots)), 
      coredata(model))
    names(df) <- c(nm[1L], lab)  
  }
  
  return(df)
}
autoplot.zoo <- function(object, geom = "line", facets, ...)
{
  ## convert to data.frame (and assure correct label
  ## processing by fortify.zoo)
  lab <- deparse(substitute(object))
  if(NCOL(object) == 1L) {
    if(is.null(dim(object))) dim(object) <- c(NROW(object), 1L)
    if(is.null(colnames(object))) colnames(object) <- lab
  }
  if(is.null(colnames(object))) colnames(object) <- paste(lab, 1:NCOL(object), sep = ".")
  df <- fortify.zoo(object, melt = TRUE)
  ## default for facets
  single <- nlevels(df$Series) == 1L
  if(missing(facets)) {
    auto <- TRUE
    facets <- if(single) NULL else Series ~ .
  } else {
    auto <- FALSE
  }
  ## process defaults as for old qplot-based interface
  if(is.character(geom)) geom <- get(paste0("geom_", geom), asNamespace("ggplot2"))
  if(inherits(facets, "formula")) facets <- ggplot2::facet_grid(facets)
  
  ## "fake" variables for nonstandard evaluation
  Index <- Value <- Series <- NULL
  ## call qplot
  gg <- if(single | (!is.null(facets) & auto)) {
    ggplot2::ggplot(df, ggplot2::aes(x = Index, y = Value, ...)) + geom() + facets + ggplot2::ylab(if(single) levels(df$Series) else "") + ggplot2::xlab("Index")
  } else {
    ggplot2::ggplot(df, ggplot2::aes(x = Index, y = Value, group = Series, colour = Series, ...)) + geom() + facets + ggplot2::ylab("") + ggplot2::xlab("Index")    
  }
  return(gg)
}
facet_free <- function (facets = Series ~ ., margins = FALSE, scales = "free_y", ...) {
  ggplot2::facet_grid(facets, margins = margins, scales = scales, ...)
}
yearmon_trans <- function(format = "%b %Y", n = 5) {
  breaks. <- function(x) as.yearmon(scales::pretty_breaks(n)(x))
  format. <- function(x) format(x, format = format)
  scales::trans_new("yearmon", transform = as.numeric, inverse = as.yearmon,
    breaks = breaks., format = format.)
}
scale_x_yearmon <- function(..., format = "%b %Y", n = 5) {
  ggplot2::scale_x_continuous(..., trans = yearmon_trans(format, n))
}
scale_y_yearmon <- function(..., format = "%b %Y", n = 5) {
  ggplot2::scale_y_continuous(..., trans = yearmon_trans(format, n))
}
yearqtr_trans <- function(format = "%Y-%q", n = 5) {
  breaks. <- function(x) as.yearqtr(scales::pretty_breaks(n)(x))
  format. <- function(x) zoo::format.yearqtr(x, format = format)
  scales::trans_new("yearqtr", transform = as.numeric, inverse = as.yearqtr,
    breaks = breaks., format = format.)
}
scale_x_yearqtr <- function(..., format = "%Y-%q", n = 5) {
  ggplot2::scale_x_continuous(..., trans = yearqtr_trans(format, n))
}
scale_y_yearqtr <- function(..., format = "%Y-%q", n = 5) {
  ggplot2::scale_y_continuous(..., trans = yearqtr_trans(format, n))
}
scale_type.yearqtr <- function(x) c("yearqtr", "continuous")
scale_type.yearmon <- function(x) c("yearmon", "continuous")
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/ggplot2.zoo.R 
 | 
					
	index <- function(x, ...)
{
  UseMethod("index")
}
index.default <- function(x, ...)
{
  seq_len(NROW(x))
}
index.zoo <- function(x, ...)
{
  attr(x, "index")
}
index.ts <- function(x, ...)
{
  xtsp <- tsp(x)
  seq(xtsp[1], xtsp[2], by = 1/xtsp[3])
}
time.zoo <- function(x, ...)
{
  index(x)
}
"index<-" <- function(x, value) 
{
	UseMethod("index<-")
}
"time<-" <- function(x, value) 
{
	UseMethod("time<-")
}
"index<-.zoo" <- function(x, value) 
{
	if(length(index(x)) != length(value)) 
	  stop("length of index vectors does not match")
	if(is.unsorted(ORDER(value)))
	  stop("new index needs to be sorted")	
	attr(x, "index") <- value
	return(x)
}
"time<-.zooreg" <- "index<-.zooreg" <- function(x, value) 
{
	if(length(index(x)) != length(value)) 
	  stop("length of index vectors does not match")
	if(is.unsorted(ORDER(value)))
	  stop("new index needs to be sorted")	
        ## check whether new index still conforms with
	## frequency, if not: drop frequency
        d <- try(diff(as.numeric(value)), silent = TRUE)
	ok <- if(inherits(d, "try-error") || length(d) < 1 || anyNA(d)) FALSE
	else {	    
            deltat <- min(d)
	    dd <- d/deltat
	    if(identical(all.equal(dd, round(dd)), TRUE)) {	    
                freq <- 1/deltat
                if(freq > 1 && identical(all.equal(freq, round(freq)), TRUE)) freq <- round(freq)
  	        identical(all.equal(attr(x, "frequency") %% freq, 0), TRUE)
	    } else {
	        FALSE
	    }
	}
	if(!ok) {
	  attr(x, "frequency") <- NULL
	  class(x) <- class(x)[-which(class(x) == "zooreg")]
	}
 	
	attr(x, "index") <- value
	return(x)
}
"time<-.zoo" <- function(x, value) 
{
	if(length(index(x)) != length(value)) 
	  stop("length of time vectors does not match")
	attr(x, "index") <- value
	return(x)
}
start.zoo <- function(x, ...) 
{
	if (length(index(x)) > 0) index(x)[1]
	  else NULL
}
end.zoo <- function(x, ...) 
{
	lx <- length(index(x))
	if (lx > 0) index(x)[lx]
	  else NULL
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/index.R 
 | 
					
	index2char <- function(x, ...) UseMethod("index2char")
index2char.default <- function(x, ...) as.character(x)
index2char.POSIXt <- function(x, ...) format(x, ...)
index2char.numeric <- function(x, frequency = NULL, digits = getOption("digits") - 3, ...)
{
  freq <- frequency
  if(is.null(freq)) return(as.character(round(x, digits = digits)))
  if(length(x) < 1) return(character(0))
  d <- diff(x)
  if(freq > 1 && identical(all.equal(freq, round(freq)), TRUE)) freq <- round(freq)
  if(identical(all.equal(freq*d, round(freq*d)), TRUE)) {
    if(freq == 1) return(as.character(round(x)))
      else return(paste(floor(x), "(", round((x - floor(x)) * freq) + 1, ")", sep = ""))
  } else
    return(as.character(round(x, digits = digits)))
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/index2char.R 
 | 
					
	is.regular <- function(x, strict = FALSE) {
  UseMethod("is.regular")
}
is.regular.zoo <- function(x, strict = FALSE)
{
  delta <- suppressWarnings(try(diff(as.numeric(index(x))), silent = TRUE))
  if(inherits(delta, "try-error") || anyNA(delta)) FALSE
  else if(length(delta) < 1) FALSE
  else if(strict) identical(all.equal(delta, rep.int(delta[1], length(delta))), TRUE)
  else {
    delta <- unique(delta)
    rval <- identical(all.equal(delta/min(delta), round(delta/min(delta))), TRUE)
    if(!rval && identical(all.equal(delta, round(delta)), TRUE)) rval <- TRUE
    rval
  }
}
is.regular.ts <- function(x, strict = FALSE) TRUE
is.regular.zooreg <- function(x, strict = FALSE)
{
  if(strict) is.regular.zoo(x, strict = TRUE) else TRUE
}
is.regular.default <- function(x, strict = FALSE) {
  is.regular(as.zoo(x), strict = strict)
}
frequency.zooreg <- function(x, ...) 
{
  attr(x, "frequency")
}
frequency.zoo <- function(x, ...)
{
  ## check whether frequency is available
  freq <- attr(x, "frequency")
  if(!is.null(freq) || length(index(x)) < 2) return(freq)
  ## check regularity
  delta <- suppressWarnings(try(diff(as.numeric(index(x))), silent = TRUE))
  reg <- if(inherits(delta, "try-error") || anyNA(delta)) FALSE
  else {
    delta <- unique(delta)
    rval <- identical(all.equal(delta/min(delta), round(delta/min(delta))), TRUE)
    if(rval) freq <- 1/min(delta)
    else if(identical(all.equal(delta, round(delta)), TRUE)) {
      ## special case: integer indexes
      ## get frequency as greatest common divisor (of differences)
      gcd <- function(x) {	
        gcd0 <- function(a, b) ifelse(b==0 | a==b, a, gcd0(b, a %% b))
        if(length(x) < 2) x <- c(x, as.integer(0))
        if(length(x) < 3) {
          return(gcd0(x[1], x[2]))
        } else {
          x <- sapply(1:(length(x) - 1), function(i) gcd0(x[i], x[i+1]))
          gcd(x)
        }
      }
      freq <- 1/gcd(delta)
      rval <- TRUE
    }
    rval
  }
  if(!reg) return(NULL)
  if(freq > 1 && identical(all.equal(freq, round(freq)), TRUE)) freq <- round(freq)
  return(freq)
}
"frequency<-" <- function(x, value)
  UseMethod("frequency<-")
  
"frequency<-.zoo" <- function(x, value) {
  delta <- suppressWarnings(try(diff(as.numeric(index(x))), silent = TRUE))
  freqOK <- if(inherits(delta, "try-error") || anyNA(delta)) FALSE
    else if(length(delta) < 1) TRUE
    else identical(all.equal(delta*value, round(delta*value)), TRUE)
  stopifnot(freqOK)
  if(value > 1 && identical(all.equal(value, round(value)), TRUE)) value <- round(value)
  attr(x, "frequency") <- value
  class(x) <- c("zooreg", "zoo")
  return(x)
}
"frequency<-.zooreg" <- function(x, value) {
  delta <- diff(as.numeric(index(x)))
  stopifnot(identical(all.equal(delta*value, round(delta*value)), TRUE))
  attr(x, "frequency") <- value
  return(x)
}
deltat.zoo <- function(x, ...)
{
  rval <- frequency.zoo(x, ...)
  if(is.null(rval)) NULL else 1/rval
}
deltat.zooreg <- function(x, ...)
{
  1/frequency.zooreg(x, ...)
}
cycle.zooreg <- function(x, ...)
{
  freq <- frequency(x)
  ix <- as.numeric(index(x))
  d <- diff(ix)
  if(!identical(all.equal(freq*d, round(freq*d)), TRUE))
    stop(paste(sQuote("cycle"), "not available for", sQuote("x")))  
  return(zoo(round((ix - floor(ix)) * freq) + 1, order.by = index(x), freq))
}
cycle.zoo <- function(x, ...)
{
  if(is.regular(x)) cycle.zooreg(x)
    else stop(sQuote("x"), "is not regular")
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/is.regular.R 
 | 
					
	rbind.zoo <- function(..., deparse.level = 1)
{  
  args <- Filter(Negate(is.null), list(...))
  indexes <- do.call("c", unname(lapply(args, index)))
  my.table <- function(x) {
    ix <- ORDER(x)
    x <- x[ix]
    table(MATCH(x,x))
  }
  if(max(my.table(indexes)) > 1L) stop("indexes overlap")
  if(any(sapply(args, function(x) is.null(dim(x)) && length(x) == 0L && length(index(x)) > 0L)))
    stop("zero-length vectors with non-zero-length index are not allowed")  
  ncols <- sapply(args, NCOL)
  if(!all(ncols == ncols[1L])) stop("number of columns differ")
  
  ## process colnames() if any
  nams <- lapply(args, colnames)
  namsNULL <- sapply(nams, is.null)
  if(all(namsNULL)) namsOK <- TRUE else {
    if(sum(namsNULL) > 0L) namsOK <- FALSE else {
      nam1 <- nams[[1L]]
      namsID <- sapply(nams, function(x) identical(x, nam1))
      if(all(namsID)) namsOK <- TRUE else {
        namsSORT <- sapply(nams, function(x) identical(sort(x), sort(nam1)))
	if(!all(namsSORT)) namsOK <- FALSE else {
	  namsOK <- TRUE
	  for(i in which(!namsID)) args[[i]] <- args[[i]][,nam1]
	}
      }
    }
  }
  if(!namsOK) warning("column names differ")
  ## collect data
  argsdata <- lapply(args, coredata)
  ## (special case: rbinding of vectors with one-column matrices)
  nulldim <- sapply(argsdata, function(a) is.null(dim(a)))
  if(ncols[1L] == 1L) {
    if(nulldim[1] & any(!nulldim)) {
      argsdata <- lapply(argsdata, function(a) if(is.null(dim(a))) a else a[,1, drop = TRUE])
      nulldim <- rep(TRUE, length(nulldim))
    }
    if(!nulldim[1] & any(nulldim)) {
      argsdata <- lapply(argsdata, function(a) if(is.null(dim(a))) as.matrix(a) else a)
      nulldim <- rep(FALSE, length(nulldim))
    }
  }
  if((ncols[1L] > 1L) | !all(nulldim))
    rval <- zoo(do.call("rbind", argsdata), indexes)
  else
    rval <- zoo(do.call("c", argsdata), indexes)
  freq <- if(!("zooreg" %in% unlist(sapply(args, class)))) NULL
            else {
	      freq <- c(frequency(rval), unlist(sapply(args, frequency)))
	      if((length(freq) == (length(args)+1L)) && 
	         identical(all.equal(max(freq)/freq, round(max(freq)/freq)), TRUE))
		 max(freq) else NULL
	    }
  if(!is.null(freq)) {
    attr(rval, "frequency") <- freq
    class(rval) <- c("zooreg", class(rval))
  }
  return(rval)
}
c.zoo <- function(...) {
    rbind.zoo(...)
}
cbind.zoo <- function(..., all = TRUE, fill = NA, suffixes = NULL, drop = FALSE, sep = ".")
{
  merge.zoo(..., all = all, fill = fill, suffixes = suffixes, retclass = "zoo", drop = drop, sep = sep)
}
merge.zoo <- function(..., all = TRUE, fill = NA, suffixes = NULL, check.names = FALSE, retclass = c("zoo", "list", "data.frame"), drop = TRUE, sep = ".")
{
    if (!is.null(retclass)) retclass <- match.arg(retclass)
    # cl are calls to the args and args is a list of the arguments
    cl <- as.list(match.call())
    cl[[1]] <- cl$all <- cl$fill <- cl$retclass <- cl$suffixes <- cl$check.names <- cl$drop <- cl$sep <- NULL
    args <- list(...)
    parent <- parent.frame()
    is.plain <- function(x) 
	all(class(x) %in% c("array", "integer", "numeric", "factor", "matrix", "logical", "character"))
    is.scalar <- function(x) is.plain(x) && length(x) == 1
    # ensure all ... plain args are of length 1 or have same NROW as arg 1
    stopifnot(all(sapply(args, function(x) is.zoo(x) || !is.plain(x) ||
      (is.plain(x) && (NROW(x) == NROW(args[[1]]) || is.scalar(x))))))
    scalars <- sapply(args, is.scalar)
    if(!is.zoo(args[[1]])) args[[1]] <- as.zoo(args[[1]])
    for(i in seq_along(args))
        if (is.plain(args[[i]]))  
            args[[i]] <- zoo(args[[i]], index(args[[1]]), attr(args[[1]], "frequency"))
	else if (!is.zoo(args[[i]]))
            args[[i]] <- as.zoo(args[[i]])
    ## retain frequency	if all series have integer multiples of the same frequency
    ## and at least one of the original objects is a "zooreg" object	
    freq <- if(!("zooreg" %in% unlist(sapply(args, class)))) NULL
        else {
	  freq <- unlist(sapply(args, frequency))
	  if((length(freq) == length(args)) && 
	     identical(all.equal(max(freq)/freq, round(max(freq)/freq)), TRUE))
	     max(freq) else NULL
	}
    # use argument names if suffixes not specified
    if (is.null(suffixes)) {
        makeNames <- function(l) {
            nm <- names(l)
            fixup <- if (is.null(nm)) 
                seq_along(l)
            else nm == ""
            dep <- sapply(l[fixup], function(x) deparse(x)[1])
            if (is.null(nm)) 
                return(dep)
            if (any(fixup)) 
                nm[fixup] <- dep
            nm
        }
        suffixes <- makeNames(as.list(substitute(list(...)))[-1])
    }
    if (length(suffixes) != length(cl)) {
        warning("length of suffixes and does not match number of merged objects")
        suffixes <- rep(suffixes, length.out = length(cl))
    }
    # extend all to a length equal to the number of args
    all <- rep(as.logical(all), length.out = length(cl))
    ## check indexes:
    indexlist <- lapply(args, index)
    ## 1. for non-unique entries
    index_duplicates <- function(x) length(unique(MATCH(x, x))) < length(x)
    if(any(sapply(indexlist, index_duplicates)))
      stop("series cannot be merged with non-unique index entries in a series")
    ## 2. for differing classes
    indexclasses <- sapply(indexlist, function(x) class(x)[1])
    if (!all(indexclasses == indexclasses[1L])) {
        warning(paste("Index vectors are of different classes:", 
            paste(indexclasses, collapse = " ")))
	if(all(indexclasses %in% c("numeric", "integer"))) {
	    indexlist <- lapply(indexlist, as.numeric)
	} else if(all(vapply(indexlist, function(e) inherits(e, "Date") || is.numeric(e), NA))) {
            indexlist <- lapply(indexlist, as.Date)
	}
    }
    # fn to get the unique elements in x, in sorted order, using only
    # [, match, length and order
    sort.unique <- function(x) {
        ix <- MATCH(x, x) == seq_len(length(x))
	x <- x[ix]
        ix <- ORDER(x)
        x[ix]
    }
    # fn to get intersection of each element in list of lists
    intersect.list <- function(list) { 
        my.table <- function(x) {
	   ix <- ORDER(x)
           x <- x[ix]
           table(MATCH(x, x))
	}
	union <- do.call("c", unname(list))
	sort.unique(union)[which(my.table(union) == length(list))]
    }
    indexintersect <- intersect.list(indexlist)
    # get the indexes of the final answer which is the union of
    # all indexes of args corresponding to all=TRUE with the intersection
    # of all indexes
    indexunion <- do.call("c", unname(indexlist[all]))
    
    indexes <-  if(is.null(indexunion)) indexintersect
      else sort.unique(c(indexunion, indexintersect))
    # previously, we used to do:
    # if (is.null(indexunion)) indexunion <- do.call("c", indexlist)[0]
    # indexes <- sort.unique(c(indexunion, indexintersect))
    ## check whether resulting objects still got the same frequency
    if(!is.null(freq)) {
      freq <- c(frequency(zoo(,indexes)), freq)
      freq <- if((length(freq) == 2) && identical(all.equal(max(freq)/freq, round(max(freq)/freq)), TRUE))
        max(freq) else NULL
    }
    # the f function does the real work
    # it takes a zoo object, a, and fills in a matrix corresponding to
    # indexes with the values in a. ret.zoo is TRUE if it is to return
    # a zoo object.  If ret.zoo is FALSE it simply returns with the matrix
    # just calculated.  
    # match0 is convenience wrapper for MATCH with nomatch=0 default
    match0 <- function(a, b, nomatch = 0, ...) MATCH(a, b, nomatch = nomatch, ...)
    f <- if (any(all)) {
       function(a, ret.zoo = TRUE) {
        if (length(a) == 0 && length(dim(a)) == 0)
	   return(if(ret.zoo) {
	            rval <- zoo(, indexes)
	            attr(rval, "frequency") <- freq
	            if(!is.null(freq)) class(rval) <- c("zooreg", class(rval))
		    rval
		  } else numeric())
        z <- matrix(fill, length(indexes), NCOL(a))
	if (length(dim(a)) > 0)
           z[match0(index(a), indexes), ] <- a[match0(indexes, index(a)), , drop = FALSE]        
        else {
           z[match0(index(a), indexes), ] <- a[match0(indexes, index(a))]
           z <- z[, 1, drop=TRUE]
        }
 	if (ret.zoo) {
	  z <- zoo(z, indexes)
	  attr(z, "oclass") <- attr(a, "oclass")
	  attr(z, "levels") <- attr(a, "levels")
	  attr(z, "frequency") <- freq
	  if(!is.null(freq)) class(z) <- c("zooreg", class(z))
	}
	return(z)
      }
    
    } else {
    # if all contains only FALSE elements then the following f is used
    # instead of the prior f for performance purposes.  If all contains
    # only FALSE then the resulting index is the intersection of the
    # index of each argument so we can just return a[index] or a[index,].
    # Also if we are not to return a zoo object then unclass it prior to return.
      function(a, ret.zoo = TRUE) {
	if (!ret.zoo) class(a) <- NULL
	if (length(dim(a)) == 0) {
		if (length(a) == 0) {
		   rval <- if(ret.zoo) zoo(, indexes) else numeric()
		} else
		   rval <- as.zoo(a[match0(indexes, attr(a, "index"))])
	} else
		rval <- as.zoo(a[match0(indexes, attr(a, "index")), , drop=FALSE])
        if(is.zoo(rval) && !is.null(freq)) {
	  attr(rval, "frequency") <- freq
	  class(rval) <- unique(c("zooreg", class(rval)))
	}
	return(rval)
      }
    }
    # if retclass is NULL do not provide a return value but instead
    # update each argument that is a variable, i.e. not an expression,
    # in place.  
    if (is.null(retclass)) {
        for(vn in cl) {
           if (is.name(vn))
           tryCatch(
	     eval(substitute(v <- f(v), list(f = f, v = vn)), parent), 
	     condition = function(x) {}
           )
        }
	invisible(return(NULL))
    } 
    # apply f to each arg, put result of doing this on all args in list rval
    # and then cbind that list together to produce the required matrix
    rval <- lapply(args, f, ret.zoo = retclass %in% c("list", "data.frame"))
    ## have commented this next line out.  Is it needed?
    # for(i in which(scalars)) rval[[i]] <- rval[[i]][] <- zoo(coredata(rval[[i]])[1], index(rval[[1]]), freq)
    names(rval) <- suffixes
    if (retclass == "list") { 
	return(rval)
    }
    if (retclass == "data.frame") {
      ## transform list to data.frame
      ## this is simple if all list elements are vectors, but with
      ## matrices a bit more effort seems to be needed:
      charindex <- index2char(index(rval[[1]]), frequency = freq)
      nam1 <- names(rval)
      rval <- lapply(rval, as.list)
      todf <- function(x) {
        class(x) <- "data.frame"
        attr(x, "row.names") <- charindex
        return(x)
      }
      rval <- lapply(rval, todf)
      ## name processing
      nam2 <- sapply(rval, function(z) 1:NCOL(z))
      for(i in 1:length(nam2)) nam2[[i]] <- paste(names(nam2)[i], nam2[[i]], sep = sep)
      nam1 <- unlist(ifelse(sapply(rval, NCOL) > 1, nam2, nam1))
      rval <- do.call("cbind", rval)
      names(rval) <- nam1
      ## turn zoo factors into plain factors
      is.zoofactor <- function(x) !is.null(attr(x, "oclass")) && attr(x, "oclass") == "factor"
      for(i in 1:NCOL(rval)) if(is.zoofactor(rval[,i])) rval[,i] <- coredata(rval[,i])
      return(rval)
    }
    # remove zero length arguments
    rval <- rval[sapply(rval, function(x) length(x) > 0)]
    # if there is more than one non-zero length argument then cbind them
    # Note that cbind will create matrices, even when given a single vector, 
    # so - if drop=TRUE - we do not use it in the single vector case.
    rval <- if (length(rval) > 1L | (length(rval) == 1L & !drop))
	do.call("cbind", rval)
    else if (length(rval) > 0L)
	rval[[1]]
    # return if vector since remaining processing is only for column names
    if (length(dim(rval)) == 0L) {
      # fixed bug: coredata was missing
      rval <- zoo(coredata(rval), indexes)
      attr(rval, "frequency") <- freq
      if(!is.null(freq)) class(rval) <- c("zooreg", class(rval))
      return(rval)
    }
    # processing from here on is to compute nice column names
    if (length(unlist(sapply(args, colnames))) > 0) {
        fixcolnames <- function(a) {
            # if (length(a) == 0) 
            #   return(NULL)
            if (length(dim(a)) ==0) {
                if (length(a) == 0) return(NULL) else return("")
            } else {
				if (ncol(a) == 0) return(NULL)
                rval <- colnames(a)
                if (is.null(rval)) {
                  rval <- paste(1:NCOL(a), suffixes[i], sep = sep)
                }
                else {
                  rval[rval == ""] <- as.character(which(rval == ""))
                }
                return(rval)
            }
        }
        zoocolnames <- lapply(args, fixcolnames)
        zcn <- unlist(zoocolnames)
        fixme <- lapply(zoocolnames, function(x) x %in% zcn[duplicated(zcn)])
        f <- function(i) {
            rval <- zoocolnames[[i]]
            rval[rval == ""] <- suffixes[i]
            rval
        }
        zoocolnames <- lapply(seq_along(args), f)
        f <- function(i) ifelse(fixme[[i]], paste(zoocolnames[[i]], 
            suffixes[i], sep = sep), zoocolnames[[i]])
        if (any(duplicated(unlist(zoocolnames)))) 
            zoocolnames <- lapply(seq_along(args), f)
    } else {
        fixcolnames <- function(a) {
            if (length(a) == 0) 
                return(NULL)
            if (NCOL(a) < 2) 
                return("")
            else return(paste(sep, 1:NCOL(a), sep = ""))
        }
        zoocolnames <- lapply(args, fixcolnames)
        zoocolnames <- lapply(seq_along(args), function(i) 
		if (!is.null(zoocolnames[[i]])) # NULL returned if false
			paste(suffixes[i], zoocolnames[[i]], sep = ""))
    }
	zoocolnames <- unlist(zoocolnames)
	colnames(rval) <- if (check.names) make.names(make.unique(zoocolnames))
		else if (ncol(rval) == length(zoocolnames)) zoocolnames else 
		colnames(rval)
    # rval <- zoo(rval, indexes)
    rval <- zoo(coredata(rval), indexes)
    attr(rval, "frequency") <- freq
    if(!is.null(freq)) class(rval) <- c("zooreg", class(rval))
    return(rval)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/merge.zoo.R 
 | 
					
	
na.StructTS <- function(object, ...) UseMethod("na.StructTS")
na.StructTS.ts <- function(object, ..., na.rm = FALSE, maxgap = Inf)
{
    na.StructTS.0 <- function(y) {
        yf <- y
		isna <- is.na(y)
		yf[isna] <- rowSums(tsSmooth(StructTS(y))[,-2])[isna]
        .fill_short_gaps(y, yf, maxgap = maxgap)
    }
    object[] <- if (length(dim(object)) == 0) na.StructTS.0(object)
                else apply(object, 2, na.StructTS.0)
    if (na.rm) na.trim(object, is.na = "all") else object
}
na.StructTS.zoo <- function(object, ..., na.rm = FALSE, maxgap = Inf) {
	z <- na.StructTS(as.ts(object), ..., na.rm = FALSE, maxgap = maxgap)
	z <- as.zoo(z)
	time(z) <- time(object)
    if (na.rm) na.trim(z, is.na = "all") else z
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.StructTS.R 
 | 
					
	na.aggregate <- function(object, ...) UseMethod("na.aggregate")
## fills NA values with some aggregated function of the data.
## generalises imputing by the overall mean, by calendar month, etc.
na.aggregate.default <- function(object, by = 1, ..., FUN = mean, na.rm = FALSE, maxgap = Inf)
{
    if (is.function(by)) by <- by(time(object), ...)
    ## applied to each aggregated group in each series:
    f <- function(x)
        replace(x, is.na(x), FUN(x[!is.na(x)]))
    na.aggregate.0 <- function(y) {
        yf <- ave(y, by, FUN = f)
        .fill_short_gaps(y, yf, maxgap = maxgap)
    }
    object[] <- if (length(dim(object)) == 0) na.aggregate.0(object)
                else apply(object, 2, na.aggregate.0)
    if (na.rm) na.trim(object, is.na = "all") else object
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.aggregate.R 
 | 
					
	na.approx <- function(object, ...) UseMethod("na.approx")
na.approx.zoo <- function(object, x = index(object), xout, ..., na.rm = TRUE, maxgap = Inf, along) {
    if (!missing(along)) {
        warning("along to be deprecated - use x instead")
        if (missing(x)) x <- along
    }
    missing.xout <- missing(xout) || is.null(xout)
    if (is.function(x)) x <- x(index(object))
    if (!missing.xout && is.function(xout)) xout <- xout(index(object))
    order.by <- if (missing.xout) index(object) else xout
    xout <- if (missing.xout) x else xout
    if (missing.xout || identical(xout, index(object))) {
        result <- object
    } else {
        object.x <- object
        if (!identical(class(x), class(xout))) {
            index(object.x) <- as.numeric(x)
            xout <- as.numeric(xout)
        } else {
            index(object.x) <- x
        }
        objectm <- merge(object.x, zoo(, xout))
        if (length(dim(objectm)) == 2) colnames(objectm) <- colnames(object)
        result <- window(objectm, index. = xout)
    }
    result[] <- na.approx.default(object, x = x, xout = xout, na.rm = FALSE, ..., maxgap = maxgap)
    if ((!missing(order.by) && !is.null(order.by)) || !missing.xout) {
        index(result) <- order.by
    }
    if (na.rm) {
        result <- na.trim(result, is.na = "all", maxgap = maxgap)
    }
    result
}
na.approx.zooreg <- function(object, ...) {
    object. <- structure(object, class = setdiff(class(object), "zooreg"))
    as.zooreg(na.approx(object., ...))
}
na.approx.default <- function(object, x = index(object), xout = x, ..., na.rm = TRUE, maxgap = Inf, along) {
    if (!missing(along)) {
        warning("along to be deprecated - use x instead")
        if (missing(x)) x <- along
    }
    na.approx.vec <- function(x, y, xout = x, ...) {
        na <- is.na(y)
	if(sum(!na) < 2L) {
	    ## approx() cannot be applied here, hence simply:
	    yf <- rep.int(NA, length(xout))
	    mode(yf) <- mode(y)
	    if(any(!na)) {
	        if(x[!na] %in% xout) {
		    yf[xout == x[!na]] <- y[!na]
		}
	    }
	    return(yf)
	}
	if(all(!na) && (length(xout) > maxgap) && !all(xout %in% x)) {
	    ## for maxgap to work correctly 'y' has to contain
	    ## actual NAs and be expanded to the full x-index
	    xf <- sort(unique(c(x, xout)))
	    yf <- rep.int(NA, length(xf))
	    yf[MATCH(x, xf)] <- y
	    x <- xf
	    y <- yf
	}
        yf <- approx(x[!na], y[!na], xout, ...)$y
        if (maxgap < length(y)) {
            ## construct a series like y but with only gaps > maxgap
            ## (actual values don't matter as we only use is.na(ygap) below)
            ygap <- .fill_short_gaps(y, seq_along(y), maxgap = maxgap)
            ## construct y values at 'xout', keeping NAs from ygap
            ## (using indexing, as approx() does not allow NAs to be propagated)
            ix <- approx(x, seq_along(y), xout, ...)$y
            yx <- ifelse(is.na(ygap[floor(ix)] + ygap[ceiling(ix)]), NA, yf)
            yx
        } else {
            yf
        }
    }
    if (!identical(length(x), length(index(object)))) {
        stop("x and index must have the same length")
    }
    x. <- as.numeric(x)
    if (missing(xout) || is.null(xout)) xout <- x.
    xout. <- as.numeric(xout)
    object. <- coredata(object)
    result <- if (length(dim(object.)) < 2) {
        na.approx.vec(x., coredata(object.), xout = xout., ...)
    } else {
        apply(coredata(object.), 2, na.approx.vec, x = x., xout = xout., ...)
    }
    if (na.rm) {
        result <- na.trim(result, is.na = "all", maxgap = maxgap)
    }
    result
}
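## Example sketch: linear interpolation with and without a gap limit
## (expected results shown in comments).
z <- zoo(c(1, NA, NA, NA, 5, NA, 7), 1:7)
na.approx(z)              # 1 2 3 4 5 6 7
na.approx(z, maxgap = 1)  # 1 NA NA NA 5 6 7  (the run of 3 NAs exceeds maxgap)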
na.approx.ts <- function(object, ...) {
    as.ts(na.approx(as.zoo(object), ...))
}
## x = series with gaps
## fill = same series with filled gaps
.fill_short_gaps <- function(x, fill, maxgap) {
    if (maxgap <= 0)
        return(x)
    if (maxgap >= length(x))
        return(fill)
    naruns <- rle(is.na(x))
    naruns$values[naruns$lengths > maxgap] <- FALSE
    naok <- inverse.rle(naruns)
    x[naok] <- fill[naok]
    return(x)
}
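## Example sketch: only NA runs of length <= maxgap are taken from 'fill'.
.fill_short_gaps(c(1, NA, 3, NA, NA, 6), fill = 1:6, maxgap = 1)
## expected: 1 2 3 NA NA 6  (the length-2 run is left as NA)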
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.approx.R 
 | 
					
	
# fill is a 3-component list (or is coerced to one) giving the fill value
#  for NAs to the left of the leftmost non-NA, for interior NAs,
#  and for NAs to the right of the rightmost non-NA.
# If a component is "extend" then leading/trailing NAs are extended and
#  interior NAs are linearly interpolated.
# If a component is NULL then the corresponding NAs are dropped.
na.fill0 <- function(object, fill, ix = !is.na(object))
{
  if (length(object) == 0L) {
    object
  } else if (length(fill) == 0L || sum(lengths(as.list(fill))) == 0L) {
    structure(object[ix], na.action = which(!ix))
  } else if (length(fill) == 1L) {
    if (identical(as.list(fill)[[1L]], "extend"))
      stop("fill cannot be 'extend'")
    if (!is.logical(ix)) ix <- seq_along(object) %in% ix
    replace(object, !ix, as.list(fill)[[1L]])
  } else {
    fill <- rep(as.list(fill), length = 3L)
    if (identical(fill[[2L]], "extend")) 
      stop("fill[[2L]] cannot be 'extend'")
    ix <- if (is.logical(ix)) rep(ix, length = length(object)) else seq_along(object) %in% ix
    wx <- which(ix)
    if (length(wx) == 0L) {
      object[] <- fill[[2L]]
      object
    } else {
      rng <- range(wx)
      if (identical(fill[[1L]], "extend")) fill[[1L]] <- object[rng[1L]]
      if (identical(fill[[3L]], "extend")) fill[[3L]] <- object[rng[2L]]
      fill_lens <- lengths(fill)
      pre <- seq_along(ix) < rng[1L]
      post <- seq_along(ix) > rng[2L]
      if (fill_lens[2L]) object[!ix] <- fill[[2L]]
      if (fill_lens[1L]) object[pre] <- fill[[1L]]
      if (fill_lens[3L]) object[post] <- fill[[3L]]
      omit <- (pre & !fill_lens[1L]) |
              (!pre & !post & !ix & !fill_lens[2L]) |
              (post & !fill_lens[3L])
      object <- object[!omit]
      if (sum(omit)) structure(object, na.action = which(omit)) else object
    }
  }
}
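## Example sketch of the 3-component fill (left / interior / right):
na.fill0(c(NA, NA, 3, NA, 5, NA), fill = list("extend", 0, NULL))
## expected: 3 3 3 0 5 with attribute na.action = 6 (trailing NA dropped)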
na.fill <- function(object, fill, ...) UseMethod("na.fill")
na.fill.zoo <- function(object, fill, ix, ...) {
	if (length(dim(object)) == 2 && NCOL(object) > 1) {
		ixmiss <- missing(ix)
		L <- lapply(1:NCOL(object), 
				function(i) {
					if (ixmiss) ix <- !is.na(object[,i])
					na.fill(object[,i], fill, ix, ...)
				})
		out <- do.call("merge", c(L, all = FALSE))
		colnames(out) <- colnames(object)
		return(out)
	}
	if (missing(ix)) ix <- !is.na(object)
	if ((is.logical(ix) && any(ix)) || (!is.logical(ix) && length(ix))) {
		n <- length(object)
		# integer indexes for output points which are present
		wix <- if (is.logical(ix)) which(ix) else ix
		# min and max integer index
		wx.min <- head(wix, 1) 
		wx.max <- tail(wix, 1)
		# similar to wrng <- wx.min:wx.max
		wrng <- seq(wx.min, length.out = wx.max - wx.min + 1)
		# recycle to length 3
		fill <- rep(as.list(fill), length.out = 3)
		# we will be coercing fill values to the class of coredata(object).
		# This allows fill=c("extend", NA) to work even though c() coerces
		#  the NA to a character NA.
		as.cls <- if (is.integer(coredata(object))) {
		  as.integer
		} else if(is.numeric(coredata(object))) {
		  as.numeric
		} else if(is.character(coredata(object))) {
		  as.character
                } else {
		  as.logical
		}
		fill <- lapply(fill, function(x) if (is.character(x) &&
			pmatch(x, "extend", nomatch = 0)) "extend" else as.cls(x))
		# fill points on left
		if (length(fill[[1]]) > 0) 
			if (!is.null(fill[[1]])) object[seq_len(wx.min - 1)] <- 
				if (is.character(fill[[1]]) && !is.na(fill[[1]]) && fill[[1]] == "extend")
						object[[wx.min]] else fill[[1]]
		# fill intermediate points
		# - this is for zoo method, for zooreg method it would be possible to
		#   perform linear interpolation in proportion to time rather than
		#   in proportion to the integer index
		if (length(fill[[2]]) > 0) {
			if (is.character(fill[[2]]) && !is.na(fill[[2]]) && fill[[2]] == "extend") object[wrng] <- 
					# as.list(approx(wix, unlist(object[wix]), xout = wrng)$y)
					approx(wix, unlist(object[wix]), xout = wrng)$y
			else object[intersect(which(!ix), wrng)] <- fill[[2]]
		}
		# fill points on right
		if (length(fill[[3]]) > 0) 
			object[seq(wx.max + 1, length.out = n - wx.max)] <- 
				if (is.character(fill[[3]]) && !is.na(fill[[3]]) && fill[[3]] == "extend")
						object[[wx.max]] else fill[[3]]
		keep <- seq_len(n)
		if (length(fill[[1]]) == 0) keep <- unique(pmax(wx.min, keep))
		if (length(fill[[2]]) == 0) {
			wrng <- seq(wx.min, length.out = wx.max - wx.min + 1)
			keep <- setdiff(keep, intersect(which(!ix), wrng))
		}
		if (length(fill[[3]]) == 0) keep <- unique(pmin(wx.max, keep)) 
		return(object[keep, , drop = is.null(dim(object))])
	} else if(length(fill)) {
	  fill <- unlist(fill[1])[1]
	  object[is.na(object)] <- if(!is.na(fill) && fill == "extend") NA else fill
	  return(object)
	}
}
na.fill.default <- function(object, fill, ix, ...) {
	coredata(na.fill(zoo(object), fill, ix, ...))
}
	
na.fill.ts <- function(object, fill, ix, ...) {
	as.ts(na.fill(as.zoo(object), fill, ix, ...))
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.fill.R 
 | 
					
	na.locf0 <- function(object, fromLast = FALSE, maxgap = Inf, coredata = NULL) {
  if(is.null(coredata)) coredata <- inherits(object, "ts") || inherits(object, "zoo") || inherits(object, "its") || inherits(object, "irts")
  if(coredata) {
    x <- object
    object <- if (fromLast) rev(coredata(object)) else coredata(object)
  } else {
    if(fromLast) object <- rev(object)
  }
  ok <- which(!is.na(object))
  if(is.na(object[1L])) ok <- c(1L, ok)
  gaps <- diff(c(ok, length(object) + 1L))
  object <- if(any(gaps > maxgap)) {
    .fill_short_gaps(object, rep(object[ok], gaps), maxgap = maxgap)
  } else {
    rep(object[ok], gaps)
  }
  if (fromLast) object <- rev(object)
  if(coredata) {
    x[] <- object
    return(x)
  } else {
    return(object)
  }
}
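## Example sketch: last observation carried forward (expected results in comments).
na.locf0(c(NA, 1, NA, NA, 2, NA))                    # NA 1 1 1 2 2
na.locf0(c(NA, 1, NA, NA, 2, NA), fromLast = TRUE)   # 1 1 2 2 2 NA
na.locf0(c(NA, 1, NA, NA, 2, NA), maxgap = 1)        # NA 1 NA NA 2 2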
na.locf <- function(object, na.rm = TRUE, ...)
	UseMethod("na.locf")
na.locf.default <- function(object, na.rm = TRUE, fromLast, rev, maxgap = Inf, rule = 2, ...) {
	L <- list(...)
	if ("x" %in% names(L) || "xout" %in% names(L)) {
		if (!missing(fromLast)) {
			stop("fromLast not supported if x or xout is specified")
		}
		return(na.approx(object, na.rm = na.rm, 
			maxgap = maxgap, method = "constant", rule = rule, ...))
	}
   	if (!missing(rev)) {
	   warning("na.locf.default: rev= deprecated. Use fromLast= instead.")
	   if (missing(fromLast)) fromLast <- rev
	} else if (missing(fromLast)) fromLast <- FALSE
	rev <- base::rev
	object[] <- if (length(dim(object)) == 0)
		na.locf0(object, fromLast = fromLast, maxgap = maxgap)
	else
		apply(object, length(dim(object)), na.locf0, fromLast = fromLast, maxgap = maxgap)
	if (na.rm) na.trim(object, is.na = "all") else object
}
na.locf.data.frame <- function(object, na.rm = TRUE, fromLast = FALSE, maxgap = Inf, ...)
{
    object[] <- lapply(object, na.locf0, fromLast = fromLast, maxgap = maxgap)
    if (na.rm) na.omit(object) else object
}
na.contiguous.data.frame <-
na.contiguous.zoo <- function(object, ...) 
{
    if (length(dim(object)) == 2) 
        good <- apply(!is.na(object), 1, all)
    else good <- !is.na(object)
    if (!sum(good)) 
        stop("all times contain an NA")
    tt <- cumsum(!good)
    ln <- sapply(0:max(tt), function(i) sum(tt == i))
    seg <- (seq_along(ln)[ln == max(ln)])[1] - 1
    keep <- (tt == seg)
    st <- min(which(keep))
    if (!good[st]) 
        st <- st + 1
    en <- max(which(keep))
    omit <- integer(0)
    n <- NROW(object)
    if (st > 1) 
        omit <- c(omit, 1:(st - 1))
    if (en < n) 
        omit <- c(omit, (en + 1):n)
    cl <- class(object)
    if (length(omit)) {
        object <- if (length(dim(object))) 
            object[st:en, ]
        else object[st:en]
        attr(omit, "class") <- "omit"
        attr(object, "na.action") <- omit
        if (!is.null(cl)) 
            class(object) <- cl
    }
    object
}
na.contiguous.list <- function(object, ...)
	lapply(object, na.contiguous)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.locf.R 
 | 
					
	na.spline <- function(object, ...) UseMethod("na.spline")
na.spline.zoo <- function(object, x = index(object), xout, ..., na.rm = TRUE, maxgap = Inf, along) {
    if (!missing(along)) {
        warning("along to be deprecated - use x instead")
        if (missing(x)) x <- along
    }
    missing.xout <- missing(xout) || is.null(xout)
    if (is.function(x)) x <- x(index(object))
    if (!missing.xout && is.function(xout)) xout <- xout(index(object))
    order.by <- if (missing.xout) index(object) else xout
    xout <- if (missing.xout) x else xout
    if (missing.xout || identical(xout, index(object))) {
        result <- object
    } else {
        object.x <- object
        if (!identical(class(x), class(xout))) {
            index(object.x) <- as.numeric(x)
            xout <- as.numeric(xout)
        } else {
            index(object.x) <- x
        }
        objectm <- merge(object.x, zoo(, xout))
        if (length(dim(objectm)) == 2) colnames(objectm) <- colnames(object)
        result <- window(objectm, index. = xout)
    }
    result[] <- na.spline.default(object, x = x, xout = xout, na.rm = FALSE, ..., maxgap = maxgap)
    if ((!missing(order.by) && !is.null(order.by)) || !missing.xout) {
        index(result) <- order.by
    }
    if (na.rm) {
        result <- na.trim(result, is.na = "all", maxgap = maxgap)
    }
    result
}
na.spline.zooreg <- function(object, ...) {
    object. <- structure(object, class = setdiff(class(object), "zooreg"))
    as.zooreg(na.spline(object., ...))
}
na.spline.default <- function(object, x = index(object), xout = x, ..., na.rm = TRUE, maxgap = Inf, along) {
    if (!missing(along)) {
        warning("along to be deprecated - use x instead")
        if (missing(x)) x <- along
    }
    na.spline.vec <- function(x, y, xout = x, ...) {
        na <- is.na(y)
	if(sum(!na) < 1L) {
	    ## splinefun() cannot be applied here, hence simply:
	    yf <- rep.int(NA, length(xout))
	    mode(yf) <- mode(y)
	    if(any(!na)) {
	        if(x[!na] %in% xout) {
		    yf[xout == x[!na]] <- y[!na]
		}
	    }
	    return(yf)
	}
	if(all(!na) && (length(xout) > maxgap) && !all(xout %in% x)) {
	    ## for maxgap to work correctly 'y' has to contain
	    ## actual NAs and be expanded to the full x-index
	    xf <- sort(unique(c(x, xout)))
	    yf <- rep.int(NA, length(xf))
	    yf[MATCH(x, xf)] <- y
	    x <- xf
	    y <- yf
	}
        yf <- splinefun(x[!na], y[!na], ...)(xout)
        if (maxgap < length(y)) {
            ## construct a series like y but with only gaps > maxgap
            ## (actual values don't matter as we only use is.na(ygap) below)
            ygap <- .fill_short_gaps(y, seq_along(y), maxgap = maxgap)
            ## construct y values at 'xout', keeping NAs from ygap
            ## (using indexing, as approx() does not allow NAs to be propagated)
            ix <- splinefun(x, seq_along(y), ...)(xout)
            yx <- ifelse(is.na(ygap[floor(ix)] + ygap[ceiling(ix)]), NA, yf)
            yx
        } else {
            yf
        }
    }
    if (!identical(length(x), length(index(object)))) {
        stop("x and index must have the same length")
    }
    x. <- as.numeric(x)
    if (missing(xout) || is.null(xout)) xout <- x.
    xout. <- as.numeric(xout)
    object. <- coredata(object)
    result <- if (length(dim(object.)) < 2) {
        na.spline.vec(x., coredata(object.), xout = xout., ...)
    } else {
        apply(coredata(object.), 2, na.spline.vec, x = x., xout = xout., ...)
    }
    if (na.rm) {
        result <- na.trim(result, is.na = "all", maxgap = maxgap)
    }
    result
}
na.spline.ts <- function(object, ...) {
    as.ts(na.spline(as.zoo(object), ...))
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.spline.R 
 | 
					
	na.trim <- function(object, ...) UseMethod("na.trim")
na.trim.default <- function (object, sides = c("both", "left", "right"), 
	is.na = c("any", "all"), maxgap = Inf, ...)
{
   is.na <- match.arg(is.na, c("any", "all"))
   nisna <- if (is.na == "any" || length(dim(object)) < 2L)  {
	complete.cases(object)
   } else rowSums(!is.na(object)) > 0
   rlength <- function(x) if(all(!x)) length(x) else min(which(x)) - 1L
   idx <- switch(match.arg(sides),
       left = {
           idx0 <- cumsum(nisna) > 0
	   idx0 | rlength(idx0) > maxgap
       },
       right = {
           idx0 <- cumsum(rev(nisna) > 0) > 0
	   rev(idx0) | rlength(idx0) > maxgap
       },
       both = {
           idx0l <- cumsum(nisna) > 0
	   idx0r <- cumsum(rev(nisna) > 0) > 0
	   (idx0l | rlength(idx0l) > maxgap) & (rev(idx0r) | rlength(idx0r) > maxgap)
       }
   )
   if (length(dim(object)) < 2L)
       object[idx]
   else
       object[idx,, drop = FALSE]
}
## need a 'ts' method because indexing destroys ts attributes
na.trim.ts <- function (object, ...)
{
    as.ts(na.trim(as.zoo(object), ...))
}
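## Example sketch: NAs are trimmed from the ends only, never the interior.
na.trim(c(NA, NA, 1, NA, 3, NA))                  # 1 NA 3
na.trim(c(NA, NA, 1, NA, 3, NA), sides = "left")  # 1 NA 3 NA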
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/na.trim.R 
 | 
					
	make.par.list <- function(nams, x, n, m, def, recycle = sum(unnamed) > 0) {
##FIXME: should defaults for n, m, def be available?
# if nams are the names of our variables and x is a parameter
# specification such as list(a = c(1,2), c(3,4)) then 
# create a new list which uses the named variables from x
# and assigns the unnamed in order.  For the remaining variables
# assign them the default value if recycle = FALSE or recycle the
# unnamed variables if recycle = TRUE.  The default value for
# recycle is TRUE if there is at least one unnamed variable
# in x and FALSE if there are only named variables in x.
# n is the length of the series and m is the total number of series.
# It only needs to know whether m is 1 or greater than 1.
# def is the default value used when recycle = FALSE
# recycle = TRUE means recycle unspecified values
# recycle = FALSE means replace values for unspecified series with def
# Within series recycling is done even if recycle=FALSE.
  # Should we allow arbitrary names in 1d case?
  # if (m > 1) stopifnot(all(names(x) %in% c("", nams)))
  if (!is.list(x)) x <- if (m == 1) list(x) else as.list(x)
  y <- vector(mode = "list", length = length(nams))
  names(y) <- nams
  in.x <- nams %in% names(x)
  unnamed <- if (is.null(names(x))) rep(TRUE, length(x)) else names(x) == ""
  if (!recycle) y[] <- def
  y[in.x] <- x[nams[in.x]]
  if (recycle) {
    stopifnot(sum(unnamed) > 0)
    y[!in.x] <- rep(x[unnamed], length.out = sum(!in.x)) ## CHECK, this was: x[unnamed]
  } else {
    y[which(!in.x)[seq_len(sum(unnamed))]] <- x[unnamed]
  }
  lapply(y, function(y) if (length(y)==1) y else rep(y, length.out = n))
}
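## Example sketch: named entries go to the matching series, unnamed entries
## are recycled over the remaining ones.
make.par.list(c("a", "b", "c"), list(b = "red", "blue"), n = 1, m = 3, def = 1)
## expected: list(a = "blue", b = "red", c = "blue")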
plot.zoo <- function(x, y = NULL, screens, plot.type, panel = lines, 
  xlab = "Index", ylab = NULL, main = NULL, xlim = NULL, ylim = NULL,
  xy.labels = FALSE, xy.lines = NULL, yax.flip = FALSE,
  oma = c(6, 0, 5, 0), mar = c(0, 5.1, 0, if(yax.flip) 5.1 else 2.1), 
  col = 1, lty = 1, lwd = 1, pch = 1, type = "l", log = "",
  nc, widths = 1, heights = 1, ...)
{
  ## if y supplied: scatter plot y ~ x
  if(!is.null(y)) {
    if(NCOL(x) > 1 || NCOL(y) > 1) stop("scatter plots only for univariate zoo series")
    xyzoo <- merge.zoo(x, y, all = FALSE)
    xy <- coredata(xyzoo)
    xy <- xy.coords(xy[,1], xy[,2])
    xlab <- if(missing(xlab)) deparse(substitute(x)) else xlab
    ylab <- if(missing(ylab)) deparse(substitute(y)) else ylab
    xlim <- if(is.null(xlim)) range(xy$x[is.finite(xy$x)]) else xlim
    ylim <- if(is.null(ylim)) range(xy$y[is.finite(xy$y)]) else ylim
    if(is.null(main)) main <- ""
    do.lab <- if(is.logical(xy.labels)) xy.labels else TRUE
    if(is.null(xy.lines)) xy.lines <- do.lab
    ptype <- if(do.lab) "n" else if(missing(type)) "p" else type
    plot.default(xy, type = ptype,col = col, pch = pch, main = main,
      xlab = xlab, ylab = ylab, xlim = xlim, ylim = ylim, log = log, ...)
    if(do.lab) text(xy, col = col,
      labels = if(!is.logical(xy.labels)) xy.labels else index2char(index(xyzoo)), ...)
    if(xy.lines) lines(xy, col = col, lty = lty, lwd = lwd, type = if(do.lab) "c" else "l", ...)
    return(invisible(xyzoo))
  }
  ## Else : no y, only x
  recycle <- function(a, len, nser)
     rep(lapply(as.list(a), rep, length.out = len), length.out = nser)
  # same as range except it passes pairs through
  range2 <- function(x, ...) if (length(x) == 2) x else range(x, ...)
  if (missing(plot.type)) {
	plot.type <- if (missing(screens)) "multiple"
	else if (length(unique(screens)) == 1) "single" 
	else "multiple"
  }
  plot.type <- match.arg(plot.type, c("multiple", "single"))
  nser <- NCOL(x)
  if (missing(screens)) {
	screens <- if (plot.type == "single") 1 else seq_len(nser)
  }	
  dots <- list(...)
  x.index <- index(x)
  if(is.ts(x.index)) x.index <- as.vector(x.index)
  cn <- if (is.null(colnames(x))) paste("V", seq_len(nser), sep = "")
	  else colnames(x)
  screens <- make.par.list(cn, screens, NROW(x), nser, 1)
  screens <- as.factor(unlist(screens))[drop = TRUE]
  ngraph <- length(levels(screens))
  if(nser > 1 && (plot.type == "multiple" || ngraph > 1)) {
    if (ngraph == 1) { 
	screens <- as.factor(seq(nser))
	ngraph <- nser
    }
    if(is.null(main)) main <- deparse(substitute(x))
    main.outer <- TRUE
    if(is.null(ylab)) ylab <- colnames(x)[!duplicated(screens)]
    if(is.null(ylab)) ylab <- paste("Series", which(!duplicated(screens)))
    if(is.call(ylab)) ylab <- as.expression(ylab)
    ylab <- rep(ylab, length.out = ngraph)
    if(!is.list(ylab)) ylab <- as.list(ylab)
    lty <- rep(lty, length.out = nser)
    lwd <- rep(lwd, length.out = nser)
    col <- make.par.list(cn, col, NROW(x), nser, 1)
    pch <- make.par.list(cn, pch, NROW(x), nser, par("pch"))
    type <- make.par.list(cn, type, NROW(x), nser, "l")
    if (!is.null(ylim)) {
        if (is.list(ylim)) ylim <- lapply(ylim, range2, na.rm = TRUE)
	else ylim <- list(range2(ylim, na.rm = TRUE))
	ylim <- lapply(make.par.list(cn, ylim, 2, nser, NULL), function(x) 
		if (is.null(x) || length(na.omit(x)) == 0) NULL 
		else range2(x, na.rm = TRUE))
    }
    panel <- match.fun(panel)
    if(missing(nc)) nc <- if(ngraph >  4) 2 else 1
    oldpar <- par(no.readonly = TRUE)
    on.exit({ par(oldpar) })
    nr <- ceiling(ngraph / nc)
    layout(matrix(seq(nr*nc), nr), widths = widths, heights = heights)
    par(mar = mar, oma = oma)
	# TRUE if all elements of L are the same -- else FALSE
	allsame <- function(L) {
		f <- function(x, y) if (identical(x, y)) x
		!is.null(Reduce(f, L))
	}
	# idx is vector of indices into ylim.  
	# If the entries indexed by it are all the same then use that common value;
	# otherwise, if the ylim are specified use the range of the ylim values;
	# otherwise, use the range of the data
	f <- function(idx) if (allsame(ylim)) ylim[idx][[1]]
		else if (!is.null(ylim) && length(idx) > 0 && 
			length(unlist(ylim[idx])) > 0) range(ylim[idx], finite = TRUE)
		else range(x[, idx], na.rm = TRUE)
	# ranges is indexed by screen
	ranges <- tapply(1:ncol(x), screens, f)
    for(j in seq_along(levels(screens))) {
      panel.number <- j
      y.side <- if (j %% 2 || !yax.flip) 2 else 4
      range. <- rep(ranges[[j]], length.out = length(time(x)))
      if(j%%nr==0 || j == length(levels(screens))) {
			args <- list(x.index, range., xlab = "", ylab = "", yaxt = "n",
				xlim = xlim, ylim = ylim[[j]], log = log, ...)
			args$type <- "n"
			do.call("plot", args)
			mtext(xlab, side = 1, line = 3)
      } else {      
			args <- list(x.index, range., xaxt = "n", yaxt = "n", xlab = "", 
				ylab = "", xlim = xlim, ylim = ylim[[j]], log = log, ...)
			args$type <- "n"
			do.call("plot", args)
			if ("bty" %in% names(args) && args$bty == "n") {} else box()
      }
      do.call("axis", c(list(side = y.side, xpd = NA), dots))
      mtext(ylab[[j]], y.side, line = 3)
      for(i in which(screens == levels(screens)[j])) {
        ## for potential usage in panel function
        series.number <- i
        series.within.screen <- ave(seq_along(screens), screens, FUN = seq_along)[series.number]
       
        ## draw individual lines/points with panel function
        panel(x.index, x[, i], col = col[[i]], pch = pch[[i]], lty = lty[i], lwd = lwd[i], type = type[[i]], ...)
      }
    }
  } else {
    if(is.null(ylab)) ylab <- deparse(substitute(x))
    if(is.call(ylab)) ylab <- as.expression(ylab)
    if(is.null(main)) main <- ""
    main.outer <- FALSE
    if(is.null(ylim)) ylim <- range(x, na.rm = TRUE)
	else ylim <- range2(c(ylim, recursive = TRUE), na.rm = TRUE)
    lty <- rep(lty, length.out = nser)
	lwd <- rep(lwd, length.out = nser)
    col <- make.par.list(cn, col, NROW(x), nser, 1)
    pch <- make.par.list(cn, pch, NROW(x), nser, par("pch"))
    type <- make.par.list(cn, type, NROW(x), nser, "l")
   
    dummy <- rep(range(x, na.rm = TRUE), 
	length.out = length(index(x)))
	    
    args <- list(x.index, dummy, xlab = xlab, ylab = ylab[1], ylim = ylim, xlim = xlim, log = log, ...)
    args$type <- "n"
    do.call("plot", args)
	if ("bty" %in% names(args) && args$bty == "n") {} else box()
    y <- as.matrix(x)
    for(i in 1:nser) {
      panel(x.index, y[, i], col = col[[i]], pch = pch[[i]], lty = lty[i], 
        lwd = lwd[i], type = type[[i]], ...)
    }
  }
  dots <- list(...)
  title.args <- c(list(main = main, outer = main.outer),
    dots[grep("[.]main$", names(dots))])
  do.call("title", title.args)
  return(invisible(x))
}
lines.zoo <- function(x, y = NULL, type = "l", ...)
{
  if (is.null(y)) {
     if(NCOL(x) == 1) lines(index(x), x, type = type, ...)
       else stop("Can't plot lines for multivariate zoo object")
  } else
     lines(coredata(cbind(x,y)), type = type, ...)
}
points.zoo <- function(x, y = NULL, type = "p", ...)
  lines(x, y, type = type, ...)
plot.tis <- function(x, ...) eval.parent(substitute(plot(as.zoo(x), ...)))
plot.ti <- function (x, y, xlab = "", ...) 
{
	x <- tis::POSIXct(x)
	NextMethod()
}
points.ti <- lines.ti <- function(x, ...) {
	x <- tis::POSIXct(x)
	NextMethod()
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/plot.zoo.R 
 | 
					
	read.zoo <- function(file, format = "", tz = "", FUN = NULL,
  regular = FALSE, index.column = 1, drop = TRUE, FUN2 = NULL, 
  split = NULL, aggregate = FALSE, ..., text, read = read.table)
{
  if (missing(file) && !missing(text)) {
        file <- textConnection(text)
        on.exit(close(file))
  }
  ## if file is a vector of file names
  if (is.character(file) && length(file) > 1) {
	mc <- match.call()
	pf <- parent.frame()
	L <- sapply(file, function(file) eval(replace(mc, 2, file), pf), 
			simplify = FALSE)
	return(do.call("merge", L))
  }
  ## read data
  rval <- if (is.data.frame(file)) {
    if(inherits(file, "tbl")) as.data.frame(file) else file
  } else {
    read(file, ...)
  }
  ## if time index appears to be already processed, use FUN = identity
  if (is.data.frame(file) && 
      length(index.column) == 1 && 
      !is.character(rval[[index.column]]) &&
      !is.factor(rval[[index.column]]) &&
      missing(tz) &&
      missing(format) &&
      missing(FUN)) FUN <- identity
  ## if time index is POSIXlt it is coerced to POSIXct
  if (is.data.frame(file) && 
      length(index.column) == 1 && 
      inherits(rval[[index.column]], "POSIXlt")) rval[[index.column]] <- as.POSIXct(rval[[index.column]])
  # returns TRUE if a formal argument x has no default
  no.default <- function(x) typeof(x) %in% c("symbol", "language")
  if (is.null(FUN) && is.null(FUN2)) {
	index.column <- as.list(index.column)
  } else if (identical(FUN, paste)) {
	index.column <- as.list(index.column)
  } else if (is.null(FUN) && identical(FUN2, paste)) {
	index.column <- as.list(index.column)
  } else if (!is.null(FUN) && !is.list(index.column) && length(index.column) <=
		length(sapply(formals(match.fun(FUN)), no.default))) {
	index.column <- as.list(index.column)
  } else if (is.null(FUN) && !is.null(FUN2) && length(index.column) <= 
		length(sapply(formals(match.fun(FUN2)), no.default))) {
	index.column <- as.list(index.column)
  }
  if (is.list(index.column) && length(index.column) == 1 && 
	index.column[[1]] == 1) index.column <- unlist(index.column)
  is.index.column <- unname(unlist(index.column))
  is.index.column <- if(is.numeric(is.index.column)) {
     seq_along(rval) %in% is.index.column
  } else {
     seq_along(rval) %in% is.index.column | names(rval) %in% is.index.column
 }
  name.to.num <- function(x) if (is.character(x))
		match(x, names(rval), nomatch = 0) else x
  index.column <- if (is.list(index.column)) index.column <- 
	lapply(index.column, name.to.num)
  else name.to.num(index.column)
  ## convert factor columns in index to character
  is.fac <- sapply(rval, is.factor)
  is.fac.index <- is.fac & is.index.column
  if (any(is.fac.index)) rval[is.fac.index] <- 
	lapply(rval[is.fac.index], as.character)
  ## if file does not contain index or data
  if(NROW(rval) < 1) {
    if(is.data.frame(rval)) rval <- as.matrix(rval)
    if(NCOL(rval) > 1) rval <- rval[, ! is.index.column, drop = drop]
    rval2 <- zoo(rval)
    return(rval2)
  }
  ## extract index
  if(NCOL(rval) < 1) stop("data file must specify at least one column")
  
  ## extract index, retain rest of the data
  ix <- if (identical(index.column, 0) || identical(index.column, list(0)) ||
	identical(index.column, 0L) || identical(index.column, list(0L))) {
	attributes(rval)$row.names
  } else if (is.list(index.column)) {
	sapply(index.column, function(j) rval[,j], simplify = FALSE)
  } else rval[,index.column]
  # split. is col number of split column (or Inf, -Inf or NULL)
  split. <- if (is.character(split)) match(split, colnames(rval), nomatch = 0)
  else split
  rval2 <- if (is.null(split.)) {
    rval[ , ! is.index.column, drop = FALSE]
  } else {
     split.values <- if (is.character(split) || all(is.finite(split))) rval[, split]
	 else {
		# Inf: first value in each run is first series, etc.
	    # -Inf: last value in each run is first series, etc.
		if (identical(split, Inf)) ave(ix, ix, FUN = seq_along)
	    else if (identical(split, -Inf)) ave(ix, ix, FUN = function(x) rev(seq_along(x)))
	    else ix
	 }
	 if (0 %in% split.) {
		stop(paste("split:", split, "not found in colnames:", colnames(rval)))
	 }
	 rval[,-c(if (all(is.finite(split.))) split. else 0, which(is.index.column)), drop = FALSE]
  }
  if(is.factor(ix)) ix <- as.character(ix)
  rval3 <- if(is.data.frame(rval2)) as.matrix(rval2) else  if(is.list(rval2)) t(rval2) else rval2
  
  if(NCOL(rval3) == 1 && drop) rval3 <- drop(rval3)
    
  ## index transformation functions
  toDate <- if(missing(format) || is.null(format)) {
     function(x, ...) as.Date(format(x, scientific = FALSE))
  } else {
     function(x, format) {
       if(any(sapply(c("%H", "%M", "%S"), function(y) grepl(y, format, fixed = TRUE)))) {
         warning("the 'format' appears to be for a date/time, please specify 'tz' if you want to create a POSIXct time index")
       }
       as.Date(format(x, scientific = FALSE), format = format)
     }
  }
  toPOSIXct <- if (missing(format) || is.null(format)) {
        function(x, tz) as.POSIXct(format(x, scientific = FALSE), tz = tz)
  } else function(x, format, tz) {
        as.POSIXct(strptime(format(x, scientific = FALSE), tz = tz, format = format))
  }
  toDefault <- function(x, ...) {
    rval. <- try(toPOSIXct(x, tz = ""), silent = TRUE)
    if(inherits(rval., "try-error"))
      rval. <- try(toDate(x), silent = TRUE)
    else {
      hms <- as.POSIXlt(rval.)
      hms <- hms$sec + 60 * hms$min + 3600 * hms$hour
      if(isTRUE(all.equal(hms, rep.int(hms[1], length(hms))))) {
        rval2. <- try(toDate(x), silent = TRUE)
        if(!inherits(rval2., "try-error")) rval. <- rval2.
      }
    }
    if(inherits(rval., "try-error")) rval. <- rep(NA, length(x))
    return(rval.)
  }
  toNumeric <- function(x, ...) x
  
  ## setup default FUN
  if ((missing(FUN) || is.null(FUN)) && !missing(FUN2) && !is.null(FUN2)) {
	FUN <- FUN2
	FUN2 <- NULL
  }
  FUN0 <- NULL
  if(is.null(FUN)) {
	if (is.list(index.column)) FUN0 <- paste
    FUN <- if (!missing(tz) && !is.null(tz)) toPOSIXct
        else if (!missing(format) && !is.null(format)) toDate
        else if (is.numeric(ix)) toNumeric
        else toDefault
  }
  FUN <- match.fun(FUN)
 processFUN <- function(...) {
	if (is.data.frame(..1)) FUN(...)
	else if (is.list(..1)) {
		if (is.null(FUN0)) do.call(FUN, c(...))
		else {
			L <- list(...)
			L[[1]] <- do.call(FUN0, L[[1]])
			do.call(FUN, L)
		}
	} else FUN(...)
  }
  ## compute index from (former) first column
  ix <- if (missing(format) || is.null(format)) {
    if (missing(tz) || is.null(tz)) processFUN(ix) else processFUN(ix, tz = tz)
  } else {
    if (missing(tz) || is.null(tz)) processFUN(ix, format = format) 
    else processFUN(ix, format = format, tz = tz)
  }
  if (!is.null(FUN2)) {
	FUN2 <- match.fun(FUN2)
	ix <- FUN2(ix)
  }
  
  ## sanity checking
  if(anyNA(ix)) {
    idx <- which(is.na(ix))
	msg <- if (length(idx) == 1)
		paste("index has bad entry at data row", idx)
	else if (length(idx) <= 100)
		paste("index has bad entries at data rows:", paste(idx, collapse = " "))
	else paste("index has", length(idx), "bad entries at data rows:", 
		paste(head(idx, 100), collapse = " "), "...")
	stop(msg)
  }
  if(length(ix) != NROW(rval3)) stop("index does not match data")
  
  ## setup zoo object and return 
  ## Suppress duplicates warning if aggregate specified
  if(identical(aggregate, TRUE)) {
    agg.fun <- mean
  } else if(identical(aggregate, FALSE)) {
    agg.fun <- NULL
  } else {
    agg.fun <- match.fun(aggregate)
    if(!is.function(agg.fun)) stop(paste("invalid specification of", sQuote("aggregate")))
  }
  remove(list = "aggregate")
  if (is.null(split)) {
	rval4 <- if (!is.null(agg.fun)) aggregate(zoo(rval3), ix, agg.fun)
	else zoo(rval3, ix)
    rval8 <- if(regular && is.regular(rval4)) as.zooreg(rval4) else rval4
  } else {
	split.matrix <- split.data.frame
	rval4 <- split(rval3, split.values)
	ix <- split(ix, split.values)
	# rval5 <- mapply(zoo, rval4, ix)
    rval5 <- if (!is.null(agg.fun)) {
		lapply(seq_along(rval4), function(i) {
			aggregate(zoo(rval4[[i]]), ix[[i]], agg.fun)
		})
	} else lapply(seq_along(rval4), function(i) zoo(rval4[[i]], ix[[i]]))
	names(rval5) <- names(rval4)
    rval6 <- if(regular) {
		lapply(rval5, function(x) if (is.regular(x)) as.zooreg(x) else x)
	} else rval5
	rval8 <- do.call(merge, rval6)
  }
	
  return(rval8)
}
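## Example sketch: read a two-column series from inline text; the character
## index column is auto-detected and converted to a Date index here.
read.zoo(text = "2000-01-01 1
2000-01-02 2
2000-01-03 3")   # zoo series 1 2 3 with a Date index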
read.table.zoo <- function(file, format = "", tz = "", FUN = NULL,
  regular = FALSE, index.column = 1, drop = TRUE, FUN2 = NULL, 
  split = NULL, aggregate = FALSE, ...)
{
  file <- read.table(file, ...)
  read.zoo(file, format = format, tz = tz, FUN = FUN, regular = regular,
    index.column = index.column, drop = drop, FUN2 = FUN2, 
    split = split, aggregate = aggregate)
  
}
read.csv.zoo <- function(..., read = read.csv) {
  read.zoo(..., read = read)
}
read.csv2.zoo <- function(..., read = read.csv2) {
  read.zoo(..., read = read)
}
read.delim.zoo <- function(..., read = read.delim) {
  read.zoo(..., read = read)
}
read.delim2.zoo <- function(..., read = read.delim2) {
  read.zoo(..., read = read)
}
write.zoo <- function(x, file = "", index.name = "Index",
  row.names = FALSE, col.names = NULL, ...)
{
  if(is.null(col.names)) col.names <- !is.null(colnames(x))
  dx <- as.data.frame(x)  
  stopifnot(all(names(dx) != index.name))
  dx[[index.name]] <- as.character(index(x))
  dx <- dx[, c(ncol(dx), 1:(ncol(dx)-1))]
  write.table(dx, file = file, row.names = row.names, col.names = col.names, ...)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/read.zoo.R 
 | 
					
	
# data is a zoo object or a plain vector or matrix
#
# width is 
# - a list of integer vectors representing offsets or a plain vector of widths.
#   There is one per time point or its recycled if too short.  recycling uses
#   by= argument if length(width) is 1; otherwise, by is ignored.
#   If width represents widths then they are turned into offsets using align.
#
# If we are at 5th time of data and width[[5]] is c(-2,-1,0) then FUN is applied
#   to positions i + width[[i]] = 5 + c(-2,-1,0)  = 3:5 of z (so in terms of
#   a width specification it would be the same as width = 3, align = "right").
#
# Therefore we have the following transformations:
#   widths are converted to offsets which are converted to positions.  
#   The offsets are the components of width and the 
#   positions are i+width[[i]] after partial processing.  partial can be:
#   - logical.  FALSE means that all offsets must exist or else no result is 
#     produced for that time point.  TRUE means that at least one offset must 
#     exist.
#   - numeric.  The minimum number of offsets that must exist.  If < 0 then
#     all elements of the offset must exist.  Note that TRUE corresponds to 1
#     and FALSE corresponds to -1.  These are the two most common values.
#
# Points that are not computed are filled in with fill.  fill has
#   three elements and is recycled if too short.  fill = NULL is the default.
#   (A short usage sketch follows the rollapply.zoo definition below.)
#   The elements represent what to fill the left points, interior points and
#   right points.  NULL causes no filling and "extend" causes the first or
#   last point to be repeated or interior points to be linearly approximated.
# wrapper around rollapply which defaults to align = "right"
rollapplyr <- function(..., align = "right") {
	rollapply(..., align = align)
}
rollapply <- function(data, ...) UseMethod("rollapply")
rollapply.default <- function(data, ...) {
        if (length(data) < 1L) return(data)
	coredata(rollapply(zoo(data), ...))
}
rollapply.ts <- function(data, ...) {
        if (length(data) < 1L) return(data)
	as.ts(rollapply(as.zoo(data), ...))
}
rollapply.zoo <- function(data, width, FUN, ..., by = 1, 
	by.column = TRUE, fill = if (na.pad) NA, na.pad = FALSE,
	partial = FALSE, align = c("center", "left", "right"), coredata = TRUE) {
        if (length(data) < 1L) return(data)
	if (!missing(na.pad)) {
		warning("na.pad argument is deprecated")
	}
    if (is.vector(width) && !is.list(width) && length(width) == 1 &&
		by.column && length(by) == 1 && by == 1 && (missing(partial) || identical(partial, FALSE)) &&
		length(list(...)) < 1 && length(sw <- deparse(substitute(FUN))) == 1) {
		  if (sw == "mean" && !anyNA(data)) {
				return(rollmean(data, width, fill = fill, align = align))
		  } else if (sw == "median" && width %% 2 == 1 && !anyNA(data)) {
				return(rollmedian(data, width, fill = fill, align = align))
	      } else if (sw == "max") {
				return(rollmax(data, width, fill = fill, align = align))
	      }
	}
	FUN <- match.fun(FUN)
	if (by.column && length(dim(data)) == 2) {
		z <- do.call(merge,
			lapply(1:NCOL(data), function(j)
				rollapply(data[, j, drop = TRUE], width = width, FUN = FUN, ...,
					by = by, by.column = by.column, fill = fill,
					partial = partial, align = align, coredata = coredata)
			)
		)
		if (NCOL(data) == 1) dim(z) <- c(length(z), 1)
		colnames(z) <- if (NCOL(z) == NCOL(data)) colnames(data)
		return(z)
	}
	if (is.logical(partial)) partial <- if (partial) 1 else -1
	# convert widths to offsets using align
	align <- match.arg(align)
	width <- if (!is.list(width)) {
	  lapply(trunc(width), function(w) {
			if (align == "right") seq(to = 0, length.out = w)
			else if (align == "center") seq(to = floor(w/2), length.out = w)
			else seq(from = 0, length.out = w)
	  })
	} else {
	  lapply(width, function(w) {
	                if(is.null(w)) NULL else trunc(w)
	  })
	}
	# recycle width (using by if length(width) == 1)
	width <- if (length(width) == 1) {
		w <- rep(list(NULL), NROW(data))
		start.at <- if (partial < 0) max(-min(width[[1]]), 0) + 1 else 1
		start.at <- min(NROW(data), start.at)
		replace(w, seq(start.at, NROW(data), by = by), width)
	} else rep(width, length.out = NROW(data))
	f <- if (is.null(dim(data))) {
		# undimensioned
		#
		# if FUN is to be evaluated at offsets for the ith point then calculate
		# positions, do partial processing and apply FUN
		function(i, offsets, data, ...) { 
			if (is.null(offsets)) return(NULL)
			posns <- i + offsets
			ix <- posns >= 1 & posns <= NROW(data)
			if (partial < 0) {
				if (all(ix)) FUN(data[posns], ...)
			} else if (sum(ix) >= partial) {
				FUN(data[replace(posns, !ix, 0)], ...)
			}
		}
        } else {
		# dimensioned
		#
		# same f as in the undimensioned branch, except data[.] becomes data[.,]
		function(i, offsets, data, ...) { 
			if (is.null(offsets)) return(NULL)
			posns <- i + offsets
			ix <- posns >= 1 & posns <= NROW(data)
			if (partial < 0) {
				if (all(ix)) FUN(data[posns,], ...)
			} else if (sum(ix) >= partial) {
				FUN(data[replace(posns, !ix, 0),], ...)
			}
		}
	}
        dat <- if(coredata) coredata(data) else data
	dat <- mapply(f, seq_along(time(data)), width, 
		MoreArgs = list(data = dat, ...), SIMPLIFY = FALSE) 
		
	ix <- !sapply(dat, is.null) # logical index of non-NULL results
        ## flatten data if output of FUN has a dim and > 1 row (e.g., matrix or data.frame)
        if(any(sapply(dat[ix], function(d) !is.null(nrow(d)) && nrow(d) > 1L))) {
	    dat[ix] <- lapply(dat[ix], function(d) unlist(as.data.frame(d)))
	}
	if (!missing(fill) || !missing(na.pad)) {
		# replace NULLs with NAs
		dat <- lapply(dat, function(x) if (is.null(x)) NA else x)
		# construct zoo object
		dat <-
		if (max(sapply(dat, length)) > 1)
			zoo(do.call("rbind", dat), index(data), attr(data, "frequency"))
		else
			zoo(do.call("c", dat), index(data), attr(data, "frequency"))
		# perform filling
		dat <- na.fill(dat, fill, ix)
	} else {
		# construct zoo object removing points corresponding to NULL
		dat <- if (max(sapply(dat, length)) > 1)
			zoo(do.call("rbind", dat), index(data)[ix], attr(data, "frequency"))
		else
			zoo(do.call("c", dat), index(data)[ix], attr(data, "frequency"))
	}
	dat
}
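# Example sketch of the offset form described in the comments above: both
# calls apply FUN to positions i + c(-2, -1, 0), i.e. a right-aligned window
# of width 3 (expected results in comments).
z <- zoo(1:5)
rollapply(z, width = 3, FUN = sum, align = "right")  # 6 9 12 at times 3:5
rollapply(z, width = list(c(-2, -1, 0)), FUN = sum)  # same result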
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/rollapply.R 
 | 
					
	# rollmean, rollmax, rollmedian (, rollmad) based on code posted by Jarek Tuszynski at
# https://www.stat.math.ethz.ch/pipermail/r-help/2004-October/057363.html
# ToDo: rollmad, currently rollapply() can be used
rollmeanr <- function(..., align = "right") {
	rollmean(..., align = align)
}
rollmean <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...) {
	UseMethod("rollmean")
}
rollmean.zoo <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...) {
  if (length(x) < 1L) return(x)
  if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.")
  align <- match.arg(align)
  n <- length(index(x))
  k <- trunc(k)
  if(k > n || anyNA(coredata(x))) return(rollapply(x, k, FUN = (mean), fill = fill, align = align, ...))
  if (length(dim(x)) == 2) {
	  # merge is the only zoo specific part of this method
	  
	  out <- do.call("merge", c(lapply(1:NCOL(x), function(i) {
		rollmean(x[, i, drop = TRUE], k, fill = fill, align = align, ...)
	  }), all = FALSE))
	  if (ncol(x) == 1) dim(out) <- c(length(out), 1)
	  colnames(out) <- colnames(x)
	  return(out)
  }
  ix <- switch(align,
      "left" = { 1:(n-k+1) },
      "center" = { floor((1+k)/2):ceiling(n-k/2) },
      "right" = { k:n })
  xu <- unclass(x)
  y <- xu[k:n] - xu[c(1, seq_len(n-k))] # difference from previous
  y[1] <- sum(xu[1:k])		 # find the first
  # sum precomputed differences
  rval <- cumsum(y)/k
  x[ix] <- rval
  na.fill(x, fill = fill, ix)
}
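## Plain-vector sketch of the running-sum identity used above:
## y[1] = sum(x[1:k]) and y[j] = x[j+k-1] - x[j-1] for j > 1, so
## cumsum(y)/k yields every window mean in a single pass.
x <- c(2, 4, 6, 8, 10); k <- 3
y <- x[k:5] - x[c(1, 1:2)]
y[1] <- sum(x[1:k])
cumsum(y) / k   # 4 6 8 == the rolling means of width 3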
rollmean.default <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{		
		if (length(x) < 1L) return(x)
		coredata(rollmean(zoo(x), k, fill = fill, align = align, ...))
}
rollmean.ts <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		as.ts(rollmean(as.zoo(x), k, fill = fill, align = align, ...))
}
rollsumr <- function(..., align = "right") {
	rollsum(..., align = align)
}
rollsum <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...) {
	UseMethod("rollsum")
}
rollsum.zoo <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
  if (length(x) < 1L) return(x)
  if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.")
  align <- match.arg(align)
  n <- length(index(x))
  k <- trunc(k)
  if(k > n || anyNA(coredata(x))) return(rollapply(x, k, FUN = (sum), fill = fill, align = align, ...))
  if (length(dim(x)) == 2) {
	  # merge is the only zoo specific part of this method
	  
	  out <- do.call("merge", c(lapply(1:NCOL(x), function(i) {
		rollsum(x[, i, drop = TRUE], k, fill = fill, align = align, ...)
	  }), all = FALSE))
	  if (ncol(x) == 1) dim(out) <- c(length(out), 1)
	  colnames(out) <- colnames(x)
	  return(out)
  }
  ix <- switch(align,
      "left" = { 1:(n-k+1) },
      "center" = { floor((1+k)/2):ceiling(n-k/2) },
      "right" = { k:n })
  xu <- unclass(x)
  y <- xu[k:n] - xu[c(1, seq_len(n-k))] # difference from previous
  y[1] <- sum(xu[1:k])		 # find the first
  # sum precomputed differences
  rval <- cumsum(y)
  x[ix] <- rval
  na.fill(x, fill = fill, ix)
}
rollsum.default <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		
		coredata(rollsum(zoo(x), k, fill = fill, align = align, ...))
}
rollsum.ts <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		as.ts(rollsum(as.zoo(x), k, fill = fill, align = align, ...))
}
rollmaxr <- function(..., align = "right") {
	rollmax(..., align = align)
}
rollmax <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...) {
	UseMethod("rollmax")
}
rollmax.zoo <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
  if (length(x) < 1L) return(x)
  if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.")
  align <- match.arg(align)
  if (length(dim(x)) == 2) {
	  # merge is the only zoo specific part of this method
	  out <- do.call("merge", c(lapply(1:NCOL(x), function(i) {
		rollmax(x[, i, drop = TRUE], k, fill = fill, align = align, ...)
	  }), all = FALSE))
	  if (ncol(x) == 1) dim(out) <- c(length(out), 1)
	  colnames(out) <- if (ncol(x) == ncol(out)) colnames(x)
	  return(out)
  }
  n <- length(x)
  k <- trunc(k)
  if(k > n) return(rollapply(x, k, FUN = (max), fill = fill, align = align, ...))
  ix <- switch(align,
      "left" = { 1:(n-k+1) },
      "center" = { floor((1+k)/2):ceiling(n-k/2) },
      "right" = { k:n })
  rval <- rep(0, n) 
  a <- 0
  xc <- coredata(x)
  if(k == 1) {
    rval <- xc
  } else {
    for (i in k:n) {
      rval[i] <- if (is.na(a) || is.na(rval[i-1]) || a==rval[i-1]) 
        max(xc[(i-k+1):i]) # calculate max of window
      else 
        max(rval[i-1], xc[i]); # max of window = rval[i-1] 
      a <- xc[i-k+1] # point that will be removed from window
    }
    rval <- rval[-seq(k-1)]
  }
  x[ix] <- rval
  na.fill(x, fill = fill, ix)
}
rollmax.default <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		
		coredata(rollmax(zoo(x), k, fill = fill, align = align, ...))
}
rollmax.ts <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		
		as.ts(rollmax(as.zoo(x), k, fill = fill, align = align, ...))
}
rollmedianr <- function(..., align = "right") {
	rollmedian (..., align = align)
}
rollmedian <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...) {
	UseMethod("rollmedian")
}
rollmedian.zoo <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
  if (length(x) < 1L) return(x)
  if (!missing(na.pad)) warning("na.pad is deprecated. Use fill.")
  align <- match.arg(align)
  n <- length(index(x))
  k <- trunc(k)
  if(k > n || anyNA(coredata(x))) return(rollapply(x, k, FUN = (median), fill = fill, align = align, ...))
  if (length(dim(x)) == 2) {
	  # merge is the only zoo specific part of this method
	  out <- do.call("merge", c(lapply(1:NCOL(x), function(i) {
		rollmedian(x[, i, drop = TRUE], k, fill = fill, align = align, ...)
	  }), all = FALSE))
	  if (ncol(x) == 1) dim(out) <- c(length(out), 1)
	  colnames(out) <- colnames(x)
	  return(out)
  }
  ix <- switch(align,
      "left" = { 1:(n-k+1) },
      "center" = { floor((1+k)/2):ceiling(n-k/2) },
      "right" = { k:n })
  m <- k %/% 2
  rval <- runmed(x, k, ...)
  attr(rval, "k") <- NULL
  if(m >= 1) rval <- rval[-c(1:m, (n-m+1):n)]
  x[ix] <- rval
  na.fill(x, fill = fill, ix)
}
rollmedian.default <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		
		coredata(rollmedian(zoo(x), k, fill = fill, align = align, ...))
}
rollmedian.ts <- function(x, k, fill = if (na.pad) NA, na.pad = FALSE, 
	align = c("center", "left", "right"), ...)
{
		if (length(x) < 1L) return(x)
		
		as.ts(rollmedian(as.zoo(x), k, fill = fill, align = align, ...))
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/rollmean.R 
 | 
					
	window.zoo <- function(x, index. = index(x), start = NULL, end = NULL, ...)
{
  all.indexes <- index(x)
  in.index <- MATCH(all.indexes, index., nomatch = 0) > 0
  if(length(start) == 2 && !is.null(attr(x, "frequency")) && is.numeric(all.indexes)) {
    freq <- attr(x, "frequency")
    start <- floor(start[1]*freq + (start[2] - 1) + .0001)/freq
  }
  if(length(end) == 2 && !is.null(attr(x, "frequency")) && is.numeric(all.indexes)) {
    freq <- attr(x, "frequency")
    end <- floor(end[1]*freq + (end[2] - 1) + .0001)/freq
  }
  if(is.null(start) || any(is.na(start))) {
    if(is.null(end) || any(is.na(end))) {
      wi <- which(MATCH(all.indexes, index., nomatch = 0) > 0)
      return(x[wi, , drop = FALSE])
    } else {
      wi <- which(in.index & all.indexes <= end)
      return(x[wi, , drop = FALSE])
    }
  } else {
    if(is.null(end) || any(is.na(end))) {
      wi <- which(in.index & all.indexes >= start)
    } else {
      wi <- which(in.index & all.indexes >= start & all.indexes <= end)
    }
    return(x[wi, , drop = FALSE])
  }
}
"window<-.zoo" <- function(x, index. = index(x), start = NULL, end = NULL, ..., value)
{
  ix <- index(x)
  stopifnot(all(MATCH(index., ix, nomatch = 0) > 0))
  
  if(length(start) == 2 && !is.null(attr(x, "frequency")) && is.numeric(ix)) {
    freq <- attr(x, "frequency")
    start <- floor(start[1]*freq + (start[2] - 1) + .0001)/freq
  }
  if(length(end) == 2 && !is.null(attr(x, "frequency")) && is.numeric(ix)) {
    freq <- attr(x, "frequency")
    end <- floor(end[1]*freq + (end[2] - 1) + .0001)/freq
  }
  
  if (!is.null(start) && !is.na(start)) index. <- index.[index. >= start]
  if (!is.null(end) && !is.na(end)) index. <- index.[index. <= end]
  wi <- which(MATCH(ix, index., nomatch = 0) > 0)
  if (length(dim(x)) == 0)
  	  x[wi] <- value
  else
  	  x[wi,] <- value
  return(x)
}
 
lag.zoo <- function(x, k = 1, na.pad = FALSE, ...)
{
   if (length(k) > 1) {
	if (is.null(names(k))) names(k) <- paste("lag", k, sep = "")
	return(do.call("merge.zoo", lapply(k, lag.zoo, x = x, na.pad = na.pad, ...)))
   }
   nr <- NROW(x)
   if (k != round(k)) {
	k <- round(k)
	warning("k is not an integer")
   }
   if (k == 0) return(x)
   if (abs(k) > nr) k <- nr
   if (k > 0)  {
	   xx <- x[-seq(1, length.out = k),, drop = FALSE]
	   attr(xx, "index") <- index(x)[-seq(to = nr, length.out = k)]
   } else {
	   xx <- x[-seq(to = nr, length.out = -k),, drop = FALSE]
	   attr(xx, "index") <- index(x)[-seq(1, length.out = -k)]
   }
   if (na.pad) merge(zoo(,time(x)), xx, all = c(TRUE, FALSE)) else xx
}
lag.zooreg <- function(x, k = 1, na.pad = FALSE, ...)
{
   if (length(k) > 1) {
	if (is.null(names(k))) names(k) <- paste("lag", k, sep = "")
	return(do.call("merge.zoo", lapply(k, lag.zooreg, x = x, na.pad = na.pad, ...)))
   }
   x0 <- x
   nr <- NROW(x)
   freq <- attr(x, "frequency")
   
   if (k != round(k)) warning("k is not an integer")
   k <- round(k)
   ix <- index(x)
   ix <- if(identical(class(ix), "numeric") || identical(class(ix), "integer"))
     floor(freq*ix - k + .0001)/freq else ix - k/freq
   index(x) <- ix
   if (na.pad) merge(x, zoo(, time(x0))) else x
}
diff.zoo <- function(x, lag = 1, differences = 1, arithmetic = TRUE, na.pad = FALSE, ...)
{
    ix <- index(x)
    stopifnot(differences >= 1)
    
    ## for relative differences, use division only if 'x' contains non-positive
    ## values; otherwise use the numerically more stable log transform
    if(arithmetic || all(coredata(x) > 0)) {
      if (!arithmetic) x <- log(x)
      if (lag > 0) for(i in 1:differences) x <- x - lag(x, k = -lag, ...) else for(i in 1:differences) x <- lag(x, k = -lag, ...) - x
      if (!arithmetic) x <- exp(x)
    } else {
      if (lag > 0) for(i in 1:differences) x <- x / lag(x, k = -lag, ...) else for(i in 1:differences) x <- lag(x, k = -lag, ...) / x    
    }
    if (na.pad) merge(zoo(,ix), x, all = c(TRUE, FALSE)) else x
}
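## Example sketch: arithmetic vs. relative differences (expected in comments).
z <- zoo(c(2, 4, 8, 16))
diff(z)                      # 2 4 8 at times 2:4  (x - lag(x, k = -1))
diff(z, arithmetic = FALSE)  # 2 2 2 at times 2:4  (ratios, via log/exp here)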
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/window.zoo.R 
 | 
					
	
xblocks <- function(x, ...)
    UseMethod("xblocks")
xblocks.default <-
    function (x, y, ..., col = NULL, border = NA, 
              ybottom = par("usr")[3], ytop = ybottom + height,
              height = diff(par("usr")[3:4]),
              last.step = median(diff(tail(x))))
{
    if (is.function(y))
        y <- y(x)
    x <- as.numeric(x)
    if (length(x) == 0) return()
    if (is.unsorted(x, na.rm = TRUE))
        stop("'x' should be ordered (increasing)")
    if (is.na(last.step))
        last.step <- 0
    ## Three cases:
    ## (1) If y is character, assume it gives the block colours
    ## -- unless 'col' is given, which over-rides it.
    ## (2) If y is logical, show blocks of TRUE values.
    ## (3) If y is numeric, show blocks of non-NA values.
    if (is.logical(y)) {
        y <- y
    } else if (is.numeric(y)) {
        y <- !is.na(y)
    } else {
        ## this will convert factor, Date, etc to character:
        y <- as.character(y)
    }
    ## Note: rle treats each NA as unique (does not combine runs of NAs)
    ## so we need to replace NAs with a temporary value.
    NAval <-
        if (is.character(y)) "" else FALSE
    y[is.na(y)] <- NAval
    ## find blocks (runs of constant values)
    yrle <- rle(y)
    ## substitute NA values back in
    blockCol <- yrle$values
    blockCol[blockCol == NAval] <- NA
    ## for logical series, col default comes from palette()
    if (is.logical(y) && is.null(col))
        col <- palette()[1]
    ## set block colours if 'col' given
    if (length(col) > 0) {
        if (is.character(col))
            col[col == ""] <- NA
        ok <- !is.na(blockCol)
        blockCol[ok] <- rep(col, length.out = sum(ok)) ## rep to avoid warnings
    }
    ## work out block geometry
    idxBounds <- cumsum(c(1, yrle$lengths))
    idxStart <- head(idxBounds, -1)
    idxEnd <- tail(idxBounds, -1)
    idxEnd[length(idxEnd)] <- length(y)
    blockStart <- x[idxStart]
    blockEnd <- x[idxEnd]
    blockEnd[length(blockEnd)] <- tail(blockEnd, 1) + last.step
    blockWidth <- blockEnd - blockStart
    ## adjust for log scales
    if (par("ylog")) {
        ybottom <- 10^ybottom
        ytop <- 10^ytop
    }
    ## draw it
    rect(xleft = blockStart, xright = blockEnd,
         ybottom = ybottom, ytop = ytop,
         col = blockCol, border = border, ...)
}
xblocks.zoo <-
xblocks.ts <-
    function(x, y = x, ...)
{
    if (!is.function(y))
        y <- coredata(y)
    xblocks(index(x), y, ...)
}
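## Example sketch: shade the stretches where a series is positive
## (a logical y marks blocks of TRUE values).
z <- zoo(c(-1, 2, 3, -2, 4), 1:5)
plot(z, type = "n")
xblocks(z, z > 0, col = "lightgray")
lines(z)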
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/R/xblocks.R 
 | 
					
	panel.plot.default <- function(x, y, subscripts, groups, panel = panel.xyplot,
  col = 1, type = "p", pch = 20, lty = 1, lwd = 1, ...)
{
  col <- rep(as.list(col), length.out = nlevels(groups))
  type <- rep(as.list(type), length.out = nlevels(groups))
  pch <- rep(as.list(pch), length.out = nlevels(groups))
  lty <- rep(as.list(lty), length.out = nlevels(groups))
  lwd <- rep(as.list(lwd), length.out = nlevels(groups))
  for(g in 1:nlevels(groups)) {
    idx <- g == unclass(groups[subscripts])
    if (any(idx)) panel(x[idx], y[idx], ...,
      col = col[[g]], type = type[[g]], pch = pch[[g]],
      lty = lty[[g]], lwd = lwd[[g]])
  }
  .Deprecated(msg="panel.plot.default is no longer needed, just use panel.xyplot etc")
}
panel.plot.custom <- function(...) {
  args <- list(...)
  function(...) {
    dots <- list(...)
    do.call("panel.plot.default", modifyList(dots, args))
  }
}
xyplot.its <-
xyplot.ts <-
xyplot.zoo <- function(x, data, ...)
{
    obj <- lattice::xyplot.ts(as.zoo(x), ...)
    obj$call <- match.call()
    obj
}
xyplot.tis <- function(x, data, ...)
{
    x <- aggregate(as.zoo(x), tis::POSIXct, identity)
    obj <- lattice::xyplot.ts(x, ...)
    obj$call <- match.call()
    obj
}
llines.its <-
llines.tis <-
llines.zoo <- function(x, y = NULL, ...)
{
    if (!is.null(y)) {
        llines(coredata(x), y = y, ...)
    } else {
        llines(coredata(time(x)), y = coredata(x), ...)
    }
}
lpoints.its <-
lpoints.tis <-
lpoints.zoo <- function(x, y = NULL, ...)
{
    if (!is.null(y)) {
        lpoints(coredata(x), y = y, ...)
    } else {
        lpoints(coredata(time(x)), y = coredata(x), ...)
    }
}
ltext.its <-
ltext.tis <-
ltext.zoo <- function(x, y = NULL, ...)
{
    if (!is.null(y)) {
        ltext(coredata(x), y = y, ...)
    } else {
        ltext(coredata(time(x)), y = coredata(x), ...)
    }
}
panel.lines.ts <- 
panel.lines.its <-
panel.lines.tis <-
panel.lines.zoo <- function(x, ...) {
  x <- as.zoo(x)
  panel.lines(time(x), coredata(x), ...)
  .Deprecated("panel.lines")
}
panel.points.ts <- 
panel.points.its <-
panel.points.tis <-
panel.points.zoo <- function(x, ...) {
  x <- as.zoo(x)
  panel.points(time(x), coredata(x), ...)
  .Deprecated("panel.points")
}
panel.text.ts <- 
panel.text.its <-
panel.text.tis <-
panel.text.zoo <- function(x, ...) {
  x <- as.zoo(x)
  panel.text(time(x), coredata(x), ...)
  .Deprecated("panel.text")
}
panel.segments.ts <- 
panel.segments.its <-
panel.segments.tis <-
panel.segments.zoo <- function(x0, x1, ...) {
  x0 <- as.zoo(x0)
  x1 <- as.zoo(x1)
  panel.segments(time(x0), coredata(x0), time(x1), coredata(x1), ...)
}
panel.rect.ts <- 
panel.rect.its <-
panel.rect.tis <-
panel.rect.zoo <- function(x0, x1, ...) {
  x0 <- as.zoo(x0)
  x1 <- as.zoo(x1)
  panel.rect(time(x0), coredata(x0), time(x1), coredata(x1), ...)
}
panel.polygon.ts <- 
panel.polygon.its <-
panel.polygon.tis <-
panel.polygon.zoo <- function(x, ...) {
  x <- as.zoo(x)
  panel.polygon(time(x), coredata(x), ...)
}
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/xyplot.zoo.R
	## class creation
yearmon <- function(x) structure(floor(12*x + .0001)/12, class = "yearmon")
## coercion to yearmon: always go via numeric
as.yearmon <- function(x, ...) UseMethod("as.yearmon")
as.yearmon.default <- function(x, ...) as.yearmon(as.numeric(x))
as.yearmon.numeric <- function(x, ...) yearmon(x)
as.yearmon.integer <- function(x, ...) structure(x, class = "yearmon")
as.yearmon.yearqtr <- function(x, frac = 0, ...) {
    if (frac == 0) yearmon(as.numeric(x)) else
    as.yearmon(as.Date(x, frac = frac), ...)
}
as.yearmon.dates <- 
as.yearmon.Date <- 
as.yearmon.POSIXt <- function(x, ...) as.yearmon(with(as.POSIXlt(x, tz="GMT"), 1900 + year + mon/12))
# as.jul.yearmon <- function(x, ...) jul(as.Date(x, ...)) # jul is from tis pkg
as.yearmon.mondate <-
as.yearmon.timeDate <-
as.yearmon.jul <- function(x, ...) as.yearmon(as.Date(x, ...))
as.yearmon.factor <- function(x, ...) as.yearmon(as.character(x), ...)
as.yearmon.character <- function(x, format = "", ...) {
   if (format == "") {
        nch <- nchar(gsub("[^-]", "", x))
		nch[is.na(x)] <- NA
		nch <- na.omit(nch)
        if (length(table(nch)) != 1) 
            stop("yearmon variable can only have one format")
		format <- if (all(nch == 0)) "%B %Y"
		else if (all(nch == 1)) "%Y-%m" else "%Y-%m-%d"
   }
   has.short.keys <- rep(regexpr("%[mbByY%]", format) > 0, length(x))
   has.no.others <- regexpr("%", gsub("%[mbByY%]", "", format)) < 0
   z <- ifelse(has.short.keys & has.no.others,
      as.Date( paste("01", x, sep = "-"), paste("%d", format, sep = "-"), ... ),
      as.Date(x, format, ...))
   as.yearmon(as.Date(z, origin = "1970-01-01"))
}
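## Parsing sketch (an editor's addition, assuming zoo is attached): with
## format = "" the number of "-" separators selects the format, so in an
## English locale:
as.yearmon("March 2012")  # 0 dashes -> "%B %Y"
as.yearmon("2012-03")     # 1 dash   -> "%Y-%m"
as.yearmon("2012-03-15")  # 2 dashes -> "%Y-%m-%d" (the day is dropped)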
as.yearmon.ti <- function(x, ...) as.yearmon(as.Date(x), ...)
## coercion from yearmon
# returned Date is the fraction of the way through the period given by frac
as.Date.yearmon <- function(x, frac = 0, ...) {     
  x <- unclass(x)
  if(all(is.na(x))) return(as.Date(x))
  year <- floor(x + .001)
  ix <- !is.na(year)
  month <- floor(12 * (x - year) + 1 + .5 + .001)
  dd.start <- as.Date(rep(NA, length(year)))
  dd.start[ix] <- as.Date(paste(year[ix], month[ix], 1, sep = "-")) 
  dd.end <- dd.start + 32 - as.numeric(format(dd.start + 32, "%d"))
  as.Date((1-frac) * as.numeric(dd.start) + frac * as.numeric(dd.end), origin = "1970-01-01")
}
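## Coercion sketch (an editor's addition, assuming zoo is attached):
## 'frac' interpolates between the first and the last day of the month.
as.Date(as.yearmon("2012-03"), frac = 0)  # "2012-03-01"
as.Date(as.yearmon("2012-03"), frac = 1)  # "2012-03-31"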
as.POSIXct.yearmon <- function(x, tz = "", ...) as.POSIXct(as.Date(x), tz = tz, ...)
as.POSIXlt.yearmon <- function(x, tz = "", ...) as.POSIXlt(as.Date(x), tz = tz, ...)
as.list.yearmon <- function(x, ...) lapply(seq_along(x), function(i) x[i])
as.numeric.yearmon <- function(x, ...) unclass(x)
as.character.yearmon <- function(x, ...) format.yearmon(x, ...)
as.data.frame.yearmon <- function(x, row.names = NULL, optional = FALSE, ...) 
{
  nrows <- length(x)
  nm <- paste(deparse(substitute(x), width.cutoff = 500), collapse = " ")
  if (is.null(row.names)) {
    if (nrows == 0) 
        row.names <- character(0)
    else if(length(row.names <- names(x)) == nrows && !any(duplicated(row.names))) {
    }
    else if(optional) row.names <- character(nrows)
    else row.names <- seq_len(nrows)
  }
  names(x) <- NULL
  value <- list(x)
  if(!optional) names(value) <- nm
  attr(value, "row.names") <- row.names
  class(value) <- "data.frame"
  value
}
## other methods for class yearmon
c.yearmon <- function(...)
    as.yearmon(do.call("c", lapply(list(...), as.numeric)))
cycle.yearmon <- function(x, ...) round(12 * (as.numeric(x) %% 1)) + 1
format.yearmon <- function(x, format = "%b %Y", ...) 
{
    if (length(x) == 0) return(character(0))
    xx <- format(as.Date(x), format = format, ...)
    names(xx) <- names(x)
    xx
}
print.yearmon <- function(x, ...) { 
    print(format(x), ...)
    invisible(x) 
}
months.yearmon <- function(x, abbreviate = FALSE) {
    months(as.Date(x), abbreviate = abbreviate)
}
quarters.yearmon <- function(x, abbreviate = FALSE) {
    quarters(as.Date(x), abbreviate = abbreviate)
}
"[.yearmon" <- function (x, ..., drop = TRUE) 
{
    cl <- oldClass(x)
    class(x) <- NULL
    val <- NextMethod("[")
    class(val) <- cl
    val
}
"[[.yearmon" <- function (x, ..., drop = TRUE) 
{
    cl <- oldClass(x)
    class(x) <- NULL
    val <- NextMethod("[[")
    class(val) <- cl
    val
}
MATCH.yearmon <- function(x, table, nomatch = NA, ...)
    match(floor(12*as.numeric(x) + .001), floor(12*as.numeric(table) + .001), nomatch = nomatch, ...)
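## Matching sketch (an editor's addition, assuming zoo is attached):
## comparison happens on whole months, with a .001 fuzz guarding against
## floating point error in the year + (month - 1)/12 representation.
MATCH(as.yearmon("2012-03"), as.yearmon(c("2012-01", "2012-03")))  # 2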
Ops.yearmon <- function(e1, e2) {
    e1 <- as.numeric(as.yearmon(e1))
    e2 <- as.numeric(as.yearmon(e2))
    rval <- NextMethod(.Generic)
    if(is.numeric(rval)) rval <- yearmon(rval)
    return(rval)
}
"-.yearmon" <- function (e1, e2) 
{
    if (!inherits(e1, "yearmon")) 
        stop("Can only subtract from yearmon objects")
    if (nargs() == 1) 
	return(- as.numeric(e1))
    if (inherits(e2, "yearmon")) 
        return(as.numeric(e1) - as.numeric(e2))
    if (!is.null(attr(e2, "class"))) 
      stop("can only subtract yearmon objects and numbers from yearmon objects")
    yearmon(unclass(e1) - e2)
}
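## Arithmetic sketch (an editor's addition, assuming zoo is attached):
## a yearmon is stored as year + (month - 1)/12, so 1/12 is one month.
ym <- as.yearmon("2012-01")
ym + 1/12                    # "Feb 2012", via Ops.yearmon
as.yearmon("2012-03") - ym   # 0.1666667, i.e. two months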
is.numeric.yearmon <- function(x) FALSE
Axis.yearmon <- function(x = NULL, at = NULL, ..., side, labels = NULL)
    axis.yearmon(x = x, at = at, ..., side = side, labels = TRUE)
axis.yearmon <- function (side, x, at, format, labels = TRUE, ..., N1 = 25, N2 = 2) {
    # If years in range > N1 then only years shown.  
    # If years in range > N2 then month ticks are not labelled.
    mat <- missing(at) || is.null(at)
    if (!mat) # at not missing
        x <- as.yearmon(at)
    else x <- as.yearmon(x)
    range <- par("usr")[if (side%%2) 
        1:2
    else 3:4]
    # range[1] <- ceiling(range[1])
    # range[2] <- floor(range[2])
    d <- range[2] - range[1]
    z <- c(range, x[is.finite(x)])
    class(z) <- "yearmon"
    if (d > N1) { # axis has years only
        z <- structure(pretty(z), class = "yearmon")
    } else if (d > N2) { # axis has all years and unlabelled months
        z <- seq(min(x), max(x), 1/12)
	# z <- seq(floor(min(x)), ceiling(max(x)))
    } else { # years and months
        z <- seq(min(x), max(x), 1/12)
    }
    if (!mat) 
        z <- x[is.finite(x)]
    z <- z[z >= range[1] & z <= range[2]]
    z <- sort(unique(z))
    class(z) <- "yearmon"
    if (identical(labels, TRUE)) {
	if (missing(format)) format <- c("%Y", "%b")
	if (length(format) == 1) format <- c(format, "")
	labels <- if (d <= N2) format.yearmon(z, format = format[2])
    else rep(NA, length(z))
	idx <- format.yearmon(z, format = "%m") == "01"
	labels[idx] <- format.yearmon(z[idx], format = format[1])
    } else if (identical(labels, FALSE)) 
        labels <- rep("", length(z))
    axis(side, at = z, labels = labels, ...)
}
summary.yearmon <- function(object, ...)
  summary(as.numeric(object), ...)
###
## convert from package date
as.yearmon.date <- function(x, ...) {
	as.yearmon(as.Date(x, ...))
}
mean.yearmon <- function (x, ...) as.yearmon(mean(unclass(x), ...))
Summary.yearmon <- function (..., na.rm)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if (!ok) stop(.Generic, " not defined for yearmon objects")
    val <- NextMethod(.Generic)
    class(val) <- oldClass(list(...)[[1]])
    val
}
Sys.yearmon <- function() as.yearmon(Sys.Date())
range.yearmon <- function(..., na.rm = FALSE) {
	as.yearmon(range.default(..., na.rm = na.rm))
}
unique.yearmon <- function(x, incomparables = FALSE, ...) {
	as.yearmon(unique.default(x, incomparables = incomparables, ...))
}
xtfrm.yearmon <- function(x) as.numeric(x)
diff.yearmon <- function(x, ...) as.numeric(NextMethod())
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/yearmon.R
	## class creation
yearqtr <- function(x) structure(floor(4*x + .001)/4, class = "yearqtr")
## coercion to yearqtr: always go via numeric
as.yearqtr <- function(x, ...) UseMethod("as.yearqtr")
as.yearqtr.default <- function(x, ...) as.yearqtr(as.numeric(x))
as.yearqtr.numeric <- function(x, ...) structure(floor(4*x + .0001)/4, class = "yearqtr")
as.yearqtr.integer <- function(x, ...) structure(x, class = "yearqtr")
# as.jul.yearqtr <- function(x, ...) jul(as.Date(x, ...)) # jul is from tis
as.yearqtr.mondate <-
as.yearqtr.jul <- # jul is in tis package
as.yearqtr.timeDate <-
as.yearqtr.dates <-
as.yearqtr.Date <- 
as.yearqtr.POSIXt <- function(x, ...) as.yearqtr(as.yearmon(x))
as.yearqtr.yearqtr <- function(x, ...) x
as.yearqtr.factor <- function(x, ...) as.yearqtr(as.character(x), ...)
as.yearqtr.character <- function(x, format, ...) {
    non.na <- x[!is.na(x)]
    if (length(non.na) == 0) 
        return(structure(rep(NA, length(x)), class = "yearqtr"))
    if (missing(format) || format == "") {
        format <- if (all(regexpr("q", non.na) > 0))  { "%Y q%q"
        } else if (all(regexpr("Q", non.na) > 0)) { "%Y Q%q"
        } else "%Y-%q"
    }
    y <- if (regexpr("%[qQ]", format) > 0) {
        format <- sub("%q", "%m", format)
        y <- as.numeric(as.yearmon(x, format))
        m0 <- round(12 * (y %% 1))
        floor(y) + ifelse(m0 > 3, NA, m0/4)
    } else as.yearmon(x, format)
    as.yearqtr(y)
}
as.yearqtr.ti <- function(x, ...) as.yearqtr(as.Date(x), ...)
## coercion from yearqtr
# returned Date is the fraction of the way through the period given by frac
as.Date.yearqtr <- function(x, frac = 0, ...) {
  x <- unclass(x)
  if(all(is.na(x))) return(as.Date(x))
  year <- floor(x + .001)
  ix <- !is.na(year)
  month <- floor(12 * (x - year) + 1 + .5 + .001)
  dd.start <- as.Date(rep(NA, length(year)))
  dd.start[ix] <- as.Date(paste(year[ix], month[ix], 1, sep = "-")) 
  dd.end <- dd.start + 100 - as.numeric(format(dd.start + 100, "%d")) 
  as.Date((1-frac) * as.numeric(dd.start) + frac * as.numeric(dd.end), origin = "1970-01-01")
}
as.POSIXct.yearqtr <- function(x, tz = "", ...) as.POSIXct(as.Date(x), tz = tz, ...)
as.POSIXlt.yearqtr <- function(x, tz = "", ...) as.POSIXlt(as.Date(x), tz = tz, ...)
as.list.yearqtr <- function(x, ...) lapply(seq_along(x), function(i) x[i])
as.numeric.yearqtr <- function(x, ...) unclass(x)
as.character.yearqtr <- function(x, ...) format.yearqtr(x, ...)
as.data.frame.yearqtr <- function(x, row.names = NULL, optional = FALSE, ...) 
{
  nrows <- length(x)
  nm <- paste(deparse(substitute(x), width.cutoff = 500), collapse = " ")
  if (is.null(row.names)) {
    if (nrows == 0) 
        row.names <- character(0)
    else if(length(row.names <- names(x)) == nrows && !any(duplicated(row.names))) {
    }
    else if(optional) row.names <- character(nrows)
    else row.names <- seq_len(nrows)
  }
  names(x) <- NULL
  value <- list(x)
  if(!optional) names(value) <- nm
  attr(value, "row.names") <- row.names
  class(value) <- "data.frame"
  value
}
## other methods for class yearqtr
c.yearqtr <- function(...) {
    as.yearqtr(do.call("c", lapply(list(...), as.numeric)))
}
cycle.yearqtr <- function(x, ...) round(4 * (as.numeric(x) %% 1)) + 1
format.yearqtr <- function(x, format = "%Y Q%q", ...) 
{
  if (length(x) == 0) return(character(0))
  # like gsub but replacement and x may be vectors the same length
  gsub.vec <- function(pattern, replacement, x, ...) {
    y <- x
    for(i in seq_along(x)) {
      y[i] <- gsub(pattern, replacement[i], x[i], ...)
    }
    y
  }
  x <- as.yearqtr(x)
  x <- unclass(x)
  year <- floor(x + .001)
  qtr <- floor(4*(x - year) + 1 + .5 + .001)
  xx <- if (format == "%Y Q%q") {
    paste(year, " Q", qtr, sep = "")
  } else {
    # TODO: speed up the following
    xx <- gsub.vec("%q", qtr, rep.int(format, length(qtr)))
    xx <- gsub.vec("%Y", year, xx)
    xx <- gsub.vec("%y", sprintf("%02d", as.integer(year %% 100)), xx)
    xx <- gsub.vec("%C", year %/% 100, xx)
    xx
  }
  names(xx) <- names(x)
  xx[is.na(x)] <- NA_character_
  return(xx)
}
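## Formatting sketch (an editor's addition, assuming zoo is attached):
## "%q" is the quarter digit.
q <- as.yearqtr(2012.25)     # second quarter of 2012
format(q)                    # "2012 Q2" (the fast default path)
format(q, format = "%y.%q")  # "12.2"  (the gsub-based path)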
months.yearqtr <- function(x, abbreviate = FALSE) {
    months(as.Date(x), abbreviate = abbreviate)
}
quarters.yearqtr <- function(x, abbreviate = FALSE) {
    quarters(as.Date(x), abbreviate = abbreviate)
}
print.yearqtr <- function(x, ...) { 
    print(format(x), ...)
    invisible(x) 
}
"[.yearqtr" <- function (x, ..., drop = TRUE) 
{
    cl <- oldClass(x)
    class(x) <- NULL
    val <- NextMethod("[")
    class(val) <- cl
    val
}
"[[.yearqtr" <- function (x, ..., drop = TRUE) 
{
    cl <- oldClass(x)
    class(x) <- NULL
    val <- NextMethod("[[")
    class(val) <- cl
    val
}
MATCH.yearqtr <- function(x, table, nomatch = NA, ...)
    match(floor(4*as.numeric(x) + .001), floor(4*as.numeric(table) + .001), nomatch = nomatch, ...)
Ops.yearqtr <- function(e1, e2) {
    e1 <- as.numeric(as.yearqtr(e1))
    e2 <- as.numeric(as.yearqtr(e2))
    rval <- NextMethod(.Generic)
    if(is.numeric(rval)) rval <- yearqtr(rval)
    return(rval)
}
"-.yearqtr" <- function (e1, e2) 
{
    if (!inherits(e1, "yearqtr")) 
        stop("Can only subtract from yearqtr objects")
    if (nargs() == 1) 
	return(- as.numeric(e1))
    if (inherits(e2, "yearqtr")) 
        return(as.numeric(e1) - as.numeric(e2))
    if (!is.null(attr(e2, "class"))) 
      stop("can only subtract yearqtr objects and numbers from yearqtr objects")
    yearqtr(unclass(e1) - e2)
}
is.numeric.yearqtr <- function(x) FALSE
Axis.yearqtr <- function(x = NULL, at = NULL, ..., side, labels = NULL)
    axis.yearqtr(x = x, at = at, ..., side = side, labels = TRUE)
axis.yearqtr <- function (side, x, at, format, labels = TRUE, ..., N1 = 25, N2 = 7) {
    # If years in range > N1 then only years shown.  
    # If years in range > N2 then quarter ticks are not labelled.
    mat <- missing(at) || is.null(at)
    if (!mat) # at not missing
        x <- as.yearqtr(at)
    else x <- as.yearqtr(x)
    range <- par("usr")[if (side%%2) 
        1:2
    else 3:4]
    # range[1] <- ceiling(range[1])
    # range[2] <- floor(range[2])
    d <- range[2] - range[1]
    z <- c(range, x[is.finite(x)])
    class(z) <- "yearqtr"
    if (d > N1) { # axis has years only
        z <- structure(pretty(z), class = "yearqtr")
    } else if (d > N2) { # axis has all years and unlabelled quarters
        z <- seq(min(x), max(x), 0.25)
	# z <- seq(floor(min(x)), ceiling(max(x)))
    } else { # years and quarters
        z <- seq(min(x), max(x), 0.25)
    }
    if (!mat) 
        z <- x[is.finite(x)]
    z <- z[z >= range[1] & z <= range[2]]
    z <- sort(unique(z))
    class(z) <- "yearqtr"
    if (identical(labels, TRUE)) {
	if (missing(format)) format <- c("%Y", "Q%q")
	if (length(format) == 1) format <- c(format, "")
	labels <- if (d <= N2) format.yearqtr(z, format = format[2])
    else rep(NA, length(z))
	idx <- format.yearqtr(z, format = "%q") == "1"
	labels[idx] <- format.yearqtr(z[idx], format = format[1])
    } else if (identical(labels, FALSE)) 
        labels <- rep("", length(z))
    axis(side, at = z, labels = labels, ...)
}
summary.yearqtr <- function(object, ...)
  summary(as.numeric(object), ...)
## convert from package date
as.yearqtr.date <- function(x, ...) {
	as.yearqtr(as.Date(x, ...))
}
mean.yearqtr <- function (x, ...) as.yearqtr(mean(unclass(x), ...))
Summary.yearqtr <- function (..., na.rm)
{
    ok <- switch(.Generic, max = , min = , range = TRUE, FALSE)
    if (!ok) stop(.Generic, " not defined for yearqtr objects")
    val <- NextMethod(.Generic)
    class(val) <- oldClass(list(...)[[1]])
    val
}
Sys.yearqtr <- function() as.yearqtr(Sys.Date())
range.yearqtr <- function(..., na.rm = FALSE) {
	as.yearqtr(range.default(..., na.rm = na.rm))
}
unique.yearqtr <- function(x, incomparables = FALSE, ...) {
	as.yearqtr(unique.default(x, incomparables = incomparables, ...))
}
xtfrm.yearqtr <- function(x) as.numeric(x)
diff.yearqtr <- function(x, ...) as.numeric(NextMethod())
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/yearqtr.R
	zoo <- function (x = NULL, order.by = index(x), frequency = NULL,
  calendar = getOption("zoo.calendar", TRUE)) 
{
    ## process index "order.by"    
    if(length(unique(MATCH(order.by, order.by))) < length(order.by))
      warning(paste("some methods for", dQuote("zoo"),
      "objects do not work if the index entries in", sQuote("order.by"), "are not unique"))
    index <- ORDER(order.by)
    order.by <- order.by[index]
    if(is.matrix(x) || is.data.frame(x)) x <- as.matrix(x)
    if(is.matrix(x) && sum(dim(x)) < 1L) x <- NULL
    if(missing(x) || is.null(x))
      x <- numeric()
    else if(is.factor(x))         
      x <- factor(rep(as.character(x), length.out = length(index))[index],
        levels = levels(x), ordered = is.ordered(x))
    else if(is.matrix(x) || is.data.frame(x)) 
      x <- (x[rep(1:NROW(x), length.out = length(index)), , 
        drop = FALSE])[index, , drop = FALSE]
    else if(is.atomic(x)) 
      x <- rep(x, length.out = length(index))[index]
    else stop(paste(dQuote("x"), ": attempt to define invalid zoo object"))
    if(!is.null(frequency)) {
      delta <- suppressWarnings(try(diff(as.numeric(order.by)), silent = TRUE))
      freqOK <- if(inherits(delta, "try-error") || anyNA(delta)) FALSE
        else if(length(delta) < 1) TRUE
        else identical(all.equal(delta*frequency, round(delta*frequency)), TRUE)
      if(!freqOK) {
        warning(paste(dQuote("order.by"), "and", dQuote("frequency"),
        	"do not match:", dQuote("frequency"), "ignored"))
        frequency <- NULL
      } else {
        if(frequency > 1 && identical(all.equal(frequency, round(frequency)), TRUE))
	  frequency <- round(frequency)
      }
      if(!is.null(frequency) && (identical(class(order.by), "numeric") || identical(class(order.by), "integer"))) {
        orig.order.by <- order.by
        order.by <- floor(frequency * order.by + .0001)/frequency
        if(!isTRUE(all.equal(order.by, orig.order.by))) order.by <- orig.order.by
	if(calendar && frequency %in% c(4, 12)) {
	  order.by <- if(frequency == 4) as.yearqtr(order.by) else as.yearmon(order.by)	
	}
      }
    }
    attr(x, "oclass") <- attr(x, "class")
    attr(x, "index") <- order.by
    attr(x, "frequency") <- frequency
    class(x) <- if(is.null(frequency)) "zoo" else c("zooreg", "zoo")
    return(x)
}
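## Constructor sketch (an editor's addition, assuming zoo is attached):
## the index is sorted on creation, and a 'frequency' that fits the index
## turns the result into a "zooreg" series with a calendar index.
z <- zoo(1:3, as.Date(c("2000-01-03", "2000-01-01", "2000-01-02")))
index(z)                                        # sorted ascending
class(zoo(1:4, 2000 + (0:3)/4, frequency = 4))  # "zooreg" "zoo", yearqtr index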
print.zoo <- function (x, style = ifelse(length(dim(x)) == 0,
    "horizontal", "vertical"), quote = FALSE, ...) 
{
    style <- match.arg(style, c("horizontal", "vertical", "plain"))
    if (is.null(dim(x)) && length(x) == 0) style <- "plain"
    if (length(dim(x)) > 0 && style == "horizontal") style <- "plain"
    if (style == "vertical") {
	y <- as.matrix(coredata(x))
        if (length(colnames(x)) < 1) {
            colnames(y) <- rep("", NCOL(x))
        }
		if (NROW(y) > 0) {
			rownames(y) <- index2char(index(x), frequency = attr(x, "frequency"))
		}
        print(y, quote = quote, ...)
    }
    else if (style == "horizontal") {
        y <- as.vector(x)
        names(y) <- index2char(index(x), frequency = attr(x, "frequency"))
        print(y, quote = quote, ...)
    }
    else {
        cat("Data:\n")
        print(coredata(x))
        cat("\nIndex:\n")
        print(index(x))
    }
    invisible(x)
}
summary.zoo <- function(object, ...) 
{
	y <- as.data.frame(object, row.names = NULL)
	if (length(colnames(object)) < 1) {
		lab <- deparse(substitute(object))
		colnames(y) <- if (NCOL(object) == 1) lab
		  else paste(lab, 1:NCOL(object), sep=".")
	}
	if (NROW(y) > 0) {
		summary(cbind(data.frame(Index = index(object)), y), ...)
	} else summary(data.frame(Index = index(object)), ...)
}
is.zoo <- function(object)
  inherits(object, "zoo")
str.zoo <- function(object, ...)
{
  cls <- if(inherits(object, "zooreg")) "zooreg" else "zoo"
  if(NROW(object) < 1) cat(paste(sQuote(cls), "series (without observations)\n")) else {
    cat(paste(sQuote(cls), " series from ", start(object), " to ", end(object), "\n", sep = ""))
    cat("  Data:")
    str(coredata(object), ...)
    cat("  Index: ")
    str(index(object), ...)
    if(cls == "zooreg") cat(paste("  Frequency:", attr(object, "frequency"), "\n"))
  }
}
"[.zoo" <- function(x, i, j, drop = TRUE, ...)
{
  if(!is.zoo(x)) stop("method is only for zoo objects")
  rval <- coredata(x)
  n <- NROW(rval)
  n2 <- if(nargs() == 1) length(as.vector(rval)) else n
  if(missing(i)) i <- 1:n
  if (inherits(i, "matrix")) i <- as.vector(i)
  ## also support that i can be index:
  ## if i is not numeric/integer/logical, it is interpreted to be the index
  if (inherits(i, "logical"))
    i <- which(rep(i, length.out = n2))
  else if (inherits(i, "zoo") && inherits(coredata(i), "logical")) {
    i <- which(coredata(merge(zoo(,time(x)), i)))
  } else if(!((inherits(i, "numeric") || inherits(i, "integer")))) 
    i <- which(MATCH(index(x), i, nomatch = 0L) > 0L)
  
  if(length(dim(rval)) == 2) {
    drop. <- if (length(i) == 1) FALSE else drop
    rval <- if (missing(j)) rval[i, , drop = drop., ...]
      else rval[i, j, drop = drop., ...]
    if (drop && length(rval) == 1) rval <- c(rval)
    rval <- zoo(rval, index(x)[i])
  } else
    rval <- zoo(rval[i], index(x)[i])
  attr(rval, "oclass") <- attr(x, "oclass")
  attr(rval, "levels") <- attr(x, "levels")
  attr(rval, "frequency") <- attr(x, "frequency")
  if(!is.null(attr(rval, "frequency"))) class(rval) <- c("zooreg", class(rval))
  return(rval)
}
"[<-.zoo" <- function (x, i, j, value) 
{
  ## x[,j] <- value and x[] <- value can be handled by default method
  if(missing(i)) return(NextMethod("[<-"))
  ## otherwise do the necessary processing on i
  n <- NROW(coredata(x))
  n2 <- if(nargs() == 1) length(as.vector(coredata(x))) else n
  n.ok <- TRUE
  value2 <- NULL
  
  if (inherits(i, "matrix")) i <- as.vector(i)
  if (inherits(i, "logical")) {
    if (length(i) == n) {
		i <- which(i)
		n.ok <- TRUE
	} else {
		i <- which(rep(i, length.out = n))
		n.ok <- all(i <= n2)
	}
  } else if (inherits(i, "zoo") && inherits(coredata(i), "logical")) {
    i <- which(coredata(merge(zoo(,time(x)), i)))
    n.ok <- all(i <= n2)
  } else if(!((inherits(i, "numeric") || inherits(i, "integer")))) {
    ## all time indexes in index(x)?
    i.ok <- MATCH(i, index(x), nomatch = 0L) > 0L
    if(any(!i.ok)) {
      if(is.null(dim(value))) {
        value2 <- value[!i.ok]
        value <- value[i.ok]
      } else {
        value2 <- value[!i.ok,, drop = FALSE]
        value <- value[i.ok,, drop = FALSE]      
      }
      i2 <- i[!i.ok]
      i <- i[i.ok]
    }
    i <- which(MATCH(index(x), i, nomatch = 0L) > 0L)
    n.ok <- all(i <= n2)
  }
  if(!n.ok | any(i < 1)) stop("Out-of-range assignment not possible.")
  rval <- NextMethod("[<-")
  if(!is.null(value2)) {
    rval2 <- if(missing(j)) zoo(value2, i2) else {
      value2a <- matrix(NA, nrow = length(i2), ncol = NCOL(rval))
      colnames(value2a) <- colnames(rval)
      value2a[, j] <- value2
      zoo(value2a, i2)
    }
    rval <- c(rval, rval2)
  }
  return(rval)
}
.DollarNames.zoo <- function(x, pattern = "") {
  dn <- dimnames(x)
  if(is.null(dn)) {
    character(0)
  } else {
    cn <- dn[[2]]
    if(is.null(cn)) {
      character(0)
    } else {
      grep(pattern, cn, value = TRUE)
    }
  }
}
"$.zoo" <- function(object, x) {
  if(length(dim(object)) != 2) stop("not possible for univariate zoo series")
  if(is.null(colnames(object))) stop("only possible for zoo series with column names")
  wi <- pmatch(x, colnames(object))
  if(is.na(wi)) NULL else object[, wi]
}
"$<-.zoo" <- function(object, x, value) {
  if(length(object) == 0L) {
    is.plain <- function(x)
      all(class(x) %in% c("array", "integer", "numeric", "factor", "matrix", "logical"))
    if(is.plain(value)) value <- zoo(value,
      if(length(index(object))) index(object) else seq_along(value), attr(object, "frequency"))
    return(setNames(merge(object, value, drop = FALSE), x))
  }
  if(length(dim(object)) != 2) stop("not possible for univariate zoo series")
  if(NCOL(object) > 0L && is.null(cnam <- colnames(object))) stop("only possible for zoo series with column names")
  wi <- match(x, cnam)
  if(is.na(wi)) {
    if(!is.null(value)) {
      object <- cbind(object, value)
      if(is.null(dim(object))) dim(object) <- c(length(object), 1)
      if(!identical(colnames(object), cnam)) colnames(object)[NCOL(object)] <- x  
    }
  } else {
    if(is.null(value)) {
      object <- object[, -wi, drop = FALSE]
    } else {   
      object[, wi] <- value
    }
  }
  object
}
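## Column assignment sketch (an editor's addition, assuming zoo is
## attached): $<- adds, replaces, or (with NULL) drops a column.
z <- zoo(cbind(a = 1:3, b = 4:6), as.Date("2000-01-01") + 0:2)
z$c <- z$a + z$b  # append a new column "c"
z$b <- NULL       # drop column "b"
colnames(z)       # "a" "c"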
head.zoo <- function(x, n = 6, ...) {
	stopifnot(length(n) == 1L)
	xlen <- NROW(x)
    n <- if (n < 0L) 
        max(NROW(x) + n, 0L)
    else min(n, xlen)
	if (length(dim(x)) == 0) x[seq_len(n)]
	else x[seq_len(n),, drop = FALSE]
}
 
tail.zoo <- function(x, n = 6, ...) {
    stopifnot(length(n) == 1L)
    xlen <- NROW(x)
    n <- if (n < 0L) 
        max(xlen + n, 0L)
    else min(n, xlen)
	if (length(dim(x)) == 0) x[seq.int(to = xlen, length.out = n)]
	else x[seq.int(to = xlen, length.out = n),, drop = FALSE]
}
range.zoo <- function(..., na.rm = FALSE)
    range(sapply(list(...), coredata), na.rm = na.rm)
scale.zoo <- function (x, center = TRUE, scale = TRUE) {
	x[] <- xs <- scale(coredata(x), center = center, scale = scale)
	attributes(x) <- c(attributes(x), attributes(xs))
	x
}
with.zoo <- function(data, expr, ...) {
    stopifnot(length(dim(data)) == 2)
    eval(substitute(expr), as.list(data), enclos = parent.frame())
}
xtfrm.zoo <- function(x) coredata(x)
subset.zoo <- function (x, subset, select, drop = FALSE, ...) 
{
    if (missing(select)) 
        vars <- TRUE
    else {
        nl <- as.list(1:ncol(x))
        names(nl) <- colnames(x)
        vars <- eval(substitute(select), nl, parent.frame())
    }
    if (missing(subset)) {
        subset <- rep(TRUE, NROW(x))
    } else {
        e <- substitute(subset)
	if("time" %in% colnames(x)) {
	  xdf <- as.data.frame(x)
          subset <- eval(e, xdf, parent.frame())
          xdf$time <- time(x)
          subset2 <- eval(e, xdf, parent.frame())
	  if(!identical(subset, subset2))
  	      warning("'time' is a column in 'x' (not the time index)")
	} else {
          subset <- eval(e, cbind(as.data.frame(x), time = time(x)), parent.frame())
	}
        if (!is.logical(subset)) stop("'subset' must be logical")
    }
    x[subset & !is.na(subset), vars, drop = drop]
}
names.zoo <- function(x) {
  cx <- coredata(x)
  if(is.matrix(cx)) colnames(cx) else names(cx)
}
"names<-.zoo" <- function(x, value) {
  if(is.matrix(coredata(x))) {
    colnames(x) <- value
  } else {
    names(coredata(x)) <- value
  }
  x
}
rev.zoo <- function(x) {
  ix <- rev(ORDER(time(x)))
  zoo(coredata(x), time(x)[ix])
}
ifelse.zoo <- function(test, yes, no) {
	if(!is.zoo(test)) test <- zoo(test, index(yes))
	## merge() with retclass = NULL returns nothing: it re-assigns 'test',
	## 'yes' and 'no' in this frame, aligned to their common index
	merge(test, yes, no, retclass = NULL)
	ifelse(test, yes, no)
}
mean.zoo <- function(x, ...)  mean(coredata(x), ...)
median.zoo <- if(getRversion() <= "3.3.3") {
  function(x, na.rm = FALSE) median(coredata(x), na.rm = na.rm)
} else {
  function(x, na.rm = FALSE, ...) median(coredata(x), na.rm = na.rm, ...)
}
quantile.zoo <- function(x, ...) quantile(coredata(x), ...)
transform.zoo <- function(`_data`, ...)
{
  ## turn zoo matrix into a list of zoo series
  if (is.null(dim(coredata(`_data`)))) warning("transform() is only useful for matrix-based zoo series")
  `_data` <- as.list(`_data`)
  
  ## evaluate transformations
  e <- eval(substitute(list(...)), `_data`, parent.frame())
  ## zoo series that are replaced
  inx <- match(names(e), names(`_data`))
  matched <- !is.na(inx)
  if (any(matched)) `_data`[inx[matched]] <- e[matched]
  ## merge zoo series (including those that are added)
  z <- do.call("merge", c(`_data`, e[!matched]))
  ## always return a zoo matrix (even if just one column)
  if(is.null(dim(coredata(z)))) {
    dim(z) <- c(length(z), 1L)
    names(z) <- names(e)
  }
  return(z)
}
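## transform() sketch (an editor's addition, assuming zoo is attached):
## the columns are in scope for the expressions, and the result is always
## a matrix-based zoo series.
z <- zoo(cbind(a = 1:3, b = 4:6), as.Date("2000-01-01") + 0:2)
transform(z, ratio = a / b)  # merges a new "ratio" column onto z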
`dim<-.zoo` <- function(x, value) {
  d <- dim(x)
  l <- length(x)
  ok <- isTRUE(all.equal(d, value)) ||                                  ## no change
    (is.null(d) && l == 0L && all(value == c(length(index(x)), 0L))) || ## zero-length vector -> 0-column matrix
    (is.null(d) && l > 0L && all(value == c(l, 1L))) ||                 ## positive-length vector -> 1-column matrix
    (!is.null(d) && d[2L] <= 1L && is.null(value))                      ## 0- or 1-column matrix -> vector
  if(!ok) warning("setting this dimension may lead to an invalid zoo object")
  NextMethod()
}
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/zoo.R
	zooreg <- function(data, start = 1, end = numeric(), frequency = 1, 
  deltat = 1, ts.eps = getOption("ts.eps"), order.by = NULL,
  calendar = getOption("zoo.calendar", TRUE))
{
    ## choose frequency/deltat
    if (missing(frequency)) frequency <- 1/deltat
    	else if(missing(deltat)) deltat <- 1/frequency
    if (frequency > 1 && abs(frequency - round(frequency)) < ts.eps)
    	frequency <- round(frequency)
    ## detect if integer index is intended
    intgr <- ((length(start) < 1L) || is.integer(start)) && ((length(end) < 1L) || is.integer(end))
    ## check data and choose default
    if (missing(data) || is.null(data)) data <- NA
    if(!(is.vector(data) || is.factor(data) || is.atomic(data) || is.matrix(data) || is.data.frame(data)))
      stop(paste(dQuote("data"), ": attempt to define invalid zoo object"))
    if(is.matrix(data) || is.data.frame(data)) data <- as.matrix(data)
    ## if no index (i.e., order.by) is specified: behave as ts()
    ## else: behave as zoo()
    if (is.null(order.by)) {
	if(!any(c(is.vector(data), is.factor(data), is.atomic(data), is.matrix(data), is.data.frame(data))))
  	    stop(paste(dQuote("data"), ": attempt to define invalid zoo object"))
	ndata <- NROW(data)        
        ## convenience function
        numORint <- function(x) identical(class(x), "numeric") || identical(class(x), "integer")
        ## choose start/end
        if (length(start) > 1) start <- start[1] + (start[2] - 1)/frequency
        if (length(end) > 1) end <- end[1] + (end[2] - 1)/frequency
        if (missing(end)) {
	  ostart <- start
	  oend <- NULL
	  start <- as.numeric(start)	  
	  end <- start + (ndata - 1)/frequency
	} else if(missing(start)) {
	  ostart <- NULL
	  oend <- end
	  end <- as.numeric(end)
	  start <- end - (ndata - 1)/frequency
	} else{
	  ostart <- start
	  oend <- NULL
	  start <- as.numeric(start)
	  end <- as.numeric(end)
	}
        if (start > end) stop("start cannot be after end")
        ## check whether lengths of data and index match
	# wrong lengths sometimes: order.by <- seq(start, end, by = deltat)
	order.by <- start + seq(0, length.out = ndata) * deltat
	if(isTRUE(all.equal(start * frequency, round(start * frequency), tolerance = ts.eps^2))) {
	  order.by <- floor(frequency * order.by + .0001)/frequency
        }
	
	## support also non-numeric indexes
	if(!is.null(ostart) && !numORint(ostart))
	  order.by <- ostart + (order.by - start)
	if(!is.null(oend) && !numORint(oend))
	  order.by <- oend + (order.by - end)
	
	nobs <- length(order.by)
        ## nobs <- floor((end - start) * frequency + 1.01)
        if (nobs != ndata) {
	  if(is.vector(data)) data <- rep(data, length.out = nobs)
	  else if(is.factor(data)) data <- factor(rep(as.character(data), length.out = nobs), labels = levels(data))
	  else if(is.matrix(data) || is.data.frame(data)) data <- data[rep(1:ndata, length.out = nobs), , drop = FALSE]
        }
 
	## support of calendar index (yearqtr/yearmon) for quarterly/monthly data
	if(calendar && frequency %in% c(4, 12) && numORint(order.by)) {
	  order.by <- if(frequency == 4) as.yearqtr(order.by) else as.yearmon(order.by)
	} else if(intgr) {
	  if(isTRUE(all.equal(order.by, round(order.by), tolerance = ts.eps^2))) order.by <- as.integer(round(order.by))
	}
	
        attr(data, "oclass") <- attr(data, "class")
        attr(data, "index") <- order.by
        attr(data, "frequency") <- frequency
        class(data) <- c("zooreg", "zoo")
        return(data)
    } else {
        return(zoo(data, order.by, frequency))
    }
}
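## zooreg() sketch (an editor's addition, assuming zoo is attached): a
## numeric start with frequency 4 (or 12) yields a calendar "yearqtr"
## (or "yearmon") index by default (calendar = TRUE).
zr <- zooreg(1:8, start = 2000, frequency = 4)
index(zr)      # 2000 Q1 ... 2001 Q4
frequency(zr)  # 4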
rev.zooreg <- function(x) { z <- as.zooreg(rev(as.zoo(x))); frequency(z) <- frequency(x); z }
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/zooreg.R
	register_s3_method <- function(pkg, generic, class, fun = NULL) {
  stopifnot(is.character(pkg), length(pkg) == 1L)
  stopifnot(is.character(generic), length(generic) == 1L)
  stopifnot(is.character(class), length(class) == 1L)
  if (is.null(fun)) {
    fun <- get(paste0(generic, ".", class), envir = parent.frame())
  } else {
    stopifnot(is.function(fun))
  }
  if (isNamespaceLoaded(pkg)) {
    registerS3method(generic, class, fun, envir = asNamespace(pkg))
  }
  # Always register hook in case package is later unloaded & reloaded
  setHook(
    packageEvent(pkg, "onLoad"),
    function(...) {
      registerS3method(generic, class, fun, envir = asNamespace(pkg))
    }
  )
}
.onLoad <- function(libname, pkgname) {
  if(getRversion() < "3.6.0") {
    register_s3_method("ggplot2", "autoplot", "zoo")
    register_s3_method("ggplot2", "fortify", "zoo")
    register_s3_method("ggplot2", "scale_type", "yearmon")
    register_s3_method("ggplot2", "scale_type", "yearqtr")
  }
  invisible()
}
.onUnload <- function(libpath) {
  library.dynam.unload("zoo", libpath)
}
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/R/zzz.R
# From:
# http://www.nabble.com/Re%3A-mark-areas-on-time-series-plot-p23112841.html
Lines <- '"Time","IEMP (rand/US$) Index","Distress"
01/08/81,-0.02,0
01/09/81,0.08,0
01/10/81,-0.09,0
01/11/81,0.05,0
01/12/81,0.11,0
01/01/82,0.05,0
01/02/82,-0.04,0
01/03/82,0.07,0
01/04/82,0.07,0
01/05/82,0,0
01/06/82,0.06,0
01/07/82,-0.02,0
01/08/82,0.07,0
01/09/82,-0.11,0
01/10/82,0.04,0
01/11/82,-0.36,0
01/12/82,-0.01,0
01/01/83,-0.21,0
01/02/83,-0.16,1
01/03/83,0.19,1
01/04/83,-0.06,1
01/05/83,0.06,1
01/06/83,0.13,1
01/07/83,-0.01,1
01/08/83,0,1
01/09/83,-0.01,1
01/10/83,0.06,1
01/11/83,0.09,1
01/12/83,0.04,1
01/01/84,0.02,1
01/02/84,-0.01,1
01/03/84,0.03,1
01/04/84,0.03,1
01/05/84,0,1
01/06/84,0.03,1
01/07/84,0.13,1
01/08/84,0.18,1
01/09/84,0.07,1
01/10/84,0.12,1
01/11/84,-0.16,1
01/12/84,0.13,1
01/01/85,-0.06,1
01/02/85,-0.07,1
01/03/85,0.04,1
01/04/85,-0.12,1
01/05/85,-0.01,1
01/06/85,-0.17,1
01/07/85,0.09,1
01/08/85,0.08,1
01/09/85,0.08,1
01/10/85,-0.07,1
01/11/85,-0.01,1
01/12/85,-0.01,1
01/01/86,-0.26,1
01/02/86,-0.11,1
01/03/86,0.02,1
01/04/86,0.01,1
01/05/86,0.04,1
01/06/86,0.12,1
01/07/86,-0.05,0
01/08/86,-0.08,0
01/09/86,-0.17,0
01/10/86,-0.06,0
01/11/86,0,0
01/12/86,0.02,0
01/01/87,-0.16,0
01/02/87,-0.05,0
01/03/87,-0.04,0
01/04/87,0.01,0
01/05/87,0.03,0
01/06/87,0.01,0
01/07/87,0,0
01/08/87,0.02,0
01/09/87,0,0
01/10/87,-0.02,0
01/11/87,0.02,0
01/12/87,0.01,0
01/01/88,0.02,0
01/02/88,0.09,0
01/03/88,0.09,0
01/04/88,0.04,0
01/05/88,0.09,0
01/06/88,-0.05,0
01/07/88,0.11,0
01/08/88,0.08,0
01/09/88,-0.04,0
01/10/88,0.08,0
01/11/88,0.01,0
01/12/88,-0.02,0
01/01/89,0,0
01/02/89,0.07,0
01/03/89,0.06,0
01/04/89,0.03,0
01/05/89,0.09,0
01/06/89,0.02,0
01/07/89,-0.04,0
01/08/89,-0.01,0
01/09/89,0.01,0
01/10/89,-0.01,0
01/11/89,-0.03,0
01/12/89,-0.02,0
01/01/90,-0.02,0
01/02/90,0,0
01/03/90,0.01,0
01/04/90,0.04,0
01/05/90,-0.01,0
01/06/90,0.01,0
01/07/90,-0.02,0
01/08/90,-0.05,0
01/09/90,0,0
01/10/90,-0.01,0
01/11/90,-0.03,0
01/12/90,0.02,0
01/01/91,-0.01,0
01/02/91,-0.03,0
01/03/91,0.04,0
01/04/91,0.05,0
01/05/91,0.01,0
01/06/91,0.04,0
01/07/91,0.01,0
01/08/91,-0.02,0
01/09/91,-0.02,0
01/10/91,-0.02,0
01/11/91,-0.04,0
01/12/91,0.02,0
01/01/92,-0.05,0
01/02/92,-0.01,0
01/03/92,0.01,0
01/04/92,-0.04,0
01/05/92,-0.08,0
01/06/92,0,0
01/07/92,-0.08,0
01/08/92,-0.08,0
01/09/92,0.04,0
01/10/92,0.02,0
01/11/92,0.04,0
01/12/92,0.06,0
01/01/93,0,0
01/02/93,0,0
01/03/93,0.04,0
01/04/93,-0.05,0
01/05/93,0.08,0
01/06/93,0.04,0
01/07/93,0.05,0
01/08/93,0,0
01/09/93,0,0
01/10/93,-0.09,0
01/11/93,-0.03,0
01/12/93,-0.09,0
01/01/94,0.01,0
01/02/94,0.02,0
01/03/94,0.05,0
01/04/94,0.07,0
01/05/94,0.07,0
01/06/94,-0.01,0
01/07/94,-0.03,0
01/08/94,-0.08,0
01/09/94,0.06,0
01/10/94,-0.03,0
01/11/94,0.01,0
01/12/94,0,0
01/01/95,-0.01,0
01/02/95,0.02,0
01/03/95,0,0
01/04/95,0.07,0
01/05/95,-0.03,0
01/06/95,0.02,0
01/07/95,-0.01,0
01/08/95,0,0
01/09/95,0.01,0
01/10/95,-0.02,0
01/11/95,-0.03,0
01/12/95,0,0
01/01/96,-0.01,0
01/02/96,0.05,1
01/03/96,0.08,1
01/04/96,0.16,1
01/05/96,0.1,1
01/06/96,-0.07,1
01/07/96,0.03,1
01/08/96,0.06,1
01/09/96,-0.05,1
01/10/96,0.01,1
01/11/96,0.03,0
01/12/96,0.04,0
01/01/97,-0.07,0
01/02/97,-0.06,0
01/03/97,-0.02,0
01/04/97,-0.02,0
01/05/97,-0.1,0
01/06/97,-0.01,0
01/07/97,0,0
01/08/97,-0.01,0
01/09/97,-0.01,0
01/10/97,-0.01,0
01/11/97,0.04,0
01/12/97,0.01,0
01/01/98,-0.01,0
01/02/98,-0.05,0
01/03/98,-0.04,0
01/04/98,0.01,0
01/05/98,0.1,1
01/06/98,0.2,1
01/07/98,0.25,1
01/08/98,0.1,1
01/09/98,-0.08,0
01/10/98,-0.11,0
01/11/98,-0.07,0
01/12/98,0.01,0
01/01/99,-0.02,0
01/02/99,-0.02,0
01/03/99,-0.02,0
01/04/99,-0.07,0
01/05/99,0.03,0
01/06/99,-0.06,0
01/07/99,-0.07,0
01/08/99,0,0
01/09/99,-0.06,0
01/10/99,-0.02,0
01/11/99,0,0
01/12/99,0,0
01/01/00,-0.07,0
01/02/00,0.03,0
01/03/00,0.02,0
01/04/00,0.04,0
01/05/00,0.09,0
01/06/00,-0.03,0
01/07/00,-0.01,0
01/08/00,0.01,0
01/09/00,0.03,0
01/10/00,0.04,0
01/11/00,0.02,0
01/12/00,0,0
01/01/01,0,0
01/02/01,0.01,0
01/03/01,0.02,0
01/04/01,0.03,0
01/05/01,-0.02,0
01/06/01,-0.04,1
01/07/01,-0.01,1
01/08/01,0.01,1
01/09/01,-0.01,1
01/10/01,0.06,1
01/11/01,0.04,1
01/12/01,0.21,1
01/01/02,0.02,0
01/02/02,-0.01,0
01/03/02,0.07,0
01/04/02,0.01,0
01/05/02,-0.04,0
01/06/02,-0.01,0
01/07/02,-0.01,0
01/08/02,0.07,0
01/09/02,0.04,0
01/10/02,-0.02,0
01/11/02,-0.07,0
01/12/02,-0.05,0
01/01/03,-0.03,0
01/02/03,-0.02,0
01/03/03,-0.03,0
01/04/03,-0.03,0
01/05/03,-0.06,0
01/06/03,-0.05,0
01/07/03,-0.04,0
01/08/03,-0.07,0
01/09/03,-0.09,0
01/10/03,-0.13,0
01/11/03,-0.09,0
01/12/03,0,0
01/01/04,0.06,0
01/02/04,-0.02,0
01/03/04,-0.02,0
01/04/04,-0.04,0
01/05/04,0.04,0
01/06/04,-0.05,0
01/07/04,-0.05,0
01/08/04,-0.02,0
01/09/04,0.01,0
01/10/04,-0.01,0
01/11/04,-0.07,0
01/12/04,-0.04,0
01/01/05,0.02,0
01/02/05,0,0
01/03/05,-0.02,0
01/04/05,-0.02,0
01/05/05,0.01,0
01/06/05,0.06,0
01/07/05,-0.01,0
01/08/05,-0.04,0
01/09/05,-0.01,0
01/10/05,0.03,0
01/11/05,0.02,0
01/12/05,-0.06,0
01/01/06,-0.06,0
01/02/06,-0.01,0
01/03/06,0.02,0
01/04/06,-0.02,0
01/05/06,0.04,0
01/06/06,0.13,0
01/07/06,0.07,0
01/08/06,-0.02,0
01/09/06,0.08,0
01/10/06,0.06,0
01/11/06,-0.05,0
01/12/06,-0.01,0
01/01/07,0.05,0
01/02/07,-0.04,0
01/03/07,0.01,0
01/04/07,-0.02,0
01/05/07,0.01,0
01/06/07,0.06,0
01/07/07,-0.05,0
01/08/07,0.07,0
01/09/07,0,0
01/10/07,-0.01,0
01/11/07,0.02,0
01/12/07,0.02,0
01/01/08,0,0
01/02/08,0.08,0'
library(zoo)
z <- read.zoo(text = Lines, format = "%d/%m/%y", sep = ",",
header = TRUE, col.names = c("", "IEMP", "Distress"))
plot(cbind(z$IEMP, ifelse(z$Distress == 1, z$IEMP, NA)), col = 1:2, screen = 1, ylab = "IEMP")
legend("bottomright", c("Normal", "Distress"), lty = 1, col = 1:2)
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/demo/zoo-overplot.R
	### R code from vignette source 'zoo-design.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
library("zoo")
Sys.setenv(TZ = "GMT")
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/inst/doc/zoo-design.R
	### R code from vignette source 'zoo-faq.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
library("zoo")
Sys.setenv(TZ = "GMT")
suppressWarnings(RNGversion("3.5.0"))
###################################################
### code chunk number 2: duplicates1
###################################################
z <- suppressWarnings(zoo(1:8, c(1, 2, 2, 2, 3, 4, 5, 5)))
z
###################################################
### code chunk number 3: duplicates2
###################################################
aggregate(z, identity, mean)
###################################################
### code chunk number 4: duplicates3
###################################################
aggregate(z, identity, tail, 1)
###################################################
### code chunk number 5: duplicates4
###################################################
time(z) <- na.approx(ifelse(duplicated(time(z)), NA, time(z)), na.rm = FALSE)
###################################################
### code chunk number 6: duplicates5
###################################################
z[!is.na(time(z))]
###################################################
### code chunk number 7: duplicates
###################################################
Lines <- "1|BHARTIARTL|EQ|18:15:05|600|1
2|BHARTIARTL|EQ|18:15:05|600|99
3|GLENMARK|EQ|18:15:05|238.1|5
4|HINDALCO|EQ|18:15:05|43.75|100
5|BHARTIARTL|EQ|18:15:05|600|1
6|BHEL|EQ|18:15:05|1100|11
7|HINDALCO|EQ|18:15:06|43.2|1
8|CHAMBLFERT|EQ|18:15:06|46|10
9|CHAMBLFERT|EQ|18:15:06|46|90
10|BAJAUTOFIN|EQ|18:15:06|80|100"
library("zoo")
library("chron")
tail1 <- function(x) tail(x, 1)
cls <- c("NULL", "NULL", "NULL", "character", "numeric", "numeric")
nms <- c("", "", "", "time", "value", "volume")
z <- read.zoo(text = Lines, aggregate = tail1,
  FUN = times, sep = "|", colClasses = cls, col.names = nms)
z2 <- read.zoo(text = Lines, aggregate = sum,
  FUN = times, sep = "|", colClasses = cls, col.names = nms)
z$volume <- z2$volume
z
###################################################
### code chunk number 8: readsplit
###################################################
Lines <- "Date Stock Price
2000-01-01 IBM 10
2000-01-02 IBM 11
2000-01-01 ORCL 12
2000-01-02 ORCL 13"
stocks <- read.zoo(text = Lines, header = TRUE, split = "Stock")
stocks
###################################################
### code chunk number 9: log-plot
###################################################
z <- zoo(1:100)
plot(z, log = "y", panel = function(..., log) lines(...))
###################################################
### code chunk number 10: plot-axes (eval = FALSE)
###################################################
## set.seed(1)
## z.Date <- as.Date(paste(2003, 02, c(1, 3, 7, 9, 14), sep = "-"))
## z <- zoo(cbind(left = rnorm(5), right = rnorm(5, sd = 0.2)), z.Date)
## 
## plot(z[,1], xlab = "Time", ylab = "")
## opar <- par(usr = c(par("usr")[1:2], range(z[,2])))
## lines(z[,2], lty = 2)
## 
## axis(side = 4)
## legend("bottomright", lty = 1:2, legend = colnames(z), bty="n")
## par(opar)
###################################################
### code chunk number 11: plot-axes1
###################################################
set.seed(1)
z.Date <- as.Date(paste(2003, 02, c(1, 3, 7, 9, 14), sep = "-"))
z <- zoo(cbind(left = rnorm(5), right = rnorm(5, sd = 0.2)), z.Date)
plot(z[,1], xlab = "Time", ylab = "")
opar <- par(usr = c(par("usr")[1:2], range(z[,2])))
lines(z[,2], lty = 2)
axis(side = 4)
legend("bottomright", lty = 1:2, legend = colnames(z), bty="n")
par(opar)
###################################################
### code chunk number 12: factor1
###################################################
DF <- data.frame(time = 1:4, x = 1:4, f = factor(letters[c(1, 1, 2, 2)]))
zx <- zoo(DF$x, DF$time)
zf <- zoo(DF$f, DF$time)
###################################################
### code chunk number 13: factor2
###################################################
DF2 <- data.frame(x = zx, f = zf)
###################################################
### code chunk number 14: factor3
###################################################
z <- zoo(data.matrix(DF[-1]), DF$time)
###################################################
### code chunk number 15: lags
###################################################
z <- zoo(11:15, as.Date("2008-01-01") + c(-4, 1, 2, 3, 6))
zr <- as.zooreg(z)
lag(z)
lag(zr)
diff(log(z))
diff(log(zr))
###################################################
### code chunk number 16: subtract-monthly-means
###################################################
set.seed(123)
z <- zoo(rnorm(100), as.Date("2007-01-01") + seq(0, by = 10, length = 100))
z.demean1 <- z - ave(z, as.yearmon(time(z)))
###################################################
### code chunk number 17: subtract-monthly-means2
###################################################
z.demean2 <- z - ave(z, format(time(z), "%m"))
###################################################
### code chunk number 18: yearmon2
###################################################
as.yearmon2 <- function(x, ...) UseMethod("as.yearmon2")
as.yearmon2.Date <- function(x, ...) {
  y <- as.yearmon(with(as.POSIXlt(x, tz = "GMT"), 1900 + year + mon/12))
  names(y) <- x
  structure(y, class = c("yearmon2", class(y)))
}
###################################################
### code chunk number 19: yearmon2-inverse
###################################################
as.Date.yearmon2 <- function(x, frac = 0, ...) {
  if (!is.null(names(x))) return(as.Date(names(x)))
  x <- unclass(x)
  year <- floor(x + .001)
  month <- floor(12 * (x - year) + 1 + .5 + .001)
  dd.start <- as.Date(paste(year, month, 1, sep = "-")) 
  dd.end <- dd.start + 32 - as.numeric(format(dd.start + 32, "%d"))
  as.Date((1-frac) * as.numeric(dd.start) + frac * as.numeric(dd.end),
    origin = "1970-01-01")
}
###################################################
### code chunk number 20: yearmon2-example
###################################################
dd <- seq(as.Date("2000-01-01"), length = 5, by = 32)
z <- zoo(1:5, as.yearmon2(dd))
z
aggregate(z, as.Date, identity) 
###################################################
### code chunk number 21: single-panel
###################################################
z <- zoo(0:500, as.Date(0:500))
plot(z, xaxt = "n")
tt <- time(z)
m <- unique(as.Date(as.yearmon(tt)))
jan <- format(m, "%m") == "01"
mlab <- substr(months(m[!jan]), 1, 1)
axis(side = 1, at = m[!jan], labels = mlab, tcl = -0.3, cex.axis = 0.7)
axis(side = 1, at = m[jan], labels = format(m[jan], "%y"), tcl = -0.7)
axis(side = 1, at = unique(as.Date(as.yearqtr(tt))), labels = FALSE)
abline(v = m, col = grey(0.8), lty = 2)
###################################################
### code chunk number 22: multiplesingleplot
###################################################
z3 <- cbind(z1 = z, z2 = 2*z, z3 = 3*z)
opar <- par(mfrow = c(2, 2))
tt <- time(z)
m <- unique(as.Date(as.yearmon(tt)))
jan <- format(m, "%m") == "01"
mlab <- substr(months(m[!jan]), 1, 1)
for(i in 1:ncol(z3)) {
  plot(z3[,i], xaxt = "n", ylab = colnames(z3)[i], ylim = range(z3))
  axis(side = 1, at = m[!jan], labels = mlab, tcl = -0.3, cex.axis = 0.7)
  axis(side = 1, at = m[jan], labels = format(m[jan], "%y"), tcl = -0.7)
  axis(side = 1, at = unique(as.Date(as.yearqtr(tt))), labels = FALSE)
}
par(opar)
###################################################
### code chunk number 23: multipanelplot
###################################################
plot(z3, screen = 1:3, xaxt = "n", nc = 2, ylim = range(z3),
  panel = function(...) {
    lines(...)
    panel.number <- parent.frame()$panel.number
    nser <- parent.frame()$nser
    # place axis on bottom panel of each column only
    if (panel.number %% 2 == 0 || panel.number == nser) { 
      tt <- list(...)[[1]]
      m <- unique(as.Date(as.yearmon(tt)))
      jan <- format(m, "%m") == "01"
      mlab <- substr(months(m[!jan]), 1, 1)
      axis(side = 1, at = m[!jan], labels = mlab, tcl = -0.3, cex.axis = 0.7)
      axis(side = 1, at = m[jan], labels = format(m[jan], "%y"), tcl = -0.7)
      axis(side = 1, at = unique(as.Date(as.yearqtr(tt))), labels = FALSE)
    }
})
###################################################
### code chunk number 24: plot-with-na
###################################################
z <- zoo(c(1, NA, 2, NA, 3))
plot(z)
###################################################
### code chunk number 25: plot-with-na1
###################################################
plot(z, type = "p") 
###################################################
### code chunk number 26: plot-with-na2
###################################################
plot(na.omit(z))
###################################################
### code chunk number 27: plot-with-na3
###################################################
plot(na.approx(z))
###################################################
### code chunk number 28: plot-with-na4
###################################################
plot(z, type = "p")
lines(na.omit(z))
###################################################
### code chunk number 29: Rmetrics
###################################################
library("timeDate")
dts <- c("1989-09-28", "2001-01-15", "2004-08-30", "1990-02-09")
tms <- c(  "23:12:55",   "10:34:02",   "08:30:00",   "11:18:23")
td <- timeDate(paste(dts, tms), format = "%Y-%m-%d %H:%M:%S")
library("zoo")
z <- zoo(1:4, td)
zz <- merge(z, lag(z))
plot(zz)
library("timeSeries")
zz
as.timeSeries(zz)
as.zoo(as.timeSeries(zz))
###################################################
### code chunk number 30: Rmetrics-detach
###################################################
detach("package:timeSeries")
detach("package:timeDate")
###################################################
### code chunk number 31: ifelse
###################################################
z <- zoo(c(1, 5, 10, 15))
# wrong !!!
ifelse(diff(z) > 4, -z, z)
# ok
ifelse.zoo(diff(z) > 4, -z, z)
# or if we merge first we can use ordinary ifelse
xm <- merge(z, dif = diff(z))
with(xm, ifelse(dif > 4, -z, z))
# or in this case we could also use ordinary ifelse if we
# use fill = NA to ensure all three have the same index
ifelse(diff(z, fill = NA) > 4, -z, z)
###################################################
### code chunk number 32: fillin
###################################################
# April is missing
zym <- zoo(1:5, as.yearmon("2000-01-01") + c(0, 1, 2, 4, 5)/12)
g <- seq(start(zym), end(zym), by = 1/12)
na.locf(zym, xout = g)
###################################################
### code chunk number 33: fillin-2
###################################################
z <- zoo(1:3, as.Date(c("2000-01-15", "2000-03-3", "2000-04-29")))
g <- seq(as.yearmon(start(z)), as.yearmon(end(z)), by = 1/12)
na.locf(z, x = as.yearmon, xout = g)
###################################################
### code chunk number 34: fillin-3
###################################################
Lines <- "Time,Value
2009-10-09 5:00:00,210
2009-10-09 5:05:00,207
2009-10-09 5:17:00,250
2009-10-09 5:30:00,193
2009-10-09 5:41:00,205
2009-10-09 6:00:00,185"
library("chron")
z <- read.zoo(text = Lines, FUN = as.chron, sep = ",", header = TRUE)
g <- seq(start(z), end(z), by = times("00:10:00"))
na.locf(z, xout = g)
###################################################
### code chunk number 35: date
###################################################
z <- zoo(1:2, c("2000-01-01", "2000-01-02"))
aggregate(z, function(x) as.Date(x, origin = "1970-01-01"))
###################################################
### code chunk number 36: date-2
###################################################
aggregate(z, as.Date) 
###################################################
### code chunk number 37: date-3
###################################################
Lines <- "2000-01-01 12:00:00,12
2000-01-02 12:00:00,13"
read.zoo(text = Lines, sep = ",", FUN = function(x) as.Date(x, origin = "1970-01-01"))
###################################################
### code chunk number 38: date-4
###################################################
read.zoo(text = Lines, sep = ",", FUN = as.Date)
###################################################
### code chunk number 39: indexing
###################################################
n <- 50
z <- zoo(1:n, c(1:3, seq(4, by = 2, length = n-3)))
system.time({
	zz <- sapply(seq_along(z), 
		function(i) sum(z[time(z) <= time(z)[i] & time(z) > time(z)[i] - 3]))
	z1 <- zoo(zz, time(z))
})
system.time({
	zc <- coredata(z)
	tt <- time(z)
	zr <- sapply(seq_along(zc), 
		function(i) sum(zc[tt <= tt[i] & tt > tt[i] - 3]))
	z2 <- zoo(zr, tt)
})
identical(z1, z2) 
 
## File: /scratch/gouwar.j/cran-all/cranData/zoo/inst/doc/zoo-faq.R
	### R code from vignette source 'zoo-quickref.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
library("zoo")
library("tseries")
online <- FALSE ## if set to FALSE the local copy of the data
                ## is used instead of get.hist.quote()
options(prompt = "R> ")
Sys.setenv(TZ = "GMT")
suppressWarnings(RNGversion("3.5.0"))
###################################################
### code chunk number 2: read.zoo
###################################################
Sys.setlocale("LC_TIME", "C")
inrusd <- read.zoo("demo1.txt", sep = "|", format="%d %b %Y")
###################################################
### code chunk number 3: read.table
###################################################
tmp <- read.table("demo2.txt", sep = ",")
z <- zoo(tmp[, 3:4], as.Date(as.character(tmp[, 2]), format="%d %b %Y"))
colnames(z) <- c("Nifty", "Junior")
###################################################
### code chunk number 4: extract dates
###################################################
time(z)
###################################################
### code chunk number 5: start and end
###################################################
start(z)
end(inrusd)
###################################################
### code chunk number 6: convert to plain matrix
###################################################
plain <- coredata(z)
str(plain)
###################################################
### code chunk number 7: intersection
###################################################
m <- merge(inrusd, z, all = FALSE)
###################################################
### code chunk number 8: union
###################################################
m <- merge(inrusd, z)
###################################################
### code chunk number 9: merge with lag
###################################################
merge(inrusd, lag(inrusd, -1))
###################################################
### code chunk number 10: plotting1
###################################################
plot(m)
###################################################
### code chunk number 11: plotting2
###################################################
plot(m[, 2:3], plot.type = "single", col = c("red", "blue"), lwd = 2)
###################################################
### code chunk number 12: select range of dates
###################################################
window(z, start = as.Date("2005-02-15"), end = as.Date("2005-02-28"))
###################################################
### code chunk number 13: select one date
###################################################
m[as.Date("2005-03-10")]
###################################################
### code chunk number 14: impute NAs by interpolation
###################################################
interpolated <- na.approx(m)
###################################################
### code chunk number 15: impute NAs by LOCF
###################################################
m <- na.locf(m)
m
###################################################
### code chunk number 16: compute returns
###################################################
prices2returns <- function(x) 100*diff(log(x))
###################################################
### code chunk number 17: column-wise returns
###################################################
r <- prices2returns(m)
###################################################
### code chunk number 18: rolling standard deviations
###################################################
rollapply(r, 10, sd)
###################################################
### code chunk number 19: last day of month
###################################################
prices2returns(aggregate(m, as.yearmon, tail, 1))
###################################################
### code chunk number 20: last day of week
###################################################
nextfri <- function(x) 7 * ceiling(as.numeric(x-5+4) / 7) + as.Date(5-4)
prices2returns(aggregate(na.locf(m), nextfri, tail, 1))
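# Sanity check (illustrative): the Date epoch 1970-01-01 is a Thursday, so
# as.Date(5-4) is a Friday and the formula rounds any date up to the next Friday.
format(nextfri(as.Date("2005-02-14")), "%a %Y-%m-%d")  # "Fri 2005-02-18"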
###################################################
### code chunk number 21: four second mark
###################################################
zsec <- structure(1:10, index = structure(c(1234760403.968, 1234760403.969, 
1234760403.969, 1234760405.029, 1234760405.029, 1234760405.03, 
1234760405.03, 1234760405.072, 1234760405.073, 1234760405.073
), class = c("POSIXt", "POSIXct"), tzone = ""), class = "zoo")
to4sec <- function(x) as.POSIXct(4*ceiling(as.numeric(x)/4), origin = "1970-01-01")
aggregate(zsec, to4sec, tail, 1)
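# Note: ceiling() rounds each time up to the next multiple of four seconds, so
# each aggregated value is labelled with the right endpoint of its bin.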
###################################################
### code chunk number 22: one second grid
###################################################
# tmp is zsec with time discretized into one second bins
tmp <- zsec
st <- start(tmp)
Epoch <- st - as.numeric(st)
time(tmp) <- as.integer(time(tmp) + 1e-7) + Epoch
# find index of last value in each one second interval
ix <- !duplicated(time(tmp), fromLast = TRUE)
# merge with grid 
merge(tmp[ix], zoo(, seq(start(tmp), end(tmp), "sec")))
# Here is a function which generalizes the above:
intraday.discretise <- function(b, Nsec) {
 st <- start(b)
 time(b) <- Nsec * as.integer(time(b)+1e-7) %/% Nsec + st -
 as.numeric(st)
 ix <- !duplicated(time(b), fromLast = TRUE)
 merge(b[ix], zoo(, seq(start(b), end(b), paste(Nsec, "sec"))))
}
intraday.discretise(zsec, 1)
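# Illustration (added): with Nsec = 4 the same series is binned into
# four-second intervals labelled by the start of each bin, unlike to4sec()
# above, which labels bins by their right endpoint.
intraday.discretise(zsec, 4)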
###################################################
### code chunk number 23: tseries
###################################################
library("tseries")
###################################################
### code chunk number 24: data handling if offline
###################################################
if(online) {
  sunw <- get.hist.quote(instrument = "SUNW", start = "2004-01-01", end = "2004-12-31")
  sunw2 <- get.hist.quote(instrument = "SUNW", start = "2004-01-01", end = "2004-12-31",
    compression = "m", quote = "Close")
  eur.usd <- get.hist.quote(instrument = "EUR/USD", provider = "oanda", start = "2004-01-01", end = "2004-12-31")
  save(sunw, sunw2, eur.usd, file = "sunw.rda")
} else {
  load("sunw.rda")
}
###################################################
### code chunk number 25: get.hist.quote daily series (eval = FALSE)
###################################################
## sunw <- get.hist.quote(instrument = "SUNW", start = "2004-01-01", end = "2004-12-31")
###################################################
### code chunk number 26: get.hist.quote monthly series (eval = FALSE)
###################################################
## sunw2 <- get.hist.quote(instrument = "SUNW", start = "2004-01-01", end = "2004-12-31",
##   compression = "m", quote = "Close")
###################################################
### code chunk number 27: change index to yearmon
###################################################
time(sunw2) <- as.yearmon(time(sunw2))
###################################################
### code chunk number 28: compute same series via aggregate
###################################################
sunw3 <- aggregate(sunw[, "Close"], as.yearmon, tail, 1)
###################################################
### code chunk number 29: compute returns
###################################################
r <- prices2returns(sunw3)
###################################################
### code chunk number 30: get.hist.quote oanda (eval = FALSE)
###################################################
## eur.usd <- get.hist.quote(instrument = "EUR/USD", provider = "oanda", start = "2004-01-01", end = "2004-12-31")
###################################################
### code chunk number 31: is.weekend convenience function
###################################################
is.weekend <- function(x) ((as.numeric(x)-2) %% 7) < 2
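# Why this works: as.Date() counts days from Thursday 1970-01-01, so after
# subtracting 2, Saturdays map to 0 and Sundays to 1 modulo 7. Illustration:
is.weekend(as.Date(c("2004-01-03", "2004-01-04", "2004-01-05")))  # Sat, Sun, Mon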
###################################################
### code chunk number 32: omit weekends
###################################################
eur.usd <- eur.usd[!is.weekend(time(eur.usd))]
###################################################
### code chunk number 33: is.weekend based on POSIXlt
###################################################
is.weekend <- function(x) {
  x <- as.POSIXlt(x)
  x$wday > 5 | x$wday < 1
}
###################################################
### code chunk number 34: summaries
###################################################
date1 <- seq(as.Date("2001-01-01"), as.Date("2002-12-1"), by = "day")
len1 <- length(date1)
set.seed(1) # to make it reproducible
data1 <- zoo(rnorm(len1), date1)
# quarterly summary
data1q.mean <- aggregate(data1, as.yearqtr, mean)
data1q.sd <- aggregate(data1, as.yearqtr, sd)
head(cbind(mean = data1q.mean, sd = data1q.sd))
# weekly summary - week ends on tuesday
# Given a date find the next Tuesday.
# Based on formula in Prices and Returns section.
nexttue <- function(x) 7 * ceiling(as.numeric(x - 2 + 4)/7) + as.Date(2 - 4)
data1w <- cbind(
       mean = aggregate(data1, nexttue, mean),
       sd = aggregate(data1, nexttue, sd)
)
head(data1w)
### ALTERNATIVE ###
# Create function ag like aggregate but takes vector of
# function names.
FUNs <- c(mean, sd)
ag <- function(z, by, FUNs) {
       f <- function(f) aggregate(z, by, f)
       do.call(cbind, sapply(FUNs, f, simplify = FALSE))
}
data1q <- ag(data1, as.yearqtr, c("mean", "sd"))
data1w <- ag(data1, nexttue, c("mean", "sd"))
head(data1q)
head(data1w)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/inst/doc/zoo-quickref.R 
 | 
					
	### R code from vignette source 'zoo-read.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
library("zoo")
library("chron")
Sys.setenv(TZ = "GMT")
###################################################
### code chunk number 2: ex1a
###################################################
Lines <- "
time latitude longitude altitude  distance heartrate
1277648884 0.304048 -0.793819      260  0.000000        94
1277648885 0.304056 -0.793772      262  4.307615        95
1277648894 0.304075 -0.793544      263 25.237911       103
1277648902 0.304064 -0.793387      256 40.042988       115
"
z <- read.zoo(text = Lines, header = TRUE)
z
###################################################
### code chunk number 3: ex2a
###################################################
DF <- structure(list(
  Time = structure(1:5, .Label = c("7:10:03 AM", "7:10:36 AM",
    "7:11:07 AM", "7:11:48 AM", "7:12:25 AM"), class = "factor"),
  Bid = c(6118.5, 6118.5, 6119.5, 6119, 6119),
  Offer = c(6119.5, 6119.5, 6119.5, 6120, 6119.5)),
  .Names = c("Time", "Bid", "Offer"), row.names = c(NA, -5L),
  class = "data.frame")
DF
###################################################
### code chunk number 4: ex2b
###################################################
z <- read.zoo(DF, FUN = function(x)
  times(as.chron(paste("1970-01-01", x), format = "%Y-%m-%d %H:%M:%S %p")))
z
###################################################
### code chunk number 5: ex3
###################################################
Lines <- "
Date;Time;Close
01/09/2009;10:00;56567
01/09/2009;10:05;56463
01/09/2009;10:10;56370
01/09/2009;16:45;55771
01/09/2009;16:50;55823
01/09/2009;16:55;55814
02/09/2009;10:00;55626
02/09/2009;10:05;55723
02/09/2009;10:10;55659
02/09/2009;16:45;55742
02/09/2009;16:50;55717
02/09/2009;16:55;55385
"
f <- function(x) times(paste(x, 0, sep = ":"))
z <- read.zoo(text = Lines, header = TRUE, sep = ";", 
  split = 1, index = 2, FUN = f)
colnames(z) <- sub("X(..).(..).(....)", "\\3-\\2-\\1", colnames(z))
z
###################################################
### code chunk number 6: ex4
###################################################
Lines <- "
Date Time O H L C
1/2/2005 17:05 1.3546 1.3553 1.3546 1.35495
1/2/2005 17:10 1.3553 1.3556 1.3549 1.35525
1/2/2005 17:15 1.3556 1.35565 1.35515 1.3553
1/2/2005 17:25 1.355 1.3556 1.355 1.3555
1/2/2005 17:30 1.3556 1.3564 1.35535 1.3563
"
f <- function(d, t) as.chron(paste(as.Date(chron(d)), t))
z <- read.zoo(text = Lines, header = TRUE, index = 1:2, FUN = f)
z
###################################################
### code chunk number 7: ex5
###################################################
Lines <-
"  views  number  timestamp day            time
1  views  910401 1246192687 Sun 6/28/2009 12:38
2  views  921537 1246278917 Mon 6/29/2009 12:35
3  views  934280 1246365403 Tue 6/30/2009 12:36
4  views  986463 1246888699 Mon  7/6/2009 13:58
5  views  995002 1246970243 Tue  7/7/2009 12:37
6  views 1005211 1247079398 Wed  7/8/2009 18:56
7  views 1011144 1247135553 Thu  7/9/2009 10:32
8  views 1026765 1247308591 Sat 7/11/2009 10:36
9  views 1036856 1247436951 Sun 7/12/2009 22:15
10 views 1040909 1247481564 Mon 7/13/2009 10:39
11 views 1057337 1247568387 Tue 7/14/2009 10:46
12 views 1066999 1247665787 Wed 7/15/2009 13:49
13 views 1077726 1247778752 Thu 7/16/2009 21:12
14 views 1083059 1247845413 Fri 7/17/2009 15:43
15 views 1083059 1247845824 Fri 7/17/2009 18:45
16 views 1089529 1247914194 Sat 7/18/2009 10:49
"
cl <- c("NULL", "numeric", "character")[c(1, 1, 2, 2, 1, 3, 1)]
cn <- c(NA, NA, "views", "number", NA, NA, NA)
z <- read.zoo(text = Lines, skip = 1, col.names = cn, colClasses = cl,
  index = 3, format = "%m/%d/%Y",
  aggregate = function(x) tail(x, 1))
z
###################################################
### code chunk number 8: ex5a
###################################################
(z45 <- z[format(time(z), "%w") %in% 4:5,])
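# Note: "%w" formats the weekday as a decimal 0-6 with Sunday = 0, so
# %in% 4:5 selects Thursdays and Fridays.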
###################################################
### code chunk number 9: ex5b
###################################################
z45[!duplicated(format(time(z45), "%U"), fromLast = TRUE), ]
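# Note: "%U" gives the week of the year (Sunday as first day), so this keeps
# only the last Thursday/Friday row of each week.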
###################################################
### code chunk number 10: ex5c
###################################################
g <- seq(start(z), end(z), by = "day")
z.filled <- na.locf(z, xout = g)
###################################################
### code chunk number 11: ex5e
###################################################
z.filled[format(time(z.filled), "%w") == "5", ]
###################################################
### code chunk number 12: ex6
###################################################
Lines <- "
Date,Time,Open,High,Low,Close,Up,Down
05.02.2001,00:30,421.20,421.20,421.20,421.20,11,0
05.02.2001,01:30,421.20,421.40,421.20,421.40,7,0
05.02.2001,02:00,421.30,421.30,421.30,421.30,0,5"
###################################################
### code chunk number 13: ex6a
###################################################
f <- function(d, t) chron(d, paste(t, "00", sep = ":"),
  format = c("m.d.y", "h:m:s"))
z <- read.zoo(text = Lines, sep = ",", header = TRUE,
  index = 1:2, FUN  = f)
z
###################################################
### code chunk number 14: ex6b
###################################################
f2 <- function(d, t) as.chron(paste(d, t), format = "%d.%m.%Y %H:%M")
z2 <- read.zoo(text = Lines, sep = ",", header = TRUE, 
  index = 1:2, FUN  = f2)
z2
###################################################
### code chunk number 15: ex6c
###################################################
z3 <- read.zoo(text = Lines, sep = ",", header = TRUE, 
  index = 1:2, tz = "", format = "%d.%m.%Y %H:%M")
z3
###################################################
### code chunk number 16: ex7
###################################################
Lines <- "Date Time V2   V3   V4   V5
2010-10-15 13:43:54 73.8 73.8 73.8 73.8
2010-10-15 13:44:15 73.8 73.8 73.8 73.8
2010-10-15 13:45:51 73.8 73.8 73.8 73.8
2010-10-15 13:46:21 73.8 73.8 73.8 73.8
2010-10-15 13:47:27 73.8 73.8 73.8 73.8
2010-10-15 13:47:54 73.8 73.8 73.8 73.8
2010-10-15 13:49:51 73.7 73.7 73.7 73.7
"
z <- read.zoo(text = Lines, header = TRUE, index = 1:2, tz = "")
z
###################################################
### code chunk number 17: ex8
###################################################
Lines <- "
13/10/2010      A       23
13/10/2010      B       12
13/10/2010      C       124
14/10/2010      A       43
14/10/2010      B       54
14/10/2010      C       65
15/10/2010      A       43
15/10/2010      B       N.A.
15/10/2010      C       65
"
z <- read.zoo(text = Lines, na.strings = "N.A.",
  format = "%d/%m/%Y", split = 2)
z
###################################################
### code chunk number 18: ex9
###################################################
Lines <- '
"","Fish_ID","Date","R2sqrt"
"1",1646,2006-08-18 08:48:59,0
"2",1646,2006-08-18 09:53:20,100
'
z <- read.zoo(text = Lines, header = TRUE, sep = ",",
  colClasses = c("NULL", "NULL", "character", "numeric"),
  FUN = as.chron)
z
z2 <- read.zoo(text = Lines, header = TRUE, sep = ",",
  colClasses = c("NULL", "NULL", "character", "numeric"),
  tz = "")
z2
###################################################
### code chunk number 19: ex10
###################################################
Lines <-
" iteration         Datetime    VIC1    NSW1     SA1    QLD1
1         1 2011-01-01 00:30 5482.09 7670.81 2316.22 5465.13
2         1 2011-01-01 01:00 5178.33 7474.04 2130.30 5218.61
3         1 2011-01-01 01:30 4975.51 7163.73 2042.39 5058.19
4         1 2011-01-01 02:00 5295.36 6850.14 1940.19 4897.96
5         1 2011-01-01 02:30 5042.64 6587.94 1836.19 4749.05
6         1 2011-01-01 03:00 4799.89 6388.51 1786.32 4672.92
"
z <- read.zoo(text = Lines, skip = 1, index = 3:4,
  FUN = paste, FUN2 = as.chron)
z
z2 <- read.zoo(text = Lines, skip = 1, index = 3:4, tz = "")
z2
###################################################
### code chunk number 20: ex11
###################################################
DF <- structure(list(
  Date = structure(c(14609, 14638, 14640, 14666, 14668, 14699,
    14729, 14757, 14759, 14760), class = "Date"),
  A = c(4.9, 5.1, 5, 4.8, 4.7, 5.3, 5.2, 5.4, NA, 4.6),
  B = c(18.4, 17.7, NA, NA, 18.3, 19.4, 19.7, NA, NA, 18.1),
  C = c(32.6, NA, 32.8, NA, 33.7, 32.4, 33.6, NA, 34.5, NA),
  D = c(77, NA, 78.7, NA, 79, 77.8, 79, 81.7, NA, NA)),
  .Names = c("Date", "A", "B", "C", "D"), row.names = c(NA, -10L),
  class = "data.frame")
DF
z <- read.zoo(DF)
na.locf(z)[!duplicated(as.yearmon(time(z)), fromLast = TRUE)]
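# fromLast = TRUE marks the last index within each month as non-duplicated, so
# the forward-filled series is sampled at each month's final available date.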
###################################################
### code chunk number 21: ex12
###################################################
Lines <- "
2009-10-07      0.009378
2009-10-19      0.014790
2009-10-23      -0.005946
2009-10-23      0.009096
2009-11-08      0.004189
2009-11-10      -0.004592
2009-11-17      0.009397
2009-11-24      0.003411
2009-12-02      0.003300
2010-01-15      0.010873
2010-01-20      0.010712
2010-01-20      0.022237
"
z <- read.zoo(text = Lines, aggregate = function(x) tail(x, 1))
z
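# Note: the duplicated dates (2009-10-23 and 2010-01-20) have been collapsed
# to their last value by 'aggregate'.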
###################################################
### code chunk number 22: ex13
###################################################
Lines <- "
timestamp,time-step-index,value
2009-11-23 15:58:21,23301,800
2009-11-23 15:58:29,23309,950
"
z <- read.zoo(text = Lines, header = TRUE, sep = ",", tz = "")
z
z2 <- read.zoo(text = Lines, header = TRUE, sep = ",", FUN = as.chron)
z2
###################################################
### code chunk number 23: ex14
###################################################
Lines <- "
Date Time Value
01/23/2000 10:12:15 12.12
01/24/2000 11:10:00 15.00
"
z <- read.zoo(text = Lines, header = TRUE, index = 1:2, FUN = chron)
z
###################################################
### code chunk number 24: ex15
###################################################
Lines <- "
Year   Qtr1  Qtr2  Qtr3  Qtr4   
1992    566   443   329   341   
1993    344   212   133   112   
1994    252   252   199   207
"
za <- read.zoo(text = Lines, header = TRUE)
za
zq <- zooreg(as.vector(t(za)), start = yearqtr(start(za)), freq = 4)
zq
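# Note: t(za) transposes the matrix so that as.vector() reads Qtr1..Qtr4
# within each year in turn, matching freq = 4.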
###################################################
### code chunk number 25: further (eval = FALSE)
###################################################
## filenames <- dir(pattern = "csv$") 
## z <- read.zoo(filenames, header = TRUE, sep = ",", fixed = FALSE)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/inst/doc/zoo-read.R 
 | 
					
	### R code from vignette source 'zoo.Rnw'
###################################################
### code chunk number 1: preliminaries
###################################################
library("zoo")
library("tseries")
library("strucchange")
library("timeDate")
online <- FALSE ## if set to FALSE the local copy of MSFT.rda
                ## is used instead of get.hist.quote()
options(prompt = "R> ")
Sys.setenv(TZ = "GMT")
suppressWarnings(RNGversion("3.5.0"))
###################################################
### code chunk number 2: zoo-prelim
###################################################
library("zoo")
set.seed(1071)
###################################################
### code chunk number 3: zoo-vectors1
###################################################
z1.index <- ISOdatetime(2004, rep(1:2,5), sample(28,10), 0, 0, 0)
z1.data <- rnorm(10)
z1 <- zoo(z1.data, z1.index)
###################################################
### code chunk number 4: zoo-vectors2
###################################################
z2.index <- as.POSIXct(paste(2004, rep(1:2, 5), sample(1:28, 10),
  sep = "-"))
z2.data <- sin(2*1:10/pi)
z2 <- zoo(z2.data, z2.index)
###################################################
### code chunk number 5: zoo-matrix
###################################################
Z.index <- as.Date(sample(12450:12500, 10))
Z.data <- matrix(rnorm(30), ncol = 3)
colnames(Z.data) <- c("Aa", "Bb", "Cc")
Z <- zoo(Z.data, Z.index)
###################################################
### code chunk number 6: print1
###################################################
z1
z1[3:7]
###################################################
### code chunk number 7: print2
###################################################
Z
Z[1:3, 2:3]
###################################################
### code chunk number 8: subset
###################################################
z1[ISOdatetime(2004, 1, c(14, 25), 0, 0, 0)]
###################################################
### code chunk number 9: summary
###################################################
summary(z1)
summary(Z)
###################################################
### code chunk number 10: zooreg1
###################################################
zr1 <- zooreg(sin(1:9), start = 2000, frequency = 4)
zr2 <- zoo(sin(1:9), seq(2000, 2002, by = 1/4), 4)
zr1
zr2
###################################################
### code chunk number 11: zooreg2
###################################################
zr1 <- zr1[-c(3, 5)]
zr1
class(zr1)
frequency(zr1)
###################################################
### code chunk number 12: zooreg1b
###################################################
zooreg(1:5, start = as.Date("2005-01-01"))
###################################################
### code chunk number 13: zooreg3
###################################################
is.regular(zr1)
is.regular(zr1, strict = TRUE)
###################################################
### code chunk number 14: zooreg4
###################################################
zr1 <- as.zoo(zr1)
zr1
class(zr1)
is.regular(zr1)
frequency(zr1)
###################################################
### code chunk number 15: zooreg5
###################################################
as.ts(zr1)
identical(zr2, as.zoo(as.ts(zr2)))
###################################################
### code chunk number 16: plot1 (eval = FALSE)
###################################################
## plot(Z)
###################################################
### code chunk number 17: plot2 (eval = FALSE)
###################################################
## plot(Z, plot.type = "single", col = 2:4)
###################################################
### code chunk number 18: plot2-repeat
###################################################
plot(Z, plot.type = "single", col = 2:4)
###################################################
### code chunk number 19: plot1-repeat
###################################################
plot(Z)
###################################################
### code chunk number 20: plot3
###################################################
plot(Z, type = "b", lty = 1:3, pch = list(Aa = 1:5, Bb = 2, Cc = 4),
  col = list(Bb = 2, 4))
###################################################
### code chunk number 21: plot3-repeat (eval = FALSE)
###################################################
## plot(Z, type = "b", lty = 1:3, pch = list(Aa = 1:5, Bb = 2, Cc = 4),
##   col = list(Bb = 2, 4))
###################################################
### code chunk number 22: rbind
###################################################
rbind(z1[5:10], z1[2:3])
###################################################
### code chunk number 23: cbind
###################################################
cbind(z1, z2)
###################################################
### code chunk number 24: merge
###################################################
merge(z1, z2, all = FALSE)
###################################################
### code chunk number 25: merge2
###################################################
merge(z1, pi, 1:10)
###################################################
### code chunk number 26: aggregate
###################################################
firstofmonth <- function(x) as.Date(sub("..$", "01", format(x)))
aggregate(Z, firstofmonth(index(Z)), mean)
aggregate(Z, firstofmonth, head, 1)
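# Illustration (added): format() yields "YYYY-MM-DD" and sub("..$", "01", .)
# replaces the two day digits, so firstofmonth(as.Date("2004-02-17")) returns
# "2004-02-01".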
###################################################
### code chunk number 27: disaggregate
###################################################
Nile.na <- merge(as.zoo(Nile),
  zoo(, seq(start(Nile)[1], end(Nile)[1], 1/4)))
head(as.zoo(Nile))
head(na.approx(Nile.na))
head(na.locf(Nile.na))
head(na.spline(Nile.na))
###################################################
### code chunk number 28: Ops
###################################################
z1 + z2
z1 < z2
###################################################
### code chunk number 29: cumsum
###################################################
cumsum(Z)
###################################################
### code chunk number 30: coredata
###################################################
coredata(z1)
coredata(z1) <- 1:10
z1
###################################################
### code chunk number 31: index
###################################################
index(z2)
###################################################
### code chunk number 32: index2
###################################################
index(z2) <- index(z1)
z2
###################################################
### code chunk number 33: startend
###################################################
start(z1)
end(z1)
###################################################
### code chunk number 34: window
###################################################
window(Z, start = as.Date("2004-03-01"))
window(Z, index = index(Z)[5:8], end = as.Date("2004-03-01"))
###################################################
### code chunk number 35: window2
###################################################
window(z1, end = as.POSIXct("2004-02-01")) <- 9:5
z1
###################################################
### code chunk number 36: lagdiff
###################################################
lag(z1, k = -1)
merge(z1, lag(z1, k = 1))
diff(z1)
###################################################
### code chunk number 37: coercion
###################################################
as.data.frame(Z)
###################################################
### code chunk number 38: na
###################################################
z1[sample(1:10, 3)] <- NA
z1
na.omit(z1)
na.contiguous(z1)
na.approx(z1)
na.approx(z1, 1:NROW(z1))
na.spline(z1)
na.locf(z1)
###################################################
### code chunk number 39: rollapply
###################################################
rollapply(Z, 5, sd)
rollapply(Z, 5, sd, fill = NA, align = "left")
###################################################
### code chunk number 40: rollmean
###################################################
rollmean(z2, 5, fill = NA)
###################################################
### code chunk number 41: strucchange1
###################################################
library("strucchange")
data("Journals", package = "AER")
Journals$age <- 2000 - Journals$foundingyear
scus <- gefp(log(subs) ~ log(price/citations), order.by = ~ age,
  data = Journals)
###################################################
### code chunk number 42: strucchange2
###################################################
plot(scus)
###################################################
### code chunk number 43: tseries1 (eval = FALSE)
###################################################
## library("tseries")
## MSFT <- get.hist.quote(instrument = "MSFT", start = "2001-01-01",
##   end = "2004-09-30", origin = "1970-01-01", retclass = "ts")
###################################################
### code chunk number 44: tseries1a
###################################################
if(online) {
  MSFT <- get.hist.quote("MSFT", start = "2001-01-01",
  end = "2004-09-30", origin = "1970-01-01", retclass = "ts")
  save(MSFT, file = "MSFT.rda", compress = TRUE)
} else {
  load("MSFT.rda")
}
###################################################
### code chunk number 45: tseries2
###################################################
MSFT <- as.zoo(MSFT)
index(MSFT) <- as.Date(index(MSFT))
MSFT <- na.omit(MSFT)
###################################################
### code chunk number 46: tseries3
###################################################
MSFT <- as.zoo(MSFT)
###################################################
### code chunk number 47: tseries3
###################################################
plot(diff(log(MSFT)))
###################################################
### code chunk number 48: timeDate2
###################################################
library("timeDate")
z2td <- zoo(coredata(z2), timeDate(index(z2), FinCenter = "GMT"))
z2td
###################################################
### code chunk number 49: yearmon1
###################################################
zr3 <- zooreg(rnorm(9), start = as.yearmon(2000), frequency = 12)
zr3
###################################################
### code chunk number 50: yearmon2
###################################################
aggregate(zr3, as.yearqtr, mean)
###################################################
### code chunk number 51: yearmon3
###################################################
as.Date(index(zr3))
as.Date(index(zr3), frac = 1)
###################################################
### code chunk number 52: yearmon4
###################################################
index(zr3) <- as.POSIXct(index(zr3))
as.irts(zr3)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoo/inst/doc/zoo.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
## & Guillaume Wacquet
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## TODO: rework countCells into ZICell and return a ZICell object
##      with a predict() method that should replace the computeCell() function
## Create a ZICell object that can be used to predict the number of cells per
## colonies in the particles that are analyzed
## A ZICell object is a list of predictive models, one per plankton class
## For the classes, where there is no model defined, the conversion is
## assumed to be one-for-one (one cell in each particle)
#ZICell <- function (formula, data, method = getOption("ZI.mlearning",
#"mlMda"), calc.vars = getOption("ZI.calcVars", calcVars), drop.vars = NULL,
#drop.vars.def = dropVars(), cv.k = 10, cv.strat = TRUE, ...,
#subset, na.action = na.omit)
#{
#TODO: code of ZICell().    
#}
## TODO: adapt from this:
# Logarithmic transformation of FlowCAM parameters
logFeatures <- function (data) {
    ## Parameter:
    ## data: the measurements table.
  
    ## Features based on grey levels are dropped
    ## TODO: also allow this to work with other data than FlowCAM!
    vars <- c("ECD", "FIT_Area_ABD", "FIT_Length", "FIT_Width",
        "FIT_Diameter_ESD", "FIT_Perimeter", "FIT_Convex_Perimeter",
        "FIT_Compactness", "FIT_Elongation", "FIT_Roughness", "FIT_Volume_ABD",
        "FIT_Volume_ESD", "CV", "MeanFDia", "FeretRoundness", "Perim_Ratio",
        "FIT_Aspect_Ratio", "Transp2", "Nb_cells")
    for (i in 1:length(vars)) 
        data[[paste("log", vars[i], sep = ".")]] <- log(data[[vars[i]]])
    data
}
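## Minimal usage sketch (hypothetical two-column table shown for brevity; the
## real function expects every FlowCAM variable listed in 'vars' to be present):
## d <- data.frame(ECD = c(12.3, 45.6), FIT_Length = c(20, 80), ...)
## logFeatures(d) then adds log.ECD, log.FIT_Length, ... columns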
## Compute and save the predictive model (cell counting) from a training set
## Passing both 'train' and 'traindir' risks the two getting out-of-sync...
## on the other hand, rebuilding a training set takes time => how to speed
## this up?
cellModel <- function (train, traindir, class, method = "mda")
{
    ## Parameters:
    ## train: a ZITrain file
    ## traindir: the directory containing a training set and a _count file
    ## class: the group in training set to process (for the moment, only a terminal folder)
    ## method: the predictive method to use: lm, lda, mda (default).
  
    if (!inherits(train, "ZITrain"))
        stop("'train' does not appear to be a valid training set, or problem when reading the training set")
    
    ## Check the 'class' argument
    if (length(class) < 1 || !is.character(class))
        stop("'class' must be one or more character strings")
  
    ## Is 'method' a valid method?
    if (length(method) != 1 || !is.character(method))
        stop("'method' must be a single character string")
    if (!method %in% c("lm","lda","mda"))
        stop("'method' must be one of 'lm', 'lda' or 'mda'")
  
    ## Is there already a "_cellModels.RData" file?
    models <- NULL
    odir <- setwd(traindir)
    on.exit(setwd(odir))
    modelsFile <- "_cellModels.RData"
    modelsPath <- file.path(traindir, modelsFile)
    if (file.exists(modelsFile))
        models <- readRDS(modelsFile)
  
    cat("Building predictive models...")
    for (cl in class) {
        if (!cl %in% levels(train$Class)) {
            warning("'", cl, "' is not a class of the terminal classes in the training set")
        } else {
            nbCounted <- sum(!is.na(train$Nb_cells[train$Class == cl]))
            if (nbCounted < 1) {
                warning("Nothing counted for '", cl, "'!", sep = "") 
            } else {
                if (cl %in% names(models))
                    warning("A model exists for '", cl, "'. It will be replaced!", sep="")
        
                train2 <- calcVars(train[train$Class == cl & !is.na(train$Nb_cells), ])
                train2 <- logFeatures(train2)
                form <- as.formula(log.Nb_cells ~ log.FIT_Perimeter +
                    log.FIT_Convex_Perimeter + log.FIT_Diameter_ESD +
                    log.FIT_Volume_ESD + log.FIT_Area_ABD + log.FIT_Volume_ABD +
                    log.ECD + log.FeretRoundness + log.FIT_Length +
                    log.FIT_Compactness + log.FIT_Elongation + log.MeanFDia +
                    log.FIT_Roughness + log.Perim_Ratio + log.Transp2 +
                    log.FIT_Width + log.FIT_Aspect_Ratio + log.CV)
      
                if (method == "lm")
                    model <- lm(form, data = train2)
                if (method == "lda")
                    model <- lda(form, data = train2)
                if (method == "mda")
                    model <- mda(form, data = train2, start.method = "kmeans",
                        keep.fitted = TRUE, method = gen.ridge, iter = 10)
       
                models[[cl]] <- model
                saveRDS(models, file = modelsPath)
            }
        }
    }
    cat("Done!")
}
## TODO: calc.vars.cells
## TODO: rework cellCompute() into a predict.ZICell() method
## This should really be the predict() method of a ZICell object!
## Compute and save the number of cells for each particle in a sample
#predict.ZICell <- function (object, newdata, ...) 
#{
## Compute and save the number of cells for each particle in a sample
cellCompute <- function (data, cellModels) 
{
    ## Parameters:
    ## data: sample containing the particles to count.
    ## cellModels: the file containing the models for cells countings.
    ## Preparation of the data
    newdata <- data # To match arguments of the future method
    newdata$Id.1 <- NULL
    newdata$X.Item.1 <- NULL
    newdata$Nb_cells <- 1
      
    object <- readRDS(cellModels) # To be eliminated from the predict() method!
    ## List classes in data for which a predictive model exists
    objClasses <- names(object)
    Classes <- intersect(unique(newdata$Class), objClasses)
    
    if (!length(Classes)) {
        warning("No predictive models for classes in this sample")
    } else {
        ## For each class, compute the number of cells in each particle
        for (cl in Classes) {
            data2 <- calcVars(newdata[newdata$Class == cl &
                !is.na(newdata$Nb_cells), ])
            #print(data2[1:5,])
            data2 <- logFeatures(data2)
            #print(data2[1:5,])
            
            if (class(object[[cl]])[1] == "lm")
                pred <- predict(object[[cl]], newdata = data2)
            if (class(object[[cl]])[1] == "lda")
                pred <- predict(object[[cl]], newdata = data2)$class
            if (class(object[[cl]])[1] == "mda")
                pred <- predict(object[[cl]], newdata = data2)
        
            ## Logarithmic counts --> integer counts
            newdata[which(newdata$Class == cl &
                !is.na(newdata$Nb_cells)), ]$Nb_cells <-
                round(exp(as.numeric(pred)))
        }
    }
    ## Return the modified data
    newdata
}
cellCount <- function (traindir, class, reset = FALSE)
{
    ## Parameters:
    ## traindir: the directory containing a training set
    ## class: the group in training set to process (for the moment, only a terminal folder)
    ## reset: delete all existing counting for the class before proceeding
  
    ## traindir: is it a correct training set directory?
    ## TODO: do we just need to check, or do we need to create the ZITrain object completely?
    cat("Reading training set...")    
    train <- getTrain(traindir)
    if (!inherits(train, "ZITrain"))
        stop("'traindir' does not appear to ba a valid training set, or problem when reading the training set")
    cat(" done!\n")
    ## Does 'class' exist in this training set?
    if (length(class) != 1 || !is.character(class))
        stop("'class' must be a single character string")
    if (!class %in% levels(train$Class))
        stop("'class' must be one of the terminal classes in the training set")
    
    ## Get the path corresponding to this class
    paths <- attr(train, "path")
    path <- paths[basename(paths) == class]
    
    ## TODO: possibly simplify the ZITrain object?
    ## Create the "Nb_cells" column
    train$Nb_cells <- NA
    
    ## Is there already a "_count.RData" file?
    odir <- setwd(traindir)
    on.exit(setwd(odir))
    countFile <- "_count.RData"
    countPath <- file.path(traindir, countFile)
    if (file.exists(countFile)) {
        train2 <- readRDS(countFile)
        ## Add new items from train
        train <- rbind(train2, train[!train$Id %in% train2$Id, ])
        train <- train[order(train$Id), ]
    }
    
    ## Stats about countings
    cat("Number of already counted particles in each class:\n")
    print(table(train$Class[!is.na(train$Nb_cells)]))
    
    ## Possibly reset countings for class
    if (isTRUE(reset)) {
        cat("Warning: resetting countings for class '", class, "'\n", sep = "")
        train$Nb_cells[train$Class == class] <- NA
    }
    
    ## Indicate the number of items to count
    cat("Counting cells in '", class, "'...\n", sep = "")
    nbToCount <- sum(is.na(train$Nb_cells[train$Class == class]))
    if (nbToCount < 1)
        stop("No more items to count for '", class, "'!\n", sep = "")
    if (nbToCount == 1) {
        cat("One vignette left to count...\n", sep = "")
    } else cat(nbToCount, " remaining vignettes to process...\n", sep = "")
    ## Location of items to count
    itemsToCount <- (1:nrow(train))[train$Class == class & is.na(train$Nb_cells)]
    
    for (i in itemsToCount) {
        imgFile <- file.path(path, paste(train$Id[i], "jpg", sep = "."))
        readImg <- readJPEG
        if (!file.exists(imgFile)) {
            imgFile <- file.path(path, paste(train$Id[i], "png", sep = "."))
            readImg <- readPNG
        }
        if (!file.exists(imgFile))
            stop("Vignette not found! (", imgFile, " or .jpg)")
        img <- readImg(imgFile, native = TRUE)
        dev.new()
        dimg <- dim(img)
        h <- dimg[1]
        w <- dimg[2] 
        plot(c(0, w), c(0, h), type = 'n', xlab = "", ylab = "", axes = FALSE)
        rasterImage(img, 0, 0, w, h) 
        title(paste("Class:", class), adj = 0)
        title(paste("\n\n\nVignette:", train$Id[i]), adj = 0, font.main = 1,
            cex.main = .8)
        title(sub = paste0(
            "Particle Length: ",
            toString(signif(train$FIT_Length[i], digits = 4)), " \u00B5m",
            "\nParticle Width: ",
            toString(signif(train$FIT_Width[i], digits = 4)), " \u00B5m",
            "\nParticle ECD: ",
            toString(signif(train$ECD[i], digits = 4)), " \u00B5m\n\n\n"),
            adj = 0, font.sub = 1, cex.sub = .8)
        title(sub = "Click items and right-click when done.\nClose windows to end.",
            adj = 1, font.sub = 2, cex.sub = .8)
          
        nbCells <- locator(10000, type = "p", col = "red", pch = 16, cex = 2)
        dev.off()
        n <- length(nbCells$x)
        if (n < 1) n <- NA
        train$Nb_cells[i] <- n
        
        cat(paste0("Number of cells in ", train$Id[i], ": ", train$Nb_cells[i],
            "\n"))
        saveRDS(train, file = countPath)
    }
    
    ## Indication about what was done
    nbCounted <- sum(!is.na(train$Nb_cells[train$Class == class]))
    if (nbCounted < 1)
        stop("Nothing counted for '", class, "'!\n", sep = "")
    if (nbCounted == 1) {
        cat("One vignette counted for '", class, "'...\n", sep = "")
    } else cat(nbCounted, " vignettes counted for class '", class, "'...\n", sep = "")
    
    ## Statistics for countings for this class
    Counts <- as.character(train$Nb_cells[train$Class == class])
    Counts <- paste(Counts, "cells")
    print(table(Counts))
    
    ## Call 'cellModel' function to build predictive model
    # Wrapped in try() to avoid small-dimension or within-class singularity
    # problems while still building a relevant model
    res <- try(cellModel(train = train, traindir = traindir, class = class, method = "mda"),
               silent = TRUE)
    if (inherits(res, "try-error"))
      warning("More counted vignettes are required for class '", class, "' to build model!")
    
    invisible(train)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/ZICell.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Create basically a mlearning object, but with predicted and cvpredicted added
## to it, and the '+other+' level added at the end of all levels
ZIClass <- function (formula, data, method = getOption("ZI.mlearning",
"mlRforest"), calc.vars = getOption("ZI.calcVars", calcVars), drop.vars = NULL,
drop.vars.def = dropVars(), cv.k = 10, cv.strat = TRUE, ...,
subset, na.action = na.omit)
{	
	## Rework calc.vars to freeze the list of variables to drop
	## Default (minimal) calc.vars function if none is provided
	if (!length(calc.vars)) {
		## Create a simple calc.vars function that just drop variables
		calc.vars <- function(x, drop.vars = NULL, drop.vars.def = dropVars()) {
			## Eliminate variables that are not predictors... and use Id as rownames
			Id <- x$Id
			if (length(Id)) rownames(x) <- Id
	
			## Variables to drop
			dropAll <- unique(as.character(c(drop.vars, drop.vars.def)))
			for (dropVar in dropAll) x[[dropVar]] <- NULL
	
			## Return the recalculated data frame
			x
		}
	}
	
	## Freeze data for drop.vars and drop.vars.def arguments of calc.vars
	if (!length(drop.vars)) drop.vars <- character(0) else
		drop.vars <- as.character(drop.vars)
	formals(calc.vars)$drop.vars <- drop.vars
	if (!length(drop.vars.def)) drop.vars.def <- character(0) else
		drop.vars.def <- as.character(drop.vars.def)
	formals(calc.vars)$drop.vars.def <- drop.vars.def
	
	## Check calc.vars and use it on data
	if (!is.function(calc.vars)) {
		stop("'calc.vars' must be a function or NULL")
	} else data <- calc.vars(data)
	
	## Train the machine learning algorithm
	ZI.class <- mlearning(formula, data = data, method = method,
		model.args = list(formula  = formula, data = substitute(data),
		subset = substitute(subset)), call = match.call(), ...,
		subset = subset, na.action = substitute(na.action))
		
	## Add ZIClass as class of the object
	class(ZI.class) <- c("ZIClass", class(ZI.class))
	
	## Save our customized calc.vars function in the object
	attr(ZI.class, "calc.vars") <- calc.vars
	## Get useful attributes from ZITrain
	attr(ZI.class, "traindir") <- attr(data, "traindir")
	attr(ZI.class, "path") <- attr(data, "path")
	## Calculate predictions with full training set
    attr(ZI.class, "predict") <- predict(ZI.class, data, calc = FALSE)
	## Possibly make a k-fold cross-validation and check results
	if (length(cv.k)) {
		attr(ZI.class, "cvpredict") <- cvpredict(ZI.class, type = "class",
			cv.k = cv.k, cv.strat = cv.strat)
		attr(ZI.class, "k") <- cv.k
		attr(ZI.class, "strat") <- cv.strat
	}
	
	ZI.class
}
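## Minimal usage sketch (hypothetical; 'train' stands for a ZITrain data frame
## with a 'Class' column, and mlRforest is the default mlearning method):
## zic <- ZIClass(Class ~ ., data = train)
## summary(zic)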
print.ZIClass <- function (x, ...)
{
	algorithm <- attr(x, "algorithm")
	classes <- attr(x, "response")
	lclasses <- levels(classes)
    predicted <- attr(x, "predict")
	if (is.list(predicted)) predicted <- predicted$class
	k <- attr(x, "k")
	strat <- attr(x, "strat")
	cat("A 'ZIClass' object predicting for", length(lclasses), "classes:\n")
	print(lclasses)
	Confu <- table(classes, predicted)
	SelfConsist <- 100 * (sum(diag(Confu)) / sum(Confu))
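	## i.e. the percentage of items whose prediction on the training set
	## falls on the diagonal of the confusion matrix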
	## Change the number of digits to display
	oldDigits <- options(digits = 4)
	on.exit(options(oldDigits))
	cat("\nAlgorithm used:", algorithm, "\n")
	cat("Self-consistency: ", SelfConsist, "%\n", sep = "")
	if (!is.null(k)) {
		if (isTRUE(strat)) msg <- ", stratified" else msg <- ""
    	cat("K-fold cross validation error estimation (k = ", k, msg, "):\n",
			sep = "")
		cvpredicted <- attr(x, "cvpredict")
		if (is.list(cvpredicted)) cvpredicted <- cvpredicted$class
		prior <- table(classes)
		ok <- diag(table(classes, cvpredicted))
		err <- 100 * (1 - (sum(ok) / sum(prior)))
		cat("Error rate: ", err, "%\n", sep = "")
		cat("\nError per class:\n")
		`Error (%)` <- sort(1 - (ok / prior)) * 100
		print(as.data.frame(`Error (%)`))
	}
	invisible(x)
}
summary.ZIClass <- function(object, sort.by = "Fscore", decreasing = TRUE,
na.rm = FALSE, ...)
{
	## Get the confusion object out of a ZIClass object and calc stats from there
	summary(confusion(object, response(object)), sort.by = sort.by, decreasing = decreasing,
		na.rm = na.rm, ...)
}
predict.ZIClass <- function (object, newdata, calc = TRUE, class.only = TRUE,
type = "class", ...)
{
	## Make sure we have correct objects
	if (!inherits(object, "ZIClass"))
		stop("'object' must be a 'ZIClass' object")
	
	if (!missing(newdata)) {
		if (!inherits(newdata, c("ZIDat", "data.frame")))
			stop("'newdata' must be a 'ZIDat' or 'data.frame' object")
		data <- as.data.frame(newdata)
		if (isTRUE(as.logical(calc)))
			data <- attr(object, "calc.vars")(data)
	}
    
	class(object) <- class(object)[-1]
		
	class.only <- isTRUE(as.logical(class.only))
	
	type <- as.character(type)[1]
	
	if (class.only && type != "class") {
		warning("with class.only == TRUE, type can only be 'class' and is force to it")
		type <- "class"
	}
	
	## Perform the prediction
	if (missing(newdata)) {
		res <- predict(object, ...)
	} else res <- predict(object, newdata = data, ...)
	
	## Return either the prediction, or the ZIDat object with Predicted
	## column append/replaced
	if (class.only) res else {
		newdata$Predicted <- res
		newdata
	}
}
confusion.ZIClass <- function (x, y = response(x),
labels = c("Actual", "Predicted"), useNA = "ifany", prior, use.cv = TRUE, ...) {
	## Check labels
	labels <- as.character(labels)
	if (length(labels) != 2)
		stop("You must provide exactly 2 character strings for 'labels'")
	
	## Extract class2: cvpredict or predict from the object
	if (isTRUE(as.logical(use.cv))) {
		class2 <- attr(x, "cvpredict")
		if (is.list(class2)) class2 <- class2$class
		if (is.null(class2))
			stop("No or wrong cross-validated predictions in this ZIClass object")
	} else { # Use predict
		class2 <- attr(x, "predict")
		if (is.list(class2)) class2 <- class2$class
	}
	
	## Check that both variables are of same length and same levels
	if (length(y) != length(class2))
		stop("lengths of 'x' and 'y' are not the same")
	
	## Full list of levels is in (cv)predict in class2...
	## Response in y may have dropped levels! 
	lev1 <- levels(y)
	lev2 <- levels(class2)
	if (!all(lev1  %in% lev2))
		stop("levels of 'x' and 'y' do not match")
	
	## Rework levels in y to make sure they match perfectly those in class2
	y <- factor(as.character(y), levels = lev2)
	
	## Construct the confusion object
	if (missing(prior)) {
		getNamespace("mlearning")$.confusion(data.frame(class1 = y,
			class2 = class2), labels = labels, useNA = useNA, ...)
	} else {
		getNamespace("mlearning")$.confusion(data.frame(class1 = y,
			class2 = class2), labels = labels, useNA = useNA, prior = prior, ...)
	}
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/ZIClass.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
print.ZIRes <- function (x, ...)
{
	X <- x
	class(X) <- "data.frame"
	print(X)
	## Are there size spectra?
	spectrum <- attr(x, "spectrum")
	if (length(spectrum)) {
		cat("\nWith size spectrum:\n")
		print(spectrum)
	}
	invisible(x)
}
## TODO... with inspirations from histSpectrum() and plotAbdBio()
#plot.ZIRes <- function (x, y, ...)
#{
#	
#}
#histSpectrum <- function (spect, class = 1:18 * 0.3 / 3 + 0.2, lag = 0.25,
#log.scale = TRUE, width = 0.1, xlab = "classes (mm)",
#ylab = if (log.scale) "log(abundance + 1)/m^3" else "Abundance (ind./m^3)",
#main = "", ylim = c(0, 2), plot.exp = FALSE)
#{	
#	## Plot of histograms and optionally line for exponential decrease
#	## for size spectra
#	plot.exp <- isTRUE(as.logical(plot.exp))
#	log.scale <- isTRUE(as.logical(log.scale))
#	if (plot.exp) {
#		spect.lm <- lm(spect ~ class)
#		print(summary(spect.lm))
#		slope <- format(coef(spect.lm)[2], digits = 3)
#		main <- paste(main, " (slope = ", slope, ")", sep = "")
#		class2 <- class - lag
#		spect.lm2 <- lm(spect ~ class2)
#		if (log.scale) {
#			spect <- 10^spect - 1
#			expdat <- 10^predict(spect.lm2) - 1
#		}
#	}
#	barplot(spect, width = 0.1, space = 0, xlab = xlab, ylab = ylab,
#		main = main, ylim = ylim)
#	if (plot.exp) {
#		if (log.scale) {
#			abline(coef = coef(spect.lm2), col = 2, lwd = 2)
#		} else {
#			lines(class2, expdat, col = 2, lwd = 2)
#		}
#		return(invisible(spect.lm2))
#	}
#}
#
#plotAbdBio <- function (t, y1, y2, y3, ylim = c(0,3), xlab = "Date",
#ylab = "log(abundance + 1)", main = "", cols = c("green", "blue", "red"),
#pchs = 1:3, hgrid = 1:3, vgrid = t, vline = NULL, xleg = min(vgrid),
#yleg = ylim[2], legend = c("series 1", "series 2", "series 3"), type = "o")
#{	
#	## Custom plot for abundance and biomass
#	plot(t, y1, type = type, ylim = ylim, xlim = range(vgrid), ylab = ylab,
#		xlab = xlab, main = main, col = cols[1], xaxt = "n", pch = pchs[1])
#	axis(1, at = vgrid, labels = format(vgrid, "%b"))
#	lines(t, y2, type = type, col = cols[2], pch = pchs[2])
#	lines(t, y3, type = type, col = cols[3], pch = pchs[3])
#	
#	## Grid
#	abline(h = hgrid, col = "gray", lty = 2)
#	abline(v = vgrid, col = "gray", lty = 2)
#	
#	## Vertical line(s) to spot particular time events
#	if (!is.null(vline))
#		abline(v = as.Date(vline), lty = 2, lwd = 2, col = 2)
#	if (!is.null(xleg))
#		legend(xleg, yleg, legend, col = cols, lwd = 1, pch = pchs,
#			bg = "white")
#}
rbind.ZIRes <- function (..., deparse.level = 1)
{
	## Same as rbind.data.frame, but take care also to combine spectrum attributes
	res <- rbind.data.frame(..., deparse.level = deparse.level)
	
	attr(res, "spectrum") <- do.call("c", lapply(list(...), attr,
		which = "spectrum"))
	res
}
## Calculate abundances, biomasses and size spectra per class in a sample
processSample <- function (x, sample, keep = NULL, detail = NULL, classes = "both",
header = c("Abd", "Bio"), cells = NULL, biomass = NULL, breaks = NULL)
{
	## Fix ECD in case of FIT_VIS data
	if ("FIT_Area_ABD" %in% names(x)) x$ECD <- ecd(x$FIT_Area_ABD)
			
	## Check arguments
	if (missing(sample)) {
		sample <- unique(sampleInfo(x$Label, type = "sample", ext = ""))
		if (length(sample) != 1) {
			warning("'sample' not provided, or 'x' does not contain a single sample 'Label'")
			return(NULL)
		}
	}
	if (!is.character(sample) || length(sample) != 1)
		stop("'sample' must be a single character string")
	
	header <- as.character(header)
	if (length(biomass)) {
		if (length(header) < 2) 
			stop("you must provide headers for abundances and biomasses")
		header <- header[1:2]
	} else {
		if (length(header) < 1)
			stop("You must provide a header for abundances")
		header <- header[1]
	}
	
	if (!length(x$Dil) || !is.numeric(x$Dil)) {
		warning("'Dil' column is missing or not numeric in 'x'")
		return(NULL)
	}
	
	## Do we compute the number of cells and the ECD per cell?
	## TODO: should not rely on a file here (use a predict() method of a ZICell object)!
	if (!is.null(cells) && file.exists(cells)) {
####		## Must be a ZICell model here! predict() iterates on all items
####		## of the list to compute cells for all classes!
####		x$Nb_cells <- predict(cells, x)
		## Fixed by G. Wacquet: x$Nb_cells -> x, because cellCompute() returns the whole df
		x <- cellCompute(x, cells)
    x$ECD_cells <- ecd(x$FIT_Area_ABD, x$Nb_cells)
	}
	
	## Extract only data for a given sample
	allSamples <- unique(sampleInfo(x$Label, type = "sample", ext = ""))
	if (!sample %in% allSamples){
		warning("Sample not found in 'x'")
		return(NULL)
	}
	x <- x[allSamples == sample, ]
	
	## Retrieving classes
	classes <- as.character(classes)[1]
	Cl <- switch(classes,
		Class = x$Class,
		Predicted = x$Predicted,
		both = { # Use Class where it is defined, otherwise, use predicted
			res <- x$Class
			if (is.null(res)) x$Predicted else {
				isMissing <- is.na(res)
				res[isMissing] <- x$Predicted[isMissing]
				res
			}
		},
		x[, classes])
	if (length(Cl) != NROW(x) || !is.factor(Cl)) { # There is a problem retrieving classes!
		warning("problem while retrieving classes (are they defined?)")
		return(NULL)
	} else x$Cl <- Cl
	
	## By default, only keep taxa whose name contains an uppercase letter
	if (is.null(keep)) {
    keep <- levels(x$Cl)
    keep <- keep[grepl("[A-Z]", keep)]
  }
	
	## Subsample, depending on which classes we keep
	if (length(keep)) {
		keep <- as.character(keep)
		if (!all(keep %in% levels(x$Cl))) {
			warning("one or more 'keep' levels are not found in the classes")
			return(NULL)
		}
		x <- x[x$Cl %in% keep, ] # Select keep levels
	}
	Cl <- as.character(x$Cl)
	if (NROW(x) == 0) {
		warning("no data left for this sample in 'x' when 'keep' is applied")
		return(NULL)
	}
		
	## Data for biomass calculation
	if (length(biomass)) {
		if (inherits(biomass, "data.frame")) { # Parameters vary by class
			## We need Class, P1, P2 and P3, and among groups, we need [other] 
			if (NCOL(biomass) != 4 || !is.factor(biomass[, 1]))
				stop("you must provide a data frame with four columns, the first one a factor")
			if (!"[other]" %in% levels(biomass[, 1]))
				stop("you must include '[other]' in the levels of factor for biomass conversion")
			## Make sure the three other columns are numeric
			biomass[, 2] <- as.numeric(biomass[, 2])
			biomass[, 3] <- as.numeric(biomass[, 3])
			biomass[, 4] <- as.numeric(biomass[, 4])
			## Place P1, P2 and P3 according to class in x
			isother <- biomass[, 1] == "[other]"
			defbio <- biomass[isother, 2:4]
			nms <- as.character(biomass[!isother, 1])
			P1bio <- structure(biomass[!isother, 2], names = nms)
			P1 <- P1bio[Cl]
			P1[is.na(P1)] <- defbio[1]
			x$P1 <- as.numeric(P1)
			P2bio <- structure(biomass[!isother, 3], names = nms)
			P2 <- P2bio[Cl]
			P2[is.na(P2)] <- defbio[2]
			x$P2 <- as.numeric(P2)
			P3bio <- structure(biomass[!isother, 4], names = nms)
			P3 <- P3bio[Cl]
			P3[is.na(P3)] <- defbio[3]
			x$P3 <- as.numeric(P3)
			
		} else if (length(biomass) == 3 && is.numeric(biomass)) { # Same parameters for all classes
			x$P1 <- biomass[1]
			x$P2 <- biomass[2]
			x$P3 <- biomass[3]
		} else stop("wrong 'biomass', must be NULL, a vector of 3 values or a data frame with Class, P1, P2 and P3")
		## Prefer using ECD_cells and Nb_cells if it exists
		if (is.numeric(x$ECD_cells)) {
			x$BioWeight <- (x$P1 * x$ECD_cells^x$P3 + x$P2) * x$Dil * x$Nb_cells
		} else {
			if (!is.numeric(x$ECD)) stop("'ECD' required for biomasses")
			x$BioWeight <- (x$P1 * x$ECD^x$P3 + x$P2) * x$Dil
		}
	}
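	## Worked example of the conversion (illustrative numbers only): with
	## P1 = 0.001, P2 = 0 and P3 = 2.5, a particle of ECD = 100 at Dil = 2
	## contributes 0.001 * 100^2.5 * 2 = 200 weight units to its class total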
	
	## By default, give detail for all kept classes
  if (is.null(detail)) detail <- keep
	
	## Split among detail, if provided
	if (length(detail)) {
		# We want more details for one or more groups...
		detail <- as.character(detail)
		## 'total' and 'others' calculated differently!
		detail <- detail[detail != "[total]" & detail != "[other]"]
		
		Cl[!Cl %in% detail] <- "[other]"
		x$Cl <- Cl
		if (any(Cl == "[other]")) {
      sel <- c(detail, "[other]")
    } else sel <- detail
    abdnames <- paste(header[1], c(sel, "[total]"))
    bionames <- paste(header[2], c(sel, "[total]"))
		if (is.numeric(x$Nb_cells)) {
			res <- tapply(x$Dil * x$Nb_cells, Cl, sum, na.rm = TRUE)
			res <- res[sel]
			res <- c(res, '[total]' = sum(x$Dil  * x$Nb_cells, na.rm = TRUE))
		} else {
			res <- tapply(x$Dil, Cl, sum, na.rm = TRUE)
			res <- res[sel]
			res <- c(res, '[total]' = sum(x$Dil, na.rm = TRUE))
		}
		names(res) <- abdnames
		
		if (length(biomass)) {
			resbio <- tapply(x$BioWeight, Cl, sum, na.rm = TRUE)
			resbio <- resbio[sel]
			resbio <- c(resbio, '[total]' = sum(x$BioWeight, na.rm = TRUE))
			names(resbio) <- bionames
			res <- c(res, resbio)
		}
		
	} else { # Total abundance (and biomass) only
		if (is.numeric(x$Nb_cells)) {
			res <- sum(x$Dil * x$Nb_cells, na.rm = TRUE)
		} else {
			res <- sum(x$Dil, na.rm = TRUE)
		}
		if (length(biomass))
			res <- c(res, sum(x$BioWeight, na.rm = TRUE))
		names(res) <- paste(header, "[total]")
	}
	res[is.na(res)] <- 0
	
	## Make the result a data frame with first column being Id, and make it
	## a ZIRes object inheriting from data frame
	res <- structure(data.frame(Id = sample, t(res), check.names = FALSE),
		class = c("ZI3Res", "ZIRes", "data.frame"))
	
	## Do we calculate size spectra? (always computed by colonies only, never by cells)
	if (length(breaks)) {
		if (!is.numeric(breaks) || length(breaks) < 2)
			stop("'breaks' must be a vector of two or more numerics or NULL")
		
		if (!is.numeric(x$ECD)) {
			warning("'ECD' required for size spectra")
			return(NULL)
		}
		
		## For each image, calculate size spectra per classes
		tcut <- function (items, data, breaks) {
			data <- data[items, ]
			x <- data$ECD 
			## Cut by class
			res <- tapply(x, data$Cl, function (x, breaks)
				table(cut(x, breaks = breaks)), breaks = breaks)
			## For empty classes, make sure to get zero
			res <- lapply(res, function (x, breaks)
				if (is.null(x)) table(cut(-1, breaks = breaks)) else x,
				breaks = breaks)
			## Turn this into a matrix and multiply by Dil for this image
			do.call("rbind", res) * data$Dil[1]
		}
		
		## Get abundance breaks (ind/vol) by image and by class
		## and sum over all images
		if (length(detail)) {
			x$Cl <- factor(x$Cl, levels = c(detail, "[other]"))
		} else x$Cl <- as.factor(x$Cl)
		spectrum <- Reduce("+", tapply(1:NROW(x), x$Label, tcut,
			data = x, breaks = breaks))
		
		## Place [other] at the end and add [total]
		isother <- rownames(spectrum) == "[other]"
		if (any(isother)) {
			spectrum <- rbind(spectrum[!isother, , drop = FALSE],
				spectrum[isother, , drop = FALSE],
				'[total]' = apply(spectrum, 2, sum))
		} else {
			spectrum <- rbind(spectrum,
			'[total]' = apply(spectrum, 2, sum))	
		}
		
		## Eliminate all lines except total if detail is not provided
		if (!length(detail))
			spectrum <- spectrum[NROW(spectrum), , drop = FALSE]
		## Put this in a 'spectrum' attribute (named list)
		spectrum <- list(spectrum)
		names(spectrum) <- sample
		attr(res, "spectrum") <- spectrum
	}
	
	res
}
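## Not run: a minimal, hypothetical sketch of a processSample() call. The
## file name and class names below are placeholders; the biomass conversion
## table must be a four-column data frame whose first column is a factor
## including an '[other]' level (see the checks above).
if (FALSE) {
	dat <- zidbDatRead("mysample.zidb")
	bio <- data.frame(
		Class = factor(c("Copepoda", "[other]")),
		P1 = c(0.05, 0.02), P2 = c(0, 0), P3 = c(2.5, 3))
	processSample(dat, keep = NULL, detail = "Copepoda",
		biomass = bio, breaks = seq(0.2, 2, by = 0.2))
}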
processSampleAll <- function (path = ".", zidbfiles, ZIClass = NULL, keep = NULL,
detail = NULL, classes = "both", header = c("Abd", "Bio"), cells = NULL,
biomass = NULL, breaks = NULL)
{
	## First, switch to that directory                                       
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	
	## Get the list of ZID(B) files to process
	if (missing(zidbfiles) || !length(zidbfiles)) {	# Compute them from path
		zidbfiles <- zidbList(".")
		## If no .zidb files, try .zid files instead
		if (!length(zidbfiles)) zidbfiles <- zidList(".")
	}
	
	## If there are no files to process, exit now
	if (!length(zidbfiles)) {
		warning("there are no ZID(B) files to process in ", getwd())
		return(invisible(FALSE))
	}
			
	## Process samples in the .zidb files
	message("Processing sample statistics for ZIDB files...")
	flush.console()
	nfiles <- length(zidbfiles)
	res <- NULL
	for (i in 1:nfiles) {
		progress(i, nfiles)
		zidbfile <- zidbfiles[i]
		if (hasExtension(zidbfile, "zidb")) {
			dat <- zidbDatRead(zidbfile)
		} else dat <- zidDatRead(zidbfile)
		## Do we predict the classes in the sample?
		if (length(ZIClass)) dat <- predict(ZIClass, dat, class.only = FALSE)
		## Process that one sample and merge with the rest
		res0 <- processSample(dat, keep = keep, detail = detail,
			classes = classes, header = header, cells = cells,
			biomass = biomass, breaks = breaks)
		res <- rbind(res, res0)
	}
	progress(101) # Clear progression indicator
	message(" -- Done! --")
	
	res
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/ZIRes.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
template <- function (object, ...)
	UseMethod("template")
	
template.default <- function (object, ...)
	attr(object, "path")
## Get the subpath of vignettes giving their classes
.getPath <- function (x, rootdir = NULL, ext = "jpg", path, classes, ...) {
## Possibly get the classification of the particles
	if (length(classes)) {
		if (inherits(classes, "function")) {
			## Run this function for getting classes
			res <- classes(x, path, ...)
		} else if (inherits(classes, "mlearning")) {
			## Use this object to predict classes
			res <- predict(classes, x, ...)
		} else if (inherits(classes, "character")) {
			## Look for one or two columns with these names
			if (length(classes) > 2) {
				warning("cannot use more than two columns for classes... using first two only")
				classes <- classes[1:2]
			}
			nms <- names(x)
			if (any(!classes %in% nms))
				stop("classes are not existing variable names")
			res <- x[[classes[1]]]
			if (length(classes) == 2) {
				isMissing <- is.na(res)
				res[isMissing] <- x[[classes[2]]][isMissing]
			}
		}
		res <- as.character(res)
		
		## Transform this into a subpath
		subpath <- unique(c("_", path))
		while (!all(path == ".")) {
			path <- unique(dirname(path))
			subpath <- c(subpath, path)
		}
		subpath <- sort(subpath[subpath != "."])
		names(subpath) <- basename(subpath)
		res <- subpath[res]
		
		## Missing data are transformed into '_'
		res[is.na(res)] <- "_"
		names(res) <- NULL
		
	} else res <- rep("_", NROW(x)) # Default to put everything in '_'
			
	if (!length(rootdir)) rootdir <- "."
	res <- file.path(rootdir, res, paste(makeId(x), ext, sep = "."))
	res
}
## Prepare 'dir\subdir' for a manual classification by expanding all vignettes
## from a given number of zidfiles to the '_' subdir, and making
## a template for subdirs
## TODO: verify that template matches classes if classes is not NULL
prepareTrain <- function (traindir, zidbfiles,
template = c("[Basic]", "[Detailed]", "[Very detailed]"), classes = NULL, ...)
{
	## First, check that dirname of traindir is valid
	if (!checkDirExists(dirname(traindir))) return(invisible(FALSE))
	if (!checkEmptyDir(traindir,
		message = 'dir "%s" is not empty. Use addToTrain() instead!'))
		return(invisible(FALSE))
	## Then, check that all zidfiles or zidbfiles exist
	if (hasExtension(zidbfiles[1], "zidb")) dbext <- "zidb" else dbext <- "zid"
	if (!checkFileExists(zidbfiles, dbext)) return(invisible(FALSE))
	zmax <- length(zidbfiles)
	## Also look for the template
	## If the object has a path template, use it...
	path <- attr(template, "path")
	if (!length(path)) { # Look for a .zic file with classes
		template <- as.character(template)[1]
		rx <- "^[[](.+)[]]$"
		if (grepl(rx, template)) {
			## This should be a template file in the default directory
			template <- paste(sub(rx, "\\1", template), ".zic",
				sep = "")
			template <- file.path(getTemp("ZIetc"), template)
			if (!file.exists(template)) {
				warning("The file '", template, "' is not found")
				return(invisible(FALSE))
			}
		}
		## Check that this is a .zic file
		if (!zicCheck(template)) return(invisible(FALSE))
	
		## Create the other directories
		path <- scan(template, character(), sep = "\n", skip = 2,
			quiet = TRUE)
		if (!length(path)) {
			warning(sprintf("'%s' is empty or corrupted!", template))
			return(invisible(FALSE))	
		}
	}
	## Create '_' subdir
	dir_ <- file.path(traindir, "_")
	if (!forceDirCreate(dir_)) return(invisible(FALSE))
	## Create subdirectories representing classes hierarchy as in template
	message("Making directories...")
	fullpath <- file.path(traindir, path)
	for (i in 1:length(fullpath)) {
		#message(fullpath[i])
		dir.create(fullpath[i], recursive = TRUE)
	}
	## Place the vignettes...
	message("Extracting data and vignettes ...")
	flush.console()
	for (i in 1:zmax) {
		progress(i, zmax)
		if (dbext != "zidb") {
            ## Using a temporary directory to unzip all files and then copy
    		## the RData files to the train directory
    		td <- tempfile()
    		unzip(zipfile = zidbfiles[i], exdir = td)
    		datafiles <- file.path(td, list.files(td,
    			pattern = extensionPattern(".RData"), recursive = TRUE))
			if (length(datafiles)) file.copy(datafiles, traindir)
			## Get path for the vignettes and copy them there
			zidat <- zidDatRead(zidbfiles[i])
			vigpath <- .getPath(zidat, rootdir = traindir, ext = "jpg",
				path = path, classes = classes, ...)
    		names(vigpath) <- basename(vigpath)
			vignettes <- file.path(td, list.files(td,
    			pattern = extensionPattern(".jpg"), recursive = TRUE))
			if (length(vignettes)) {
				vigpath <- vigpath[basename(vignettes)]
				isMissing <- is.na(vigpath)
				vigpath[isMissing] <- file.path(dir_,
					basename(vignettes))[isMissing]
				file.copy(vignettes, vigpath)
			} else warning("no vignettes found for ", zidbfiles[i])
    		unlink(td, recursive = TRUE)
		} else {  # Use .zidb files
            ## Link .zidb database to R objects in memory
            Zidb <- zidbLink(zidbfiles[i])
            AllItems <- ls(Zidb)
            Vigns <- AllItems[!grepl("_dat1", AllItems)]
            ## Extract all vignettes in their class subdirectory
            imgext <- Zidb[[".ImageType"]]
			## Get path for the vignettes and copy them there
			zidat <- zidbDatRead(zidbfiles[i])
			vigpath <- .getPath(zidat, rootdir = traindir, ext = imgext,
				path = path, classes = classes, ...)
    		names(vigpath) <- sub(paste("\\.", imgext, "$", sep = ""), "",
				basename(vigpath))
			if (length(Vigns)) {
				vigpath <- vigpath[Vigns]
				for (j in 1:length(Vigns)) {
					vigfile <- vigpath[j]
					if (is.na(vigfile)) vigfile <- file.path(dir_,
						paste(Vigns[j], imgext, sep = "."))
					writeBin(Zidb[[Vigns[j]]], vigfile)
				}
			} else warning("no vignettes found for ", zidbfiles[i])
            ## Save the measurement data (_dat1.RData)
            ZI.sample <- Zidb$.Data
            save(ZI.sample, file = file.path(traindir, paste(sub(".zidb", "",
				basename(zidbfiles[i])), "_dat1.RData", sep = "")))
		}
	}
	progress(101) # Clear progression indicator
	message(" -- Done! --")
	invisible(TRUE)
}
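## Not run: a minimal, hypothetical sketch of preparing a training set from
## all .zidb files found in a directory, using the default "[Basic]" template
## (directory names are placeholders).
if (FALSE) {
	zidbs <- zidbList("MySamplesDir")
	prepareTrain("MyTrainDir", zidbfiles = zidbs, template = "[Basic]")
	## ... manually sort the vignettes into the class subdirectories, then:
	train <- getTrain("MyTrainDir")
}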
## TODO: apply selection for active learning by partial validation
prepareTest <- function (testdir, zidbfiles, template, classes = NULL, ...)
{
	if (!is.null(attr(template, "path"))) template <- attr(template, "path")
	tpl <- structure(1, path = template)
	res <- prepareTrain(testdir, zidbfiles = zidbfiles,
		template = tpl, classes = classes, ...)
	## Add a .zic file there to make sure to respect training set classes
	cat("ZI3\n[path]\n", paste(template, collapse = "\n"), "\n", sep = "",
		file = file.path(testdir, "_template.zic"))
	
	invisible(res)
}
## Function to add new vignettes in a training set
addToTrain <- function (traindir, zidbfiles, classes = NULL, ...)
{
	## Check if selected zid(b) files are already classified in the training set
	Rdata <- list.files(traindir, pattern = "[.]RData$")
	RdataNew <- paste0(sub("[.]zidb?$", "", basename(zidbfiles)), "_dat1.RData")
	NewZidb <- !RdataNew %in% Rdata
	
	if (!any(NewZidb)) { # All zidbs are already in the training set
		warning("All selected ZID(B) files already in the training set")
		return(invisible(FALSE))
	} else { # Keep only new zid(b) files
		zidbfiles <- zidbfiles[NewZidb]
		warning("You have selected ", length(zidbfiles), " new ZID(B) files.\n",
			"The others files are already included in the training set")
	}
	
	## Extract vignettes to a new subdir in '_' and .RData to parent directory
	NewDir <- "_/_NewVignettes1"
	## Check if the new directory name already exists
	if (file.exists(file.path(traindir, NewDir))) {
		DirLst <- dir(file.path(traindir, "_"), pattern = "_NewVignettes")
		NewDir <- paste("_/_NewVignettes", (length(DirLst) + 1), sep = "")
	}
	
	## Check if NewDir exists
	ToPath <- file.path(traindir, NewDir)
	if (!file.exists(ToPath))
		if (!forceDirCreate(ToPath)) return(invisible(FALSE))
	
	## Extract RData in the root directory
	zmax <- length(zidbfiles)
	message("Adding data and vignettes to the training set...\n")
	for (i in 1:zmax) {
		progress(i, zmax)
		## Treatment depends on whether it is a .zid or a .zidb file
		zidbfile <- zidbfiles[i]
		if (grepl("[.]zidb$", zidbfile)) { # .zidb file
			## Link .zidb database to R objects in memory
            Zidb <- zidbLink(zidbfile)
            AllItems <- ls(Zidb)
            Vigns <- AllItems[!grepl("_dat1", AllItems)]
            ## Copy all vignettes into the ToPath directory
            imgext <- Zidb[[".ImageType"]]
			## Get path for the vignettes and copy them there
			zidat <- zidbDatRead(zidbfile)
			vigpath <- .getPath(zidat, rootdir = traindir, ext = imgext,
				path = attr(zidat, "path"), classes = classes, ...)
    		vigpath[vigpath == "_"] <- ToPath
			names(vigpath) <- sub(paste("\\.", imgext, "$", sep = ""), "",
				basename(vigpath))
			if (length(Vigns)) {
				vigpath <- vigpath[Vigns]
				for (j in 1:length(Vigns)) {
					vigfile <- vigpath[j]
					if (is.na(vigfile)) vigfile <- file.path(ToPath,
						paste(Vigns[j], imgext, sep = "."))
					writeBin(Zidb[[Vigns[j]]], vigfile)
				}
			} else warning("no vignettes found for ", zidbfile)
            ## Save RData file
            ZI.sample <- Zidb$.Data
            save(ZI.sample, file = file.path(traindir, paste(sub(".zidb", "",
				basename(zidbfile)), "_dat1.RData", sep = "")))
		} else { # .zid file
			## Using a temporary directory to unzip all files and then copy
			## the RData files to the train directory
			td <- tempfile()
			unzip(zipfile = zidbfile, exdir = td)
			datafiles <- file.path(td, list.files(td,
				pattern = extensionPattern(".RData"), recursive = TRUE))
			if (length(datafiles))
				file.copy(datafiles, file.path(traindir, basename(datafiles)))
			## Get path for the vignettes and copy them there
			zidat <- zidDatRead(zidbfile)
			vigpath <- .getPath(zidat, rootdir = traindir, ext = "jpg",
				path = attr(zidat, "path"), classes = classes, ...)
    		vigpath[vigpath == "_"] <- ToPath
			names(vigpath) <- basename(vigpath)
			vignettes <- file.path(td, list.files(td,
    			pattern = extensionPattern(".jpg"), recursive = TRUE))
			if (length(vignettes)) {
				vigpath <- vigpath[basename(vignettes)]
				isMissing <- is.na(vigpath)
				vigpath[isMissing] <- file.path(ToPath,
					basename(vignettes))[isMissing]
				file.copy(vignettes, vigpath)
			} else warning("no vignettes found for ", zidbfile)
			unlink(td, recursive = TRUE)	
		}
	}
	progress(101) # Clear progression indicator
	message("-- Done --\n")
	invisible(TRUE)
}
addToTest <- function (testdir, zidbfiles, classes = NULL, ...)
	invisible(addToTrain(traindir = testdir, zidbfiles = zidbfiles,
		classes = classes, ...))
## Retrieve information from a manual training set in a 'ZITrain' object	
## TODO: check dir names are unique, check no duplicated vignettes,
##       check all measurements are there, ... + exhaustive report!
getTrain <- function (traindir, creator = NULL, desc = NULL, keep_ = FALSE,
na.rm = FALSE)
{
	## 'traindir' must be the base directory of the manual classification
	if (!checkDirExists(traindir)) return(invisible(FALSE))
	## Make sure we have .RData files in this traindir (otherwise it is
	## perhaps not a training set root dir!)
	Dats <- list.files(traindir, pattern = "_dat1[.]RData$", full.names = TRUE)
	if (!length(Dats)) {
		warning("'traindir' does not appear to be a ", getTemp("ZIname"),
			" training set root dir!")
		return(invisible(FALSE))
	}
	## List the .jpg or .png files (recursively) in the dir
	res <- jpgList(traindir, recursive = TRUE)
	if (!length(res)) res <- pngList(traindir, recursive = TRUE)
	## Check the result...
	if (!length(res)) {
		warning("no PNG or JPEG vignettes found in this tree")
		return(invisible(FALSE))
	}
	## Replace "\\" by "/"
	res <- gsub("[\\]", "/", res)
	## Do we eliminate the '_' directory?
	if (!is.na(keep_) && !isTRUE(as.logical(keep_)))
		res <- grep("^[^_]", res, value = TRUE)
	## 'Id' is the name of the vignettes, minus the extension
	Id <- noExtension(res)
	## 'Path' is the directory path
	Path <- dirname(res)
	## 'Class' is the last directory where the vignettes are located
	Class <- basename(Path)
	
	## For all items in _ or one of its subdirectories, replace Class by NA
	if (is.na(keep_)) Class[grepl("^[_]", res)] <- NA
	## Create a  data frame with Id and Class
	df <- data.frame(Id = Id, Class = Class)
	df$Id <- as.character(df$Id)
	nitems <- nrow(df)
	## Read in all the .RData files from the root directory and merge them
	## Get measurement infos
	ZI.sample <- NULL
	load(Dats[1])
	Dat <- ZI.sample
	Classes <- class(Dat)
	
	## Modif Kev to free memory
	Dat <- cbind(Id = makeId(Dat), Dat)
	Dat <- merge(Dat, df, by = "Id")
	if (length(Dats) > 1) {
		for (i in 2:length(Dats)) {
			load(Dats[i])
			ZI.sample <- cbind(Id = makeId(ZI.sample), ZI.sample)
			ZI.sample <- merge(ZI.sample, df, by = "Id")
			Dat <- merge(Dat, ZI.sample, all = TRUE)
			Dat$X.Item.1 <- NULL
		}
	}
	rownames(Dat) <- 1:nrow(Dat)
	## Rename Dat in df
	df <- Dat
	## Fix ECD in case of FIT_VIS data
	if ("FIT_Area_ABD" %in% names(df)) df$ECD <- ecd(df$FIT_Area_ABD)
	## Problem if there is no remaining row in the data frame
	if (nrow(df) == 0) {
		warning("No valid item found (no vignettes with valid measurement data)")
		return(invisible(FALSE))
	}
	## Check that all items have associated measurements
	if (nrow(df) < nitems)
		warning(nitems - nrow(df),
			" vignettes without measurement data are eliminated (",
			nrow(df), " items remain in the object)")
	if (any(is.na(df)))
		if (isTRUE(as.logical(na.rm))) {
  	  		message("NAs found in the table of measurements and deleted")
  	  		df <- na.omit(df)
		} else message("NAs found in the table of measurements and left there")
	
	## Add attributes
	attr(df, "traindir") <- dir
	attr(df, "path") <- unique(Path)
	if (length(creator)) attr(df, "creator") <- creator
	if (length(desc)) attr(df, "desc") <- desc
	Classes <- c("ZI3Train", "ZITrain", Classes)
	class(df) <- Classes
	
	## Be sure that variables are numeric (sometimes they are wrongly imported)
#	as.numeric.Vars <- function (ZIDat, numvars) {
#	    if (is.null(numvars)) # Default values
#	        numvars <- c("ECD",
#	            "FIT_Area_ABD", "FIT_Diameter_ABD", "FIT_Volume_ABD",
#				"FIT_Diameter_ESD", "FIT_Volume_ESD", "FIT_Length", "FIT_Width",
#				"FIT_Aspect_Ratio", "FIT_Transparency", "FIT_Intensity",
#				"FIT_Sigma_Intensity", "FIT_Sum_Intensity", "FIT_Compactness",
#				"FIT_Elongation", "FIT_Perimeter", "FIT_Convex_Perimeter",
#				"FIT_Roughness", "FIT_Feret_Max_Angle", "FIT_PPC", "FIT_Ch1_Peak",
#				"FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF", "FIT_Ch3_Peak",
#				"FIT_Ch3_TOF", "FIT_Avg_Red", "FIT_Avg_Green", "FIT_Avg_Blue",
#				"FIT_Red_Green_Ratio", "FIT_Blue_Green", "FIT_Red_Blue_Ratio",
#				"FIT_CaptureX", "FIT_CaptureY", "FIT_SaveX", "FIT_SaveY",
#				"FIT_PixelW", "FIT_PixelH", "FIT_Cal_Const",
#	            "Area", "Mean", "StdDev", "Mode", "Min", "Max", "X", "Y", "XM",
#	            "YM", "Perim.", "BX", "BY", "Width", "Height", "Major", "Minor",
#				"Angle", "Circ.", "Feret", "IntDen", "Median", "Skew", "Kurt",
#				"XStart", "YStart", "Dil")
#
#	    ## Make sure numvars are numeric
#		Names <- names(ZIDat)
#	    for (numvar in numvars) {
#	        if (numvar %in% Names && !is.numeric(ZIDat[, numvar]))
#	            ZIDat[, numvar] <- as.numeric(ZIDat[, numvar])
#	    }
#	    ZIDat
#	}
#	as.numeric.Vars(df, numvars = numvars)
	df
}
getTest <- function (testdir, creator = NULL, desc = NULL, keep_ = NA,
na.rm = FALSE)
{
	## Same as getTrain() but returns a ZITest object... and read _template.zic
	## to make sure that path and classes do match!
	zicfile <- file.path(testdir, "_template.zic")
	if (!file.exists(zicfile) || !zicCheck(zicfile))
		stop("testdir does not seem to contain a valid test set (may be use getTrain()?)")
	
	res <- getTrain(traindir = testdir, creator = creator, desc = desc,
		keep_ = keep_, na.rm = na.rm)
	class(res) <- c("ZI3Test", "ZITest", class(res)[-(1:2)])
	
	## Read the _template.zic file and change res$Class factors and path accordingly
	path <- scan(zicfile, character(), sep = "\n", skip = 2, quiet = TRUE)
	if (!length(path))
		stop(sprintf("'%s' is empty or corrupted!", zicfile))
	attr(res, "path") <- path
	
	## Now, make sure to recode res$Class factor in the correct order!
	lev <- sort(basename(path))
	res$Class <- factor(as.character(res$Class), levels = lev, exclude = NULL)
	invisible(res)
}
.recodeLevels <- function (object, depth = 1)
{
	if (!inherits(object, "ZITrain"))
		stop("'ZITrain' must be a 'ZITrain' object")
	
	depth <- as.integer(depth)[1]
	
	## Get the "path" attribute
	path <- attr(object, "path")
	
	## Split strings on "/"
	path <- strsplit(path, "/", fixed = TRUE)
	
	## Functions to get last item, or an item at a given level
	level <- function (x, depth = 1)
		ifelse(length(x) >= depth, x[depth], x[length(x)])
	
	## Return a list with new levels
	sapply(path, level, depth = depth)
}
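## Not run: illustration of how .recodeLevels() collapses a hierarchy. With a
## hypothetical 'path' attribute as below, depth = 1 maps every class to its
## top-level group, and a depth beyond a path's length falls back to its
## deepest item.
if (FALSE) {
	parts <- strsplit(c("Crustacea", "Crustacea/Copepoda",
		"Crustacea/Copepoda/Calanoida"), "/", fixed = TRUE)
	level <- function (x, depth = 1)
		ifelse(length(x) >= depth, x[depth], x[length(x)])
	sapply(parts, level, depth = 1) # "Crustacea" "Crustacea" "Crustacea"
	sapply(parts, level, depth = 2) # "Crustacea" "Copepoda"  "Copepoda"
}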
recode <- function (object, ...)
	UseMethod("recode")
recode.ZITrain <- function (object, new.levels, depth, ...)
{	
	if (!missing(depth)) {
		if (!missing(new.levels))
			warning("depth is provided, so, new.levels is ignored and recomputed")
		new.levels <- .recodeLevels(object, depth)
		levels <- basename(attr(object, "path"))
		## Check that levels match those defined in the Class variable
		levels2 <- levels(object$Class)
		if (length(levels) != length(levels2))
			stop("length of levels in 'path' attribute must match levels in object$Class")
		if (any(!levels %in% levels2))
			stop("levels in the 'path' attribute do not match levels in object$Class")
	} else {
		## If new.levels is provided, just assume they match levels(object$Class)
		levels <- levels(object$Class)
		if (length(new.levels) != length(levels))
			stop("length of new.levels must match levels in object$Class")
	}
	new.levels <- as.character(new.levels)
	
	## Change levels now
	relevel <- function (x, levels, new.levels) {
		x <- as.character(x)
		for (i in 1:length(levels))
			if (new.levels[i] != levels[i])
				x[x == levels[i]] <- new.levels[i]
		as.factor(x)
	}
	
	object$Class <- relevel(object$Class, levels, new.levels)
	if (!is.null(object$Predicted))
		object$Predicted <- relevel(object$Predicted, levels, new.levels)
	if (!is.null(object$Predicted2))
		object$Predicted2 <- relevel(object$Predicted2, levels, new.levels)
	
	## If a new path is given for these new classes, change it
	path <- attr(new.levels, "path")
	### TODO: check its validity here
	if (!is.null(path)) attr(object, "path") <- path
	object
}
recode.ZITest <- recode.ZITrain
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/ZITrain.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
# Drop items in training set
dropItemsToTrain <- function (train, cl, drop.nb)
{
    ## Parameters:
    ## train: the training set
    ## cl: the class to consider
    ## drop.nb: the number of items to drop
    
    train[-sample(which(train$Class == cl), drop.nb), ]
}
# Add validated items (from contextual samples) in training set
addItemsToTrain <- function (train, CtxSmp, add.mode = "SV+NSV", threshold = NA,
dropItemsToTrain = dropItemsToTrain) 
{
    ## Parameters:
    ## train: the training set to complete
    ## CtxSmp: the contextual samples containing validated items
    ## add.mode: the mode for adding items (SV: Validated Suspects,
    ##           NSV: Validated Non-Suspects, SV+NSV: both)
    ## threshold: the maximal number of items in each class of training set 
    ## dropItemsToTrain: the function to drop items in training set (depending on threshold)
  
    ## Is 'add.mode' a valid mode?
    if (length(add.mode) != 1 || !is.character(add.mode))
        stop("'add.mode' must be a character string of length one")
    if (!add.mode %in% c("SV","NSV","SV+NSV"))
        stop("'add.mode' must be one of 'SV', 'NSV' or 'SV+NSV'")
    threshold <- as.integer(threshold)[1]
    if (!is.na(threshold) && threshold < 1)
        stop("threshold must be a positive integer or NA")
  
    ## Variables: 
    CtxData <- NULL   ## the contextual data containing validated items
    for (i in 1:length(CtxSmp)) {
        dat <- get(load(CtxSmp[i]))
        if (i == 1) {
            CtxData <- dat
        } else {
            CtxData <- merge(CtxData, dat, all = TRUE)
        }
    }
    CtxData$Id.1 <- NULL
    CtxData$X.Item.1 <- NULL
    ## Delete the items already present in the training set
    CtxData <- CtxData[!(CtxData$Id %in% train$Id), ]
    CtxValidData <- CtxData[which(CtxData$Validated == TRUE), ]
    CtxSVData <- CtxValidData[which(CtxValidData$Suspect == TRUE), ]
    CtxNSVData <- CtxValidData[which(CtxValidData$Suspect == FALSE), ]
  
    ## The classes to complete (only the classes in training set)
    ## Classes <- intersect(unique(train$Class), unique(CtxData$Class))
    ## the classes to complete (classes in both training set and contextual samples)
    Classes <- c(intersect(unique(train$Class), unique(CtxValidData$Class)), 
        setdiff(unique(CtxValidData$Class[!is.na(CtxValidData$Class)]), 
        unique(train$Class)))
    ## Paths in train + new classes
    paths <- c(attr(train, "path"), 
        setdiff(unique(CtxValidData$Class[!is.na(CtxValidData$Class)]), 
        basename(attr(train, "path"))))
    ## items in initial training set (FALSE) or items added from contextual samples (TRUE)
    train$AddedItems <- FALSE
  
    ## For each class in 'Classes'
    for (cl in Classes) {
        nbPresent <- length(which(train$Class == cl))
        nbAddSV <- NULL        # The number of items in SV to add
        nbAddNSV <- NULL       # The number of items in NSV to add
        if (nbPresent <= 1) {
            cat(paste("\n'", cl, "': ", nbPresent, " item in training set", sep = ""))
        } else {
            cat(paste("\n'", cl, "': ", nbPresent, " items in training set", sep = ""))
        }
    
        ## If threshold not reached
        if (nbPresent < threshold | is.na(threshold)) { 
            if (add.mode == "SV") {  
                # SV (Validated Suspects)
                DataSV <- CtxSVData[which(CtxSVData$Class == cl), ]
                if (NROW(DataSV) < 1) {
                    cat("\n   Adding 0 suspect item")
                } else {
                    nbAddSV <- min((threshold-nbPresent), NROW(DataSV), na.rm = TRUE)
                    if (nbAddSV == 1) {
                        cat(paste("\n   Adding", nbAddSV, "suspect item", sep = " "))
                    } else {
                        cat(paste("\n   Adding", nbAddSV, "suspect items", sep = " "))
                    }
                    AddedDataSV <- DataSV[sample(NROW(DataSV), nbAddSV),
                        !(names(CtxValidData) %in% c("Validated","Suspect"))]
                    AddedDataSV$AddedItems <- TRUE
                    train <- merge(train, AddedDataSV, 
                        by = intersect(names(train), names(AddedDataSV)), all = TRUE)
                }
            }
      
            if (add.mode == "NSV") {  
                # NSV (Validated Non-Suspects)
                DataNSV <- CtxNSVData[which(CtxNSVData$Class == cl), ]
                if (NROW(DataNSV) < 1) {
                    cat("\n   Adding 0 non-suspect item")
                } else {
                    nbAddNSV <- min((threshold-nbPresent), NROW(DataNSV), na.rm = TRUE)
                    if (nbAddNSV == 1) {
                        cat(paste("\n   Adding", nbAddNSV, "non-suspect item", sep = " "))
                    } else {
                        cat(paste("\n   Adding", nbAddNSV, "non-suspect items", sep = " "))
                    } 
                    AddedDataNSV <- DataNSV[sample(NROW(DataNSV), nbAddNSV),
                        !(names(CtxValidData) %in% c("Validated","Suspect"))]
                    AddedDataNSV$AddedItems <- TRUE
                    train <- merge(train, AddedDataNSV, 
                         by = intersect(names(train), names(AddedDataNSV)), all = TRUE)
                }
            }
      
            if (add.mode == "SV+NSV") {
                # SV (Validated Suspects)
                DataSV <- CtxSVData[which(CtxSVData$Class == cl), ]
                # NSV (Validated Non-Suspects)
                DataNSV <- CtxNSVData[which(CtxNSVData$Class == cl), ]
                nbAddSV <- min(c(ceiling(((threshold-nbPresent) /
                    (NROW(DataSV)+(NROW(DataNSV)))) * NROW(DataSV)),
                    NROW(DataSV)), na.rm = TRUE)   # ratio
                nbAddNSV <- min((threshold-nbAddSV-nbPresent), NROW(DataNSV), na.rm = TRUE)
        
                if (nbAddSV == 0) {
                    cat("\n   Adding 0 suspect item")
                } else {
                    if (nbAddSV == 1) {
                        cat(paste("\n   Adding", nbAddSV, "suspect item", sep = " "))
                    } else {
                        cat(paste("\n   Adding", nbAddSV, "suspect items", sep = " "))
                    }  
                    AddedDataSV <- DataSV[sample(NROW(DataSV), nbAddSV), 
                        !(names(CtxValidData) %in% c("Validated","Suspect"))]
                    AddedDataSV$AddedItems <- TRUE
                    train <- merge(train, AddedDataSV, 
                        by = intersect(names(train), names(AddedDataSV)), all = TRUE)
                }
        
                if (nbAddNSV == 0) {
                    cat("\n   Adding 0 non-suspect item")
                } else {
                    if (nbAddNSV == 1) {
                        cat(paste("\n   Adding", nbAddNSV, "non-suspect item", sep = " "))
                    } else {
                        cat(paste("\n   Adding", nbAddNSV, "non-suspect items", sep = " "))
                    }  
                    AddedDataNSV <- DataNSV[sample(NROW(DataNSV), nbAddNSV), 
                        !(names(CtxValidData) %in% c("Validated","Suspect"))]
                    AddedDataNSV$AddedItems <- TRUE
                    train <- merge(train, AddedDataNSV, 
                        by = intersect(names(train), names(AddedDataNSV)), all = TRUE)
                }
            }
            cat("\n")
        } else if (nbPresent >= threshold & !is.na(threshold)) { 
            ## If threshold reached or exceeded      
            if (add.mode == "SV") {  
                # SV (Validated Suspects)
                DataSV <- CtxSVData[which(CtxSVData$Class == cl), ]
                drop.nb <- min(nbPresent-threshold+ceiling(5*threshold/100), 
                    nbPresent-threshold+NROW(DataSV))
        
                if (drop.nb == 0) {
                    cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                } else {
                    if (drop.nb == 1) {
                        cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                    } else {
                        cat(paste("\n   Dropping", drop.nb, "items", sep = " "))
                    }
                    train <- dropItemsToTrain(train = train, cl = cl, drop.nb = drop.nb)
          
                    if (NROW(DataSV) < 1) {
                        cat("\n   Adding 0 suspect item")
                    } else {
                        nbAddSV <- min(threshold-(nbPresent-drop.nb), NROW(DataSV))
                        if (nbAddSV == 1) {
                            cat(paste("\n   Adding", nbAddSV, "suspect item", sep = " "))
                        } else {
                            cat(paste("\n   Adding", nbAddSV, "suspect items", sep = " "))
                        }
                        AddedDataSV <- DataSV[sample(NROW(DataSV), nbAddSV), 
                            !(names(CtxValidData) %in% c("Validated","Suspect"))]
                        AddedDataSV$AddedItems <- TRUE
                        train <- merge(train, AddedDataSV, 
                            by = intersect(names(train), names(AddedDataSV)), all = TRUE)
                    }
                }
            }
      
            if (add.mode == "NSV") { 
                # NSV (Validated Non-Suspects)
                DataNSV <- CtxNSVData[which(CtxNSVData$Class == cl), ]
                drop.nb <- min(nbPresent-threshold+ceiling(5*threshold/100), 
                       nbPresent-threshold+NROW(DataNSV))
        
                if (drop.nb == 0) {
                    cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                } else {
                    if (drop.nb == 1) {
                        cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                    } else {
                        cat(paste("\n   Dropping", drop.nb, "items", sep = " "))
                    }
                    train <- dropItemsToTrain(train = train, cl = cl, drop.nb = drop.nb)
          
                    if (NROW(DataNSV) < 1) {
                        cat("\n   Adding 0 non-suspect item")
                    } else {
                        nbAddNSV <- min(threshold-(nbPresent-drop.nb), NROW(DataNSV))
                        if (nbAddNSV == 1) {
                            cat(paste("\n   Adding", nbAddNSV, "non-suspect item", sep = " "))
                        } else {
                            cat(paste("\n   Adding", nbAddNSV, "non-suspect items", sep = " "))
                        }
                        AddedDataNSV <- DataNSV[sample(NROW(DataNSV), nbAddNSV), 
                            !(names(CtxValidData) %in% c("Validated","Suspect"))]
                        AddedDataNSV$AddedItems <- TRUE
                        train <- merge(train, AddedDataNSV, 
                            by = intersect(names(train), names(AddedDataNSV)), all = TRUE)
                    }
                }
            }
      
            if (add.mode == "SV+NSV") {  
                # SV (Validated Suspects)
                DataSV <- CtxSVData[which(CtxSVData$Class == cl), ]
                # NSV (Validated Non-Suspects)
                DataNSV <- CtxNSVData[which(CtxNSVData$Class == cl), ]
        
                drop.nb <- min(nbPresent-threshold+ceiling(5*threshold/100), 
                    nbPresent-threshold+NROW(DataSV)+NROW(DataNSV))
                nbAddSV <- min(ceiling(5*threshold/100), NROW(DataSV))
                nbAddNSV <- min(threshold-nbAddSV+drop.nb-nbPresent, NROW(DataNSV))
        
                if (drop.nb == 0) {
                    cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                } else {   
                    if (drop.nb == 1) {
                        cat(paste("\n   Dropping", drop.nb, "item", sep = " "))
                    } else {
                        cat(paste("\n   Dropping", drop.nb, "items", sep = " "))
                    }
                    train <- dropItemsToTrain(train = train, cl = cl, drop.nb = drop.nb)
          
                    if (nbAddSV == 0) {
                        cat("\n   Adding 0 suspect item")
                    } else {
                        if (nbAddSV == 1) {
                            cat(paste("\n   Adding", nbAddSV, "suspect item", sep = " "))
                        } else {
                            cat(paste("\n   Adding", nbAddSV, "suspect items", sep = " "))
                        }
                        AddedDataSV <- DataSV[sample(NROW(DataSV), nbAddSV), 
                            !(names(CtxValidData) %in% c("Validated","Suspect"))]
                        AddedDataSV$AddedItems <- TRUE
                        train <- merge(train, AddedDataSV, 
                            by = intersect(names(train), names(AddedDataSV)), all = TRUE)
                    }
          
                    if (nbAddNSV == 0) {
                        cat("\n   Adding 0 non-suspect item")
                    } else {
                        if (nbAddNSV == 1) {
                            cat(paste("\n   Adding", nbAddNSV, "non-suspect item", sep = " "))
                        } else {
                            cat(paste("\n   Adding", nbAddNSV, "non-suspect items", sep = " "))
                        }
                        AddedDataNSV <- DataNSV[sample(NROW(DataNSV), nbAddNSV), 
                            !(names(CtxValidData) %in% c("Validated","Suspect"))]
                        AddedDataNSV$AddedItems <- TRUE
                        train <- merge(train, AddedDataNSV, 
                            by = intersect(names(train), names(AddedDataNSV)), all = TRUE)
                    }
                }
            }
            cat("\n") 
        }
    }
    train <- train[order(train$Id), ]
    attr(train, "path") <- paths
    train
}
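## Not run: a hypothetical sketch of completing a training set with validated
## items from contextual samples, capping each class at 300 items.
if (FALSE) {
    train <- getTrain("MyTrainDir")
    ctx <- contextSelection() # Interactively pick '*_valid.RData' files
    train2 <- addItemsToTrain(train, ctx, add.mode = "SV+NSV",
        threshold = 300, dropItemsToTrain = dropItemsToTrain)
}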
## Function to select contextual samples (WITH GRAPH)
# contextSelection <- function()
# {
#   CtxSmp <- NULL
#   #res <- dlgMessage("Do you want to select contextual samples?", type = "yesno")$res
#   res <- "yes"
#   while (res != "no") {
#     ## Graph for selection of contextual samples ('_valid.RData' files)
#     ctxSmpdir <- dlgDir(title = paste("Select dir containing contextual samples ('*_valid.RData' files)"))$res
#     ctxSmp <- list.files(ctxSmpdir, pattern = "_valid.RData")
#     
#     if(length(ctxSmp) < 1) {
#       warning("No validated files selected in this directory!")
#       return(invisible(NULL))
#     } else {
#       if (length(ctxSmp) > 20) {
#         msg <- paste(length(ctxSmp), "validated samples in this directory!\nPlease pre-select the most relevant samples...")
#         res <- dlgMessage(msg, type = "okcancel")$res
#         if (res == "cancel") return(invisible(NULL))
#         ctxSmp <- dlgList(ctxSmp, multiple = TRUE, title = "Select contextual samples (20 max):")$res
#         if (!length(ctxSmp)) return(invisible(NULL))
#       }
#       nbValid <- NULL
#       nbNotValid <- NULL
#       nbItems <- NULL
#       
#       ## Function to identify the bars selected by user (drawn rect)
#       identifyBar <- function(x, y = NULL, n = length(x), ...) {
#         sel <- rep(FALSE, length(x))
#         res <- NULL
#         while(sum(sel) < n) {
#           ans <- identify(x[!sel], y[!sel], n = 1, plot = FALSE, tolerance = .1)
#           if(!length(ans)) break
#           ans <- which(!sel)[ans]
#           rect(0, graph.y[ans]-.5, nbItems[which(bp == graph.y[ans])], graph.y[ans]+.5, lwd = 3)
#           sel[ans] <- TRUE
#           res <- c(res, ans)
#         }
#         dev.off()
#         res
#       }
#       
#       ## How many validated items in each selected contextual samples?
#       for (i in 1:length(ctxSmp)) {
#         dat <- get(load(paste(ctxSmpdir, ctxSmp[i], sep = "/")))
#         nbItems <- c(nbItems, nrow(dat))
#         nbNotValid <- c(nbNotValid, nrow(dat) - length(which(dat$Validated == TRUE)))
#         nbValid <- c(nbValid, length(which(dat$Validated == TRUE)))
#       }
#       #ctxSmp <- ctxSmp[which(nbValid != 0)]
#       #nbItems <- nbItems[which(nbValid != 0)]
#       #nbNotValid <- nbNotValid[which(nbValid != 0)]
#       #nbValid <- nbValid[which(nbValid != 0)]
#       nb <- rbind(nbValid, nbNotValid)
#       
#       ## Display the numbers of validated items in a graph
#       par(mar=c(4,10,4,4))
#       bp <- barplot(nb, horiz = TRUE, axes = FALSE, col = c("green4", "red"), border = NA, space = .6)
#       axis(2, at = bp, label = sub("_valid.*", "", ctxSmp), las = 2, cex.axis = .6)
#       axis(1, cex.axis = .6)
#       title("Contextual samples selection", adj = 1)
#       title(paste("\n\n\nClick bars and right-click when done."), adj = 1, font.main = 2, cex.main = .6)
#       graph.y <- do.call("c", lapply(1:length(bp), function(x) rep(bp[x], nbItems[x])))
#       graph.x <- do.call("c", lapply(unique(bp), function(x) seq(1, nbItems[which(bp == x)])))
#       legend("topright", legend = c("Validated", "Not validated"), col = c("green4", "red"), cex = .6, lty = 1, lwd = 3)
#       
#       selectedCtxSmp <- identifyBar(x = graph.x, y = graph.y)
#       ctxSmp <- ctxSmp[which(bp %in% unique(graph.y[selectedCtxSmp]))]
#       ctxSmp <- paste(ctxSmpdir, ctxSmp, sep = "/")
#       CtxSmp <- c(CtxSmp, ctxSmp)
#     }    
#     res <- dlgMessage("Do you want to select more contextual samples?", type = "yesno")$res
#   }
#   CtxSmp <- unique(CtxSmp)
#   CtxSmp
# }
## Function to select contextual samples (WITH LIST)
contextSelection <- function()
{
    CtxSmp <- NULL
    res <- "yes"
    while (res != "no") {   
        smpdir <- dlgDir(title = paste("Select dir containing contextual samples ('*_valid.RData' files)"))$res
        smp <- list.files(smpdir, pattern = "_valid.RData")
#       while (length(smp) < 1) {
#           dlgMessage("No validated files in this directory!", type = "ok")
#           smpdir <- dlgDir(title = paste("Select dir containing contextual samples ('*_valid.RData' files)"))$res
#           smp <- list.files(smpdir, pattern = "_valid.RData")
#       }
    
        nbValid <- NULL
        if (length(smp) < 1) {
            dlgMessage("No validated files in this directory!", type = "ok")
        } else {          
            ## How many validated items in each selected contextual sample?
            for (i in 1:length(smp)) {
                dat <- get(load(paste(smpdir, smp[i], sep = "/")))
                nbValid <- c(nbValid, length(which(dat$Validated == TRUE)))
            }
            smp <- smp[which(nbValid != 0)]
            nbValid <- nbValid[which(nbValid != 0)]
    
            if (length(smp) < 1) {
                dlgMessage("No validated files in this directory!", type = "ok")
            } else {
                ctxSmp <- dlgList(paste(sub("_valid.*", "", smp),
                    " (", nbValid, " validated items)", sep = ""),
                    multiple = TRUE, title = "Select contextual samples:")$res
                if (!length(ctxSmp)) return(invisible(NULL))
                ctxSmp <- paste(smpdir, smp[sub("_valid.*", "", smp) %in%
                    sub(" \\(.*", "", ctxSmp)], sep = "/")
                CtxSmp <- c(CtxSmp, ctxSmp)    
            }
        }
        res <- dlgMessage("Do you want to continue the selection?", type = "yesno")$res
    }
    unique(CtxSmp)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/activeLearning.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## TODO: disable next button when in last fraction
## TODO: save temporary and final results in the zidb file
## TODO: correct bugs with the back button
## Load libraries
#library(svMisc) #,  lib.loc = "./Libraries/V3")
#library(svDialogs) #, lib.loc = "./Libraries/V3")
#library(zooimage) #, lib.loc = "./Libraries/V3")
##library(RANN) # Only function: nn2()... apparently not used here!
#library(randomForest)
#library(e1071)
#library(RWeka)
#library(C50)
#library(ipred)
#### Functions not used, but kept for possible future use ######################
## Area under the curve
auc <- function (x, y) {
	if (length(x) != length(y) || length(x) < 2)
		stop("'x' and 'y' must have same length >= 2")
	sum(diff(x) * (y[-1] + y[-length(y)]) / 2)
}
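## Not run: quick check of auc() with made-up values; the trapezium rule is
## exact for piecewise linear curves, so y = x over [0, 1] gives 0.5.
if (FALSE) {
	x <- seq(0, 1, by = 0.1)
	auc(x, x) # 0.5
}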
#### Functions NOT used by errorCorrection(), but by examples ##################
## TODO: corrHist() and addPoints() are used together... make it one plot() method?
## TODO: make it an S3 object with print, summary and plot methods + predict + ...
## Function to compute the size spectrum of particles as a 'histogram' object.
## Could be used for error correction of biomass (future work)
corrHist <- function (data, subset, sizeParam, interval) {
	data <- data[subset, sizeParam]
	if (!length(data)) return(0)
	hist(data, breaks = seq(min(data) - (2* interval), max(data) +
		(2 * interval), interval), plot = FALSE)
}
## Add points to an histogram
addPoints <- function (x, ...) {
	if (!inherits(x, "histogram"))
		stop("x must be an object of class 'histogram'")
	points(x = x$mids[x$counts > 0], y = x$counts[x$counts > 0], ...)
}
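## Not run: corrHist() and addPoints() used together on made-up ECD data:
## compute the size spectrum, plot it, then overlay the non-empty counts.
if (FALSE) {
	set.seed(42)
	dat <- data.frame(ECD = rlnorm(500, meanlog = 0, sdlog = 0.3))
	h <- corrHist(dat, subset = 1:nrow(dat), sizeParam = "ECD", interval = 0.1)
	plot(h)
	addPoints(h, col = "red", pch = 16)
}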
## Calculation of the residual error based on global error estimation
residError <- function (ErrorEstim, Error, Validated, na.rm = FALSE) {
	nObs <- length(Validated)
	if (sum(Validated) == nObs) return(0) # No error since everybody validated
	nErr <- ErrorEstim * nObs
	nErrVal <- sum(Error & Validated, na.rm = na.rm)
	nErrRes <- nErr - nErrVal
	max(nErrRes / nObs, 0) # To avoid potential negative values
}
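## Not run: arithmetic check of residError() with made-up values: a global
## error estimate of 10% over 200 items means 20 expected errors; with 5 of
## them already caught among validated items, (20 - 5) / 200 = 0.075 remain.
if (FALSE) {
	Validated <- rep(c(TRUE, FALSE), c(50, 150))
	Error <- rep(FALSE, 200); Error[c(1:5, 60:74)] <- TRUE
	residError(ErrorEstim = 0.1, Error = Error, Validated = Validated) # 0.075
}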
## Evolution of residual error
residErrorEvol <- function (ErrorEstimEvol, Error, Validated, Step,
na.rm = FALSE) {
	nIter <- length(ErrorEstimEvol)
	if (nIter == 0) return(numeric(0))
	res <- numeric(nIter)
	for (i in 1:nIter) {
		Valid <- Validated & Step < i  #Step %in% 0:(i - 1)
		res[i] <- residError(ErrorEstim = ErrorEstimEvol[i], Error = Error,
			Validated = Valid, na.rm = na.rm)
	}
	res
}
#### Functions used both by errorCorrection() and by examples ##################
## Compute the Bray-Curtis dissimilarity index on two vectors
## Note that this is faster than vegan::vegdist for two vectors
## and it remains faster for a vector versus a matrix. But for all pairs,
## use vegan::vegdist(x, method = "bray") instead
## x must be a reference vector, and y can be a vector of same length,
## or a matrix or a data frame with same number of rows
dissimilarity <- function (x, y, na.rm = FALSE) {
	if (is.data.frame(y)) y <- as.matrix(y)
	if (!is.numeric(x) || !is.numeric(y))
		stop("'x' and 'y' must be numeric")
	if (!is.null(dim(x)))
		stop("'x' must be a vector, not a matrix or an array")
	if (is.matrix(y)) {
		if (length(x) != nrow(y))
			stop("'y' must have same rows as the length of 'x'")
				## The matrix calculation version
		colSums(abs(x - y), na.rm = na.rm) /
			colSums(rbind(y, sum(x, na.rm = na.rm)), na.rm  = na.rm)
	} else { # x and y must be vectors of same length
		if (!is.null(dim(y)) || length(x) != length(y))
			stop("'x' and 'y' must be two vectors of same length")
		## The simpler vector version
		sum(abs(x - y), na.rm = na.rm) / sum(x, y, na.rm = na.rm)
	}
}
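## Not run: quick checks of dissimilarity() on made-up abundance vectors:
## identical vectors give 0, disjoint ones give 1.
if (FALSE) {
	x <- c(10, 0, 5)
	dissimilarity(x, x)          # 0
	dissimilarity(x, c(0, 8, 0)) # 1
	dissimilarity(x, c(8, 2, 5)) # sum(|x - y|) / sum(x, y) = 4/30
}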
#### Functions used only by errorCorrection() ##################################
## Calculation of the global error based on validated items
## /!\ NOT based on items randomly selected --> Bad approximation
.globalError <- function (suspect, error, subset, na.rm = FALSE) {
	suspTot <- sum(suspect, na.rm = na.rm)
	trustTot <- sum(!suspect, na.rm = na.rm)
	vSusp <- suspect[subset]
	vTrust <- !suspect[subset]
	susp <- sum(vSusp, na.rm = na.rm)
	trust <- sum(vTrust, na.rm = na.rm)
	pSusp <- susp / suspTot
	pTrust <- trust / trustTot
	wTrust <- pSusp / pTrust
	if (is.na(wTrust) || !is.finite(wTrust)) wTrust <- 1
	vErr <- error[subset]
	err <- sum(vErr[vSusp], na.rm = na.rm) + wTrust * sum(vErr[vTrust],
		na.rm = na.rm)
	tot <- susp + wTrust * trust
	err / tot
}
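## Not run: made-up illustration of the reweighting in .globalError(). All 20
## suspects but only 20 of the 80 trusted items are validated, so errors seen
## on trusted items are weighted by wTrust = (20/20) / (20/80) = 4.
if (FALSE) {
	suspect <- rep(c(TRUE, FALSE), c(20, 80))
	error <- c(rep(c(TRUE, FALSE), c(8, 12)), rep(c(TRUE, FALSE), c(4, 76)))
	.globalError(suspect, error, subset = 1:40)
	# (8 + 4 * 4) / (20 + 4 * 20) = 0.24
}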
## Error in each of the four fractions
.errorInFractions <- function (suspect, error, validated, iteration = NA,
	na.rm = FALSE) {
	## The four fractions
	suspValIdx <- suspect & validated
	suspNotValIdx <- suspect & !validated
	trustValIdx <- !suspect & validated
	trustNotValIdx <- !suspect & !validated
	## Error in each fraction
	errSuspValIdx <- error[suspValIdx]
	errSuspNotValIdx <- error[suspNotValIdx]
	errTrustValIdx <- error[trustValIdx]
	errTrustNotValIdx <- error[trustNotValIdx]
	## Number of items in each fraction
	nSuspVal <- sum(suspValIdx, na.rm = na.rm)
	nSuspNotVal <- sum(suspNotValIdx, na.rm = na.rm)
	nSuspTot <- nSuspVal + nSuspNotVal
	nTrustVal <- sum(trustValIdx, na.rm = na.rm)
	nTrustNotVal <- sum(trustNotValIdx, na.rm = na.rm)
	nTrustTot <- nTrustVal + nTrustNotVal
	## Number of errors in each fraction
	nErrSuspVal <- sum(errSuspValIdx, na.rm = na.rm)
	nErrSuspNotVal <- sum(errSuspNotValIdx, na.rm = na.rm)
	nErrSuspTot <- nErrSuspVal + nErrSuspNotVal
	nErrTrustVal <- sum(errTrustValIdx, na.rm = na.rm)
	nErrTrustNotVal <- sum(errTrustNotValIdx, na.rm = na.rm)
	nErrTrustTot <- nErrTrustVal + nErrTrustNotVal
	## Number of errors in the validated fraction if errors were randomly distributed
	nErrSuspValRd <- nErrSuspTot / nSuspTot * nSuspVal
	nErrTrustValRd <- nErrTrustTot / nTrustTot * nTrustVal
	## Error rate in each fraction
	errSuspVal <- nErrSuspVal / nSuspVal
	errSuspNotVal <- nErrSuspNotVal / nSuspNotVal
	errTrustVal <- nErrTrustVal / nTrustVal
	errTrustNotVal <- nErrTrustNotVal / nTrustNotVal
	## Weight for trusted items
	probaSusp <- nSuspVal / nSuspTot
	probaTrust <- nTrustVal / nTrustTot
	weightTrust <- probaSusp / probaTrust
	## Results: data frame
	if (!all(is.na(iteration))) {
  	## Calculation of error in validated fraction at current iteration
		suspStepIdx <- suspect & iteration
		trustStepIdx <- !suspect & iteration
		errSuspStepIdx <- error[suspStepIdx]
		errTrustStepIdx <- error[trustStepIdx]
		nSuspStep <- sum(suspStepIdx, na.rm = na.rm)
		nTrustStep <- sum(trustStepIdx, na.rm = na.rm)
		nErrSuspStep <- sum(errSuspStepIdx, na.rm = na.rm)
		nErrTrustStep <- sum(errTrustStepIdx, na.rm = na.rm)
		errSuspStep <- nErrSuspStep / nSuspStep
		errTrustStep <- nErrTrustStep / nTrustStep
		res <- data.frame(errSuspVal = errSuspVal, errTrustVal = errTrustVal,
			errSuspNotVal = errSuspNotVal, errTrustNotVal = errTrustNotVal,
			errSuspStep = errSuspStep, errTrustStep = errTrustStep,
			nErrSuspTot = nErrSuspTot, nErrTrustTot = nErrTrustTot,
			nErrSuspVal = nErrSuspVal, nErrTrustVal = nErrTrustVal,
			nErrSuspStep = nErrSuspStep, nErrTrustStep = nErrTrustStep,
			nErrSuspValRd = nErrSuspValRd, nErrTrustValRd = nErrTrustValRd,
			nErrSuspNotVal = nErrSuspNotVal, nErrTrustNotVal = nErrTrustNotVal,
			nSuspTot = nSuspTot, nTrustTot = nTrustTot, nSuspVal = nSuspVal,
			nTrustVal = nTrustVal, nSuspStep = nSuspStep,
			nTrustStep = nTrustStep, nSuspNotVal = nSuspNotVal,
			nTrustNotVal = nTrustNotVal, weightTrust = weightTrust)
	} else {
		## Calculation of error in global sample
		res <- data.frame(errSuspVal = errSuspVal, errTrustVal = errTrustVal,
			errSuspNotVal = errSuspNotVal, errTrustNotVal = errTrustNotVal,
			nErrSuspTot = nErrSuspTot, nErrTrustTot = nErrTrustTot,
			nErrSuspVal = nErrSuspVal, nErrTrustVal = nErrTrustVal,
			nErrSuspValRd = nErrSuspValRd, nErrTrustValRd = nErrTrustValRd,
			nErrSuspNotVal = nErrSuspNotVal, nErrTrustNotVal = nErrTrustNotVal,
			nSuspTot = nSuspTot, nTrustTot = nTrustTot, nSuspVal = nSuspVal,
			nTrustVal = nTrustVal, nSuspNotVal = nSuspNotVal,
			nTrustNotVal = nTrustNotVal, weightTrust = weightTrust)
	}
	res
}
## Compute probabilities that will be used for suspect detection
classProba <- function (data, predicted = data$Predicted, classifier,
diff.max = 0.2, prop.bio = NULL) {
	## Get first and second identification probabilities
	## TODO: only works for randomForest and does not use mlearning!
	if (inherits(classifier, "randomForest")) {
		if (inherits(classifier, "ZIClass")) {
			data <- attr(classifier, "calc.vars")(data)
		}
		class(classifier) <- class(classifier)[-1]
		prob <- predict(classifier, newdata = data, class.only = FALSE,
			type = "membership")
	} else stop("Suspect detection not yet implemented for this algorithm")
	max.ids <- apply(prob, 1, sort, decreasing = TRUE)[1:2, ]
	first <- max.ids[1, ]
	second <- max.ids[2, ]
	## 1) Coefficient quantifying quality of identification
	identCoef <- sqrt(first * pmin((first - second) / (first * diff.max), 1))
	## 2) Probability of identification in that group
	#first
	## 3) Difference between first and second probabilities
	probaDiff <- first - second
	## 4) Proportion of the group (rare groups tend to have more FP!)
	predTable <- table(predicted)
	prop <- predTable / sum(predTable)
	gpProp <- prop[as.character(predicted)]
	# 5) ProbaBio modulation?
	if (is.null(prop.bio)) {
		proba <- data.frame(IdentCoef = identCoef, TreeProp = first,
		ProbaDiff = probaDiff, GpProp = gpProp)
	} else {
		bioFact <- prop.bio[as.character(predicted)]
		proba <- data.frame(IdentCoef = identCoef, TreeProp = first,
		ProbaDiff = probaDiff, GpProp = gpProp, BioFact = bioFact)
	}
	attr(proba, "IdentParam") <- list(DiffMax = diff.max,
		ProbaBio = prop.bio, ProbaTable = prob)
	proba
}
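## Not run: numeric illustration of the identification coefficient above with
## made-up probabilities and diff.max = 0.2. A clear winner (0.8 vs 0.4)
## saturates the pmin() term, giving sqrt(0.8); a close second (0.8 vs 0.75)
## drops the coefficient to 0.5.
if (FALSE) {
	diff.max <- 0.2
	first <- c(0.8, 0.8); second <- c(0.4, 0.75)
	sqrt(first * pmin((first - second) / (first * diff.max), 1))
	# 0.894... and 0.5
}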
## Compute the proportion of false positives according to Bayes theorem
corrProba <- function (proba, predicted, confusion, nobs) {
	stats <- summary(confusion, type = c("Recall", "Specificity"))
	table.pred <- table(predicted)
	prop <- table.pred / nobs
	recall <- stats$Recall
	specif <- stats$Specificity
	fpprop <- structure(numeric(length(table.pred)), names = names(table.pred))
	for (name in names(table.pred)) {
		prop.a <- prop[name]
		prop.b <- 1 - prop.a
		recall.a <- stats[name, ]$Recall
		fprate <- 1 - stats[name, ]$Specificity
		b.in.a <- fprate * prop.b
		a.in.a <- recall.a * prop.a
		fpprop[name] <- b.in.a / (b.in.a + a.in.a)
		if (is.nan(fpprop[name])) fpprop[name] <- 1
	}
	proba$FP <- fpprop[as.character(predicted)]
	## Compare this proportion to group proportion
	## PhG: If proba$GpProp is not defined, I got an error here...
	## ... so, try to recalculate it now in this case
	if (is.null(proba$GpProp)) {
	  predTable <- table(predicted)
	  prop <- predTable / sum(predTable)
	  proba$GpProp <- prop[as.character(predicted)]
	}
	proba$GpFPDiff <- proba$GpProp - proba$FP
	proba
}
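## Not run: worked example of the Bayes computation above with made-up rates.
## For a group covering 10% of items, recall 0.9 and specificity 0.95:
if (FALSE) {
	prop.a <- 0.1; prop.b <- 1 - prop.a  # Group vs rest
	recall.a <- 0.9; fprate <- 1 - 0.95
	b.in.a <- fprate * prop.b   # Items from other groups misclassified here
	a.in.a <- recall.a * prop.a # Items of the group correctly classified
	b.in.a / (b.in.a + a.in.a)  # 1/3 of the predicted group are false positives
}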
## Detect suspect particles using different algorithms and update corr$Suspect
suspect <- function (data, proba, error, subset, predicted, confusion,
algorithm = "rf", knn.k = 3, svm.kernel = "polynomial", svm.degree = 5, ...) {
	algorithm <- match.arg(algorithm,
		c("rf", "knn", "svm", "bayes", "c50", "glm"))
	## In the improbable case we have no error at all, consider everyone as suspect...
	if (all(as.character(error) != "Error"))
		return(factor(rep("Error", nrow(data)), levels = levels(error)))
	## Update proba with latest calculations, according to a corrected confusion matrix
	proba <- corrProba(proba, predicted, confusion, nrow(data))
	set <- cbind(data, proba, Error = error)
	## TODO: mlearning is there for complete interchangeability of algorithms
	## thus, we should not need this switch() construct!
	res <- switch(algorithm,
		rf = {
			rf.model <- mlearning(formula = Error ~ ., data = set,
				subset = subset, method = "mlRforest", ...)
			susp <- predict(rf.model, newdata = set, type = "class")
			susp[subset] <- predict(rf.model, type = "class", method = "oob")
		},
		knn = {
		#	set <- subset(set, select = -c(Error))
		#	warning("Prediction of training set by 'knn' method without cross validation")
		#	susp <- knn(train = set[subset, ], test = set,
		#		cl = corr$Error[subset], k = knn.k, ...)
		stop("Not implemented yet!")
		},
		svm = {
		#	svm.model <- mlearning(Error ~ ., data = set,
		#		subset = subset, kernel = svm.kernel, degree = svm.degree,
		#		method = "mlSvm", ...)
		#	susp <- predict(svm.model, newdata = set, type = "class")
		#	susp[subset] <- predict(svm.model, type = "class", method = "cv")
		stop("Not implemented yet!")
		},
		bayes = {
		#	nb.model <- mlearning(Error ~ ., data = set,
		#		subset = subset, method = "mlNaiveBayes", ...)
		#	susp <- predict(nb.model, newdata = set, type = "class")
		#	susp[subset] <- predict(nb.model, type = "class", method = "cv")
		stop("Not implemented yet!")
		},
		c50 = {
		#	C50.train <- set[subset, ]
		#	c50.model <- C5.0(formula = Error ~ ., data = C50.train, ...)
		#	susp <- predict(c50.model, newdata = set, type = "class")
		#	susp[subset] <- cv(y = C50.train$Error, formula = Error ~ .,
		#		data = C50.train, model =  C5.0, predict =
		#		function(object, newdata) predict(object, newdata,
		#		type = "class"), k = 10, predictions = TRUE)$predictions
		stop("Not implemented yet!")
		},
		glm =  {
		#	glm.model <- Logistic(Error ~ ., data = set, subset = subset, ...)
		#	warning("Prediction of training set by 'glm' method without cross-validation")
		#	susp <- predict(glm.model, newdata = set, type = "class")
		stop("Not implemented yet!")
		},
		stop("Unknown algorithm '", algorithm, "'")
	)
	susp
}
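## Illustrative call (a sketch with hypothetical objects 'dat', 'prob', 'err',
## 'val', 'pred' and 'conf'; only the "rf" algorithm is implemented so far):
## susp <- suspect(dat, prob, error = err, subset = val, predicted = pred,
##     confusion = conf, algorithm = "rf")
## table(susp) # items flagged "Error" are the suspect particles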
## Main function for error correction
## TODO: replace data by ZIobj and put zidb in first position... or group
## both items (but how would we retrieve the class then?)
## TODO: group options in a single object too
errorCorrection <- function (data, classifier, mode = "validation",
fraction = 0.05, sample.min = 100, sample.max = 200, grp.min = 2,
random.sample = 0.1, algorithm = "rf", diff.max = 0.2, prop.bio = NULL,
rare = 0.01, zidb = NULL, testdir = NULL, id = NULL,
result = ".last_valid", envir = parent.frame()) {
	#### Parameter explanations
	#### Data and classifier
	## data -- the dataset to study
	if (missing(data) || !inherits(data, "ZIDat"))
		stop("data must be a ZIdat object")
	## Temporary hack to eliminate possible unwanted columns!
	data$Id.1 <- NULL
	data$X.Item.1 <- NULL
	## Make sure items are sorted alphabetically according to their id
	if (is.null(data$Id)) {
		data <- data[order(paste(data$Label, data$Item, sep = "_")), ]
	} else data <- data[order(data$Id), ]
	## classifier -- the classifier to use to classify particles
	if (missing(classifier) || !inherits(classifier, "ZIClass"))
		stop("classifier must be a ZIClass object")
#######	calc.vars <- attr(classifier, "calc.vars")
	#### General parameters of the iterative correction
	## mode -- the mode (validation, stat or demo)
	mode <- match.arg(mode, c("validation", "demo", "stat"))
	if (mode != "validation" & is.null(data$Class))
		 	stop("data requires a 'Class' column in this mode")
	## fraction -- fraction of sample to validate
	fraction <- as.numeric(fraction)[1]
	if (fraction < 0.01 || fraction > 1)
		stop("fraction must be between 0.01 and 1")
	## sample.min -- minimum number of particles to validate at each step
	sample.min <- as.integer(sample.min)[1]
	if (sample.min < 1)
		stop("sample.min must be a positive integer")
	## sample.max -- maximum number of particles to validate at each step
	sample.max <- as.integer(sample.max)[1]
	if (sample.max < 1)
		stop("sample.max must be a positive integer")
	if (sample.max < sample.min)
		stop("sample.max must be higher or equal than sample.min")
	## grp.min -- minimum number of particles of each group to validate
	grp.min <- as.integer(grp.min)[1]
	if (grp.min < 1 || grp.min > sample.min)
		stop("grp.min must be a positive integer and cannot be larger than sample.min")
	## random.sample -- fraction of random sample in the validation set
	random.sample <- as.numeric(random.sample)[1]
	if (random.sample < 0 || random.sample > 1)
		stop("random.sample must be a number between 0 and 1")
	#### Parameters for the detection of suspects
	## algorithm -- algorithm used to detect suspect particles
	## diff.max -- maximum tolerated difference in probabilities for class identification
	diff.max <- as.numeric(diff.max)[1]
	if (diff.max < 0)
		stop("diff.max must be a positive number or zero")
	## prop.bio -- named group proportions, based on biological knowledge
	if (length(prop.bio) && (!is.numeric(prop.bio) || is.null(names(prop.bio))))
		stop("prop.bio must be a named vector (groups) of numbers")
	## rare -- detection of rare items
	rare <- as.numeric(rare)[1]
	if (rare < 0 || rare > 0.2)
		stop("rare must be between 0 and 0.2")
	## zidb -- path to the zidbfile to manually validate
	## testdir -- path of the directory used for manual validation
	## Variables
	proba <- NULL			# identification probabilities (additional vars for suspect detection)
	corr <- NULL			# main information about the correction
	validated <- NULL		# validated class of the particles
	validation.data <- NULL # data sent back as validation of a given step
	predicted <- NULL		# predictions at the beginning
	step <- -1 				# current iteration
	sample.size <- NULL		# size of the next subsample to validate
	## TODO: eliminate this and get it from table(corr$Step)
	validated.fracs <- 0 	# fraction of items validated at each step
	## Manual validation variables
	ntrusted <- NULL			# number of trusted particles detected
	nsuspect <- NULL			# number of suspect particles detected
	ntrusted.tovalid <- NULL 	# number of trusted particles to validate
	nsuspect.tovalid <- NULL 	# number of suspect particles to validate
	testset <- NULL				# subsample to validate
	## Validation of particles TODO: get rid of these two flags!
	step.manual <- FALSE		# should the user validate particles?
	testset.validated <- FALSE	# is the testset validated?
	## Plankton sorter variables
	sorter.title <- NULL 		# title of the plankton sorter page
	sorter.id <- NULL			# identifier of the plankton sorter page
	## Results
	manual.history <- NULL		# history of the manual confusion matrix
	manual2.history <- NULL		# history of manual + 2nd ident confusion matrix
	corr.confusion <- NULL 		# corrected confusion matrix
	classRare <- NULL			# String with the list of classes considered as rare
	cell.confusion <- NULL   	# corrected confusion matrix for cells
    bioweight.confusion <- NULL # corrected confusion matrix for bioweight
	correction.history <- NULL 	# history of the correction confusion matrix
	correctionCell.history <- NULL   # history of the correction confusion matrix for cells
    correctionBio.history <- NULL   # history of the correction confusion matrix for bioweight
	error.estim.data <- NULL 	# data used to estimate the error
	error.estim <- NULL			# history of the error estimation
	error.estim.history <- NULL # evolution of the error estimation
	error.estim.rd <- NULL 		# error using validated, randomly selected items
	error.estim.rd.history <- NULL # evolution of the error estimation
	suspect.history <- list()	# history of suspect detection
	validated.history <- list()	# history of validated particles
	error.valid.history <- list() # history of error among validated items
	error.suspect.history <- list() # history of error among suspect items
	error.trusted.history <- list() # history of error among trusted items
	nsuspect.history <- NULL 	# history of the number of suspect items
	ntrusted.history <- NULL 	# history of the number of trusted items
	nsuspect.tovalid.history <- NULL # history of suspect validation
	ntrusted.tovalid.history <- NULL # history of trusted validation
	errInFract.history <- data.frame() # evolution of the error in each fraction
	## Initialize the data for error correction (data, proba, corr and others)
	initialize <- function () {
		## Check that this directory exists and is empty
		if (mode != "stat") {
			if (is.null(testdir))
				testdir <<- file.path(tempdir(),
					noExtension(zidb))
			if (file.exists(testdir)) {
				res <- dlgMessage(paste("Temporary validation directory already",
					"exists. Do we erase old data there?"), type = "okcancel")$res
				if (res == "cancel")
					stop("testdir (", testdir, ") must not exist yet!")
				unlink(testdir, recursive = TRUE)
			}
			dir.create(testdir, recursive = TRUE)
			if (!file.exists(testdir))
				stop("cannot create 'testdir'!")
			testdir <<- normalizePath(testdir)
			## Put required files there: create the planktonSorter directory
			plSort <- file.path(testdir, "planktonSorter")
			dir.create(plSort)
			plFiles <- dir(system.file("planktonSorter", package = "zooimage"),
				full.names = TRUE)
			res <- file.copy(plFiles, file.path(plSort, basename(plFiles)))
			if (any(!res))
				stop("Problem when copying one or more plankton sorter files")
		}
		## Indicate that we are preparing the validation environment
		cat("Preparing a validation environment...\n")
		## Be sure that data is correctly ordered
		data <<- data[order(data$Item), ]
		## Be sure 'Id' exists in data
		data$Id <- makeId(data)
		## Make sure items' membership is predicted by the classifier
		predicted <- data$Predicted
		## TODO: shouldn't we do this all the time???
		if (is.null(predicted)) # Predict class if it wasn't done yet
			predicted <- predict(classifier, data, class.only = TRUE,
				type = "class")
		predicted <<- predicted
#		if (mode != "validation") {
			## Store validated items and table of validated items
			validated <<- data$Class
			## TODO: why this, and why is Class put in validated?
			data$Class <- NULL
			data <<- data
#		}
		## Keep all data
###		data <<- attr(classifier, "calc.vars")(data)
###		## Do not keep incomplete attributes
###		data <<- data[, sapply(data, FUN = function(x) !any(is.na(x)))]
		proba <<- classProba(data, predicted, classifier, diff.max = diff.max,
			prop.bio = prop.bio)
		nobs <- nrow(data)
		error <- factor(rep("Error", nobs), levels = c("Correct", "Error"))
		## Compute the second possible identification
		secondIdent <- function (groups) {
			proba.table <- attr(proba, "IdentParam")$ProbaTable
			proba.order <- apply(proba.table, 1, order, decreasing = TRUE)
			fst.ids <- proba.order[1, ]
			scd.ids <- proba.order[2, ]
			proba.max <- apply(proba.table, 1, sort, decreasing = TRUE)[1, ]
			max.ids <- proba.max == 1
			scd.ids[max.ids] <- fst.ids[max.ids]
			factor(groups[scd.ids], levels = groups)
		}
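		## E.g., with per-item probabilities c(a = 0.6, b = 0.3, c = 0.1), the
		## second identification is "b"; items whose highest probability is
		## exactly 1 keep their first identification as second one.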
		predicted2 <- secondIdent(levels(predicted))
		predTable <- table(predicted)
		prop <- predTable / sum(predTable)
		classRare <<- names(which(prop < rare))
		## Creation of corr object
		corr <<- data.frame(Actual = predicted, Actual2 = predicted2,
			Predicted = predicted, Predicted2 = predicted2, Validated = FALSE,
			Error = error, Step = step, Suspect = rep(TRUE, nobs),
			Rare = predicted %in% classRare, RdValidated = rep(Inf, nobs),
			OtherGp = rep(FALSE, nobs))
		## Confusion matrix of original classifier
		train.validated <- attr(classifier, "response")
		train.predicted <- attr(classifier, "cvpredict")
		train.conf <- confusion(x = train.predicted, y = train.validated)
		## First error correction: we start with Solow et al. estimation
		## Compute correction of abundances using a method similar to Solow et al.,
		## but that is not stuck with singular matrices problems
		## TODO: this takes a long time => try Solow et al first before using this!
		correctionLab <- function (x, conf) {
			l <- length(x)
			if (l != ncol(conf))
				stop("'x' has not the same length than 'conf'")
			toMat <- function(x, byrow = TRUE)
				matrix(rep(x, l), ncol = l, byrow = byrow)
			predMat <- toMat(colSums(conf))
			xMat <- toMat(x)
			## Remove 0
			notZero <- function (x) {
				if (0 %in% x) x[x==0] <- 1
				x
			}
			confB <- conf / notZero(predMat) * xMat
			x2 <- rowSums(confB)
			classMat <- toMat(rowSums(conf), byrow = FALSE)
			while (!isTRUE(all.equal(x, x2, tolerance = 0.00001))) {
				x <- x2
				confA <- conf / notZero(classMat) * toMat(x2, byrow = FALSE)
				xMat2 <- toMat(colSums(confA))
				confB <- confA / notZero(xMat2) * xMat
				x2 <- rowSums(confB)
			}
			round(x2)
		}
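		## Illustrative sketch (toy numbers, not run): with a 2 x 2 training
		## confusion matrix (rows = actual, columns = predicted) and observed
		## predicted counts x, the iterative rescaling of rows and columns
		## above converges to estimated actual abundances:
		## conf <- matrix(c(40, 5, 10, 45), 2, 2)   # hypothetical confusion
		## correctionLab(x = c(60, 40), conf = conf)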
		tablePredicted <- table(predicted)
		correction.history <<- correctionLab(x = tablePredicted,
			conf = train.conf)
		manual.history <<- tablePredicted
		manual2.history <<- manual.history
		## Increment step (should be 0 now, because initial value is -1)
		step <<- step + 1
		## Determine the number of vignettes to manually validate
		setSampleSize()
	}
	## Compute the size of the next subsample: update sample.size
	setSampleSize <- function () {
		## Number of items we want to take
		sample.size <<- ceiling(nrow(data) * fraction)
		## No less than sample.min
		sample.size <<- max(sample.size, sample.min)
		## According to complexity of the training set, take possibly more
		sample.size <<- max(sample.size, grp.min * length(levels(predicted)))
		## ... but no more than sample.max
		sample.size <<- min(sample.size, sample.max)
		## Or how much remains?
		sample.size <<- min(sample.size, nrow(corr[!corr$Validated, ]))
	}
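	## Illustrative numbers: with 1000 items, fraction = 0.05 (50 items),
	## sample.min = 100, 30 predicted groups with grp.min = 2 (60 items) and
	## sample.max = 200, the clamps above give max(50, 100, 60) = 100, then
	## min(100, 200) = 100 items, further capped by what remains to validate.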
	## Determine the subsample to validate
	## Update Step and RdValidated (used for error estimation)
	## Automatically place vignettes in directories for manual validation
	prepareTest <- function () {
		nobs <- nrow(data)
		if (step < 1) {
			## At first time, take a random subsample
			## Same as considering everything as suspect
#PhG			nsuspect <<- nobs
#PhG			ntrusted <<- 0
			sample.ids <- sample(1:nobs, size = sample.size)
			corr$Step[sample.ids] <<- step
			corr$RdValidated[sample.ids] <<- step
			nsuspect.tovalid.history <<- c(nsuspect.tovalid.history, sample.size)
			ntrusted.tovalid.history <<- c(ntrusted.tovalid.history, 0)
			nsuspect.history <<- c(nsuspect.history, nobs)
			ntrusted.history <<- c(ntrusted.history, 0)
		} else { # Step > 0
			## Mix trusted and suspect particles
			#validated <- corr[corr$Validated, ]
			notvalidated <- corr[!corr$Validated, ]
			nsuspect <<- sum(notvalidated$Suspect)	#nrow(notvalidated[notvalidated$Suspect, ])
			ntrusted <<- nrow(notvalidated) - nsuspect	#nrow(notvalidated[!notvalidated$Suspect, ])
			## Determine the number of random items used in error estimation
			numRandom  <- max(sample.size - nsuspect,
				round(random.sample * sample.size))
			ids <- as.character(1:nobs)
			## All items not included in RdValidated
			tosample.ids <- ids[is.infinite(corr$RdValidated)]
			## Items to validate
			randomsample.ids <- as.numeric(sample(tosample.ids,
				size = min(numRandom, length(tosample.ids))))
			## Used to estimate error at step i
			corr$RdValidated[randomsample.ids] <<- step
			newstep <- corr$Step[randomsample.ids]
			## Except those already validated and used for error estimation
			newstep[newstep == -1] <- step
			corr$Step[randomsample.ids] <<- newstep
			notvalid.ids <- ids[!corr$Validated & corr$RdValidated == step]
			## Number of items to validate in order to reach sample.size
			numSample <- sample.size - length(notvalid.ids)
			if (numSample > 0) {
				## Randomly select suspect items not validated
				suspnotval.ids <- ids[!corr$Validated & corr$Suspect &
					is.infinite(corr$RdValidated) & corr$Step == -1]
				if (length(suspnotval.ids)) {
					suspsample.ids <- as.numeric(sample(suspnotval.ids,
						size = min(numSample, length(suspnotval.ids))))
					corr$Step[suspsample.ids] <<- step
					numSample <- numSample - length(suspsample.ids)
				}
				if (numSample > 0) {
					## Randomly select trusted items not validated
					trustnotval.ids <- ids[!corr$Validated & !corr$Suspect  &
						is.infinite(corr$RdValidated) & corr$Step == -1]
					if (length(trustnotval.ids)) {
						trustsample.ids <- as.numeric(sample(trustnotval.ids,
							size = min(numSample, length(trustnotval.ids))))
						corr$Step[trustsample.ids] <<- step
					}
				}
			}
      ############### stratified random sampling ###############
#       if (numSample > 0) {
#     		## Select the same number of suspect items not validated in each class
#   			suspnotval.ids <- ids[!corr$Validated & corr$Suspect &
#   			  is.infinite(corr$RdValidated) & corr$Step == -1]
#   		  if (length(suspnotval.ids)) {
#   		    splitGp <- split(suspnotval.ids, list(corr[suspnotval.ids,]$Predicted))
#   		    strat.samples <- lapply(splitGp, function(x) x[sample(1:NROW(x),
#   		                      min(NROW(x), round(numSample/length(unique(corr$Predicted[as.numeric(suspnotval.ids)])))),
#   		                      replace = FALSE)])
#   		    suspsample.ids <- as.numeric(do.call(c, strat.samples))
#   			  corr$Step[suspsample.ids] <<- step
#   			  numSample <- numSample - length(suspsample.ids)
#   			}
#
#         if (numSample > 0) {
#           ## If not completed, randomly select suspects items not validated
#           suspnotval.ids <- ids[!corr$Validated & corr$Suspect &
#                                   is.infinite(corr$RdValidated) & corr$Step == -1]
#           if (length(suspnotval.ids)) {
#             suspsample.ids <- as.numeric(sample(suspnotval.ids,
#                        size = min(numSample, length(suspnotval.ids))))
#             corr$Step[suspsample.ids] <<- step
#             numSample <- numSample - length(suspsample.ids)
#           }
#         }
#
#   			if (numSample > 0) {
#   			  ## If not completed, Select the same number of trusted items not validated in each class
#   			  trustnotval.ids <- ids[!corr$Validated & !corr$Suspect  &
#   			    is.infinite(corr$RdValidated) & corr$Step == -1]
#   			  if (length(trustnotval.ids)) {
#   			    splitGp <- split(trustnotval.ids, list(corr[trustnotval.ids,]$Predicted))
#   			    strat.samples <- lapply(splitGp, function(x) x[sample(1:NROW(x),
#   			                          min(NROW(x), round(numSample/length(unique(corr$Predicted[as.numeric(trustnotval.ids)])))),
#   			                          replace = FALSE)])
#   			    trustsample.ids <- as.numeric(do.call(c, strat.samples))
#   			    corr$Step[trustsample.ids] <<- step
#   			    numSample <- numSample - length(trustsample.ids)
#   			  }
#   			}
#
#   			if (numSample > 0) {
#           ## If not completed, randomly select trusted items not validated
#   			  trustnotval.ids <- ids[!corr$Validated & !corr$Suspect &
#   			                          is.infinite(corr$RdValidated) & corr$Step == -1]
#   			  if (length(trustnotval.ids)) {
#   			    trustsample.ids <- as.numeric(sample(trustnotval.ids,
#   			                                        size = min(numSample, length(trustnotval.ids))))
#   			    corr$Step[trustsample.ids] <<- step
#   			    numSample <- numSample - length(trustsample.ids)
#   			  }
#   			}
      ############### ############### ###############
			nsuspect.tovalid <- length(ids[corr$Step == step & corr$Suspect])
			ntrusted.tovalid <- length(ids[corr$Step == step & !corr$Suspect])
			nsuspect.history <<- c(nsuspect.history, nsuspect)
			ntrusted.history <<- c(ntrusted.history, ntrusted)
			nsuspect.tovalid.history <<- c(nsuspect.tovalid.history, nsuspect.tovalid)
			ntrusted.tovalid.history <<- c(ntrusted.tovalid.history, ntrusted.tovalid)
		}
		if (mode != "stat") {
			## Make sure the R Httpd server is started
			tools <- getNamespace("tools")
			if (R.Version()$`svn rev` >= 67550) {
				port <- tools::startDynamicHelp(NA)
			} else {
				port <- tools$httpdPort
			}
			if (port == 0) port <- startDynamicHelp(TRUE)
			if (port == 0) stop("Impossible to start the R httpd server")
			subdir <- paste0("step", step + 1) # Because it start at 0,
			## but this is really the beginning of next step now
			stepdir <- file.path(testdir, subdir)
			dir.create(stepdir)
			Zidb <- zidbLink(zidb)
			## Write data in test directory
			keepRows <- corr$Step == step
			Vigns <- data[keepRows, "Id"]
			imgext <- Zidb[[".ImageType"]]
			## Extract all these images to stepdir
			vigpath <- file.path(stepdir, paste(Vigns, imgext, sep = "."))
			names(vigpath) <- Vigns
			if (length(Vigns))
				for (j in 1:length(Vigns))
					writeBin(Zidb[[Vigns[j]]], vigpath[j])
			## Create all directories of groups
			path <- attr(classifier, "path")
			names(path) <- basename(path)
			## Create planktonSorter.html file
			vigfiles <- basename(vigpath)
			pred <- as.character(corr[keepRows, "Predicted"])
			names(vigfiles) <- pred
			Sample <- as.character(sub("\\+.+_[0-9]+", "", Vigns[1]))
			if (step > 0) {
				## Recreate previous page with sorter items
				oldPage <- file.path(testdir, paste0("step", step),
					"planktonSorter.html")
				oldItems <- corr$Step == (step - 1)
				oldVigns <- paste(data[oldItems, "Id"], imgext, sep = ".")
				oldNames <- as.character(corr[oldItems, "Actual"])
				oldNames[is.na(oldNames)] <- "[other]"
				names(oldVigns) <- oldNames
				res <- planktonSorterPage(groups = path, vigns = oldVigns,
					title = sorter.title, id = sorter.id,
					step = step, file = oldPage)
			}
			## Create new page
			plSorterPage <- file.path(stepdir, "planktonSorter.html")
			sorter.title <<- paste0(Sample, "/Step", step + 1)
			sorter.id <<- paste(id, step + 1, sep = "/")
			res <- planktonSorterPage(groups = path, vigns = vigfiles,
				title = sorter.title, id = sorter.id,
				step = step + 1, file = plSorterPage)
			cat(paste("You have to validate vignettes in", subdir,
				"directory", "before next iteration...\n"))
			browseURL(paste0("file://", plSorterPage)) #Does not work on linux, "?v=", round(runif(1, max = 100000))))
		}
		testset.validated <<- FALSE
	}
	## Retrieve test set (validated or not)
	getTest <- function ()
		testset <<- corr[corr$Step == step, ]
	## Automatic validation of the particles (only in demo and stat modes)
	## Read manual validation results from the hard drive
	## Update the corr$Actual column
	validate <- function () {
		if (mode == "stat") {
			corr[corr$Step == step, ]$Actual <<- validated[corr$Step == step]
		} else {
## TODO: change this!
#			## Read Test set with getTest() from zooimage package
#			SubDir <- file.path(testdir, paste0("step", step))
#			TestSetStep <- getTest(SubDir)[, c("Id", "Class")]
			## Ids of items validated at step 'i' and included in data
## ????
#			dfId <- data.frame(Id = as.factor(rownames(data))[corr$Step == step])
#			## Reorder TestSetStep to replace values in corr object
#			test2 <- merge(dfId, TestSetStep, by = "Id", sort = FALSE)
#			## Be sure that names correspond
#			if (!isTRUE(all(test2$Id == dfId$Id)))
#				stop("'Id' in manual validation doesn't correspond with 'Id' ",
#					"from dataset used in error correction.")
			if (mode == "validation") {
				if (is.null(validation.data)) {
					return()
				} else {
					stepVal <- as.numeric(validation.data[1, 2]) - 1
					if (is.na(stepVal) || !length(stepVal) || stepVal < 0)
						stop("Wrong validation data (step = ", stepVal, ")")
					grps <- validation.data[-1, 1]
					## Transform [other] into NA
					grps[grps == "[other]"] <- NA
					testStep <- data.frame(Id = validation.data[-1, 2],
						Class = factor(grps, levels = levels(corr$Actual)))
					validation.data <<- NULL
					dfId <- data.frame(Id = makeId(data)[corr$Step == stepVal])
					test2 <- merge(dfId, testStep, by = "Id", sort = FALSE)
					corr[corr$Step == stepVal, ]$Actual <<- test2$Class
					step <<- stepVal
					## TODO: save these results also in the zidb file!
				}
			} else {
				corr[corr$Step == step, ]$Actual <<- validated[corr$Step == step]
			}
		}
		## Update column for vignettes impossible to identify or that belong to
		## other group than those included in the classifier
		corr$OtherGp <<- is.na(corr$Actual)
		testset.validated <<- TRUE
	}
	## Estimate global error in the sample based on the RdValidated column
	estimateError <- function () {
		## At first step, use all validated particles to estimate the error
		if (step == 0) {
			error.estim.data <<- corr[corr$Step == step, ]
			## Manage NAs
			error.estim <<- sum(error.estim.data$Actual[!error.estim.data$OtherGp] !=
				error.estim.data$Predicted[!error.estim.data$OtherGp]) /
				(nrow(error.estim.data) - sum(error.estim.data$OtherGp))
			error.estim.rd <<- error.estim
			error.estim.history <<- error.estim
			error.estim.rd.history <<- error.estim.history
		} else { # data used previously + a portion of the validated test set
			## All error at step i
			Error <- corr$Actual != corr$Predicted
			## Calculation of the global error
			error.estim <<- .globalError(suspect = corr$Suspect[!corr$OtherGp],
				error = Error[!corr$OtherGp],
				subset = corr$Validated[!corr$OtherGp], na.rm = TRUE)
			error.estim.history <<- c(error.estim.history, error.estim)
			error.estim.rd.history <<- c(error.estim.rd.history, error.estim.rd)
		}
		## Error in the different fractions
#PhG		if (mode != "validation") {
		if (mode == "stat") {
			error <- validated != corr$Predicted
			errInFract <- .errorInFractions(suspect = corr$Suspect,
				error = error, validated = corr$Validated,
				iteration = (corr$Step == step), na.rm = TRUE)
			errInFract.history <<- rbind(errInFract.history, errInFract)
		}
	}
	## Compute the weighted sum of validated suspect and trusted particles
	## Confusion matrix to estimate the abundance
	estimateAbundance <- function () {
		## At the first step all particles are considered suspect
		if (step == 0) {
			error.conf <- confusion(error.estim.data$Predicted,
				error.estim.data$Actual, useNA = "no") # remove NAs
			corr.confusion <<- error.conf / sum(error.conf) *
				(nrow(data) - sum(corr$OtherGp)) # remove NAs
			## For cells
			if ("Nb_cells" %in% names(data)) {
				error.conf.cell <- xtabs(data$Nb_cells[corr$Step==step] ~
                    error.estim.data$Actual + error.estim.data$Predicted,
					exclude = c(NA, NaN))
				cell.confusion <<- error.conf.cell /
					sum(error.conf.cell) * (sum(data$Nb_cells) -
					sum(data$Nb_cells[corr$OtherGp])) # remove NAs
			}
			## For biovolumes
			if ("BioWeight" %in% names(data)) {
				error.conf.bioweight <- xtabs(data$BioWeight[corr$Step==step] ~
			        error.estim.data$Actual + error.estim.data$Predicted,
					exclude = c(NA, NaN))
				bioweight.confusion <<- error.conf.bioweight /
					sum(error.conf.bioweight) * (sum(data$BioWeight) -
					sum(data$BioWeight[corr$OtherGp])) # remove NAs
			}
			## Calculate error in validated data and in both suspect and trusted parts
			error.valid.history[[step + 1]] <<-
				error.estim.data$Actual != error.estim.data$Predicted
			error.suspect.history[[step + 1]] <<-
				error.estim.data$Actual != error.estim.data$Predicted
			error.trusted.history[[step + 1]] <<- rep(FALSE, sum(error.conf))
		} else {
			## Confusion matrix suspect vs trusted
			nSuspTot <- sum(corr$Suspect & !corr$OtherGp)
			valSuspIdx <- corr$Validated & !corr$OtherGp & corr$Suspect
			valTrustIdx <- corr$Validated & !corr$OtherGp & !corr$Suspect
			nSuspVal <- sum(valSuspIdx)
			nTrustVal <- sum(valTrustIdx)
			confSuspVal <- confusion(corr$Predicted[valSuspIdx],
				corr$Actual[valSuspIdx])
			confTrustVal <- confusion(corr$Predicted[valTrustIdx],
				corr$Actual[valTrustIdx])
			confSusp.w <- confSuspVal / nSuspVal * nSuspTot
			notValTrustIdx <- !corr$Validated & !corr$Suspect
			confNotValTrust <- confusion(corr$Predicted[notValTrustIdx],
				corr$Actual[notValTrustIdx])
			corr.confusion <<- confSusp.w + confTrustVal + confNotValTrust
			## For cells
			if ("Nb_cells" %in% names(data)) {
				nCellSuspTot <- sum(data$Nb_cells[corr$Suspect &
					!corr$OtherGp])
				nCellSuspVal <- sum(data$Nb_cells[valSuspIdx])
				nCellTrustVal <- sum(data$Nb_cells[valTrustIdx])
				confSuspValCell <- xtabs(data$Nb_cells[valSuspIdx] ~
			        corr$Actual[valSuspIdx] + corr$Predicted[valSuspIdx],
					exclude = c(NA, NaN))
				confTrustValCell <- xtabs(data$Nb_cells[valTrustIdx] ~
			        corr$Actual[valTrustIdx] + corr$Predicted[valTrustIdx],
					exclude = c(NA, NaN))
				confSuspCell.w <- confSuspValCell / nCellSuspVal * nCellSuspTot
				confNotValTrustCell <- xtabs(data$Nb_cells[notValTrustIdx] ~
			        corr$Actual[notValTrustIdx] + corr$Predicted[notValTrustIdx],
					exclude = c(NA, NaN))
				cell.confusion <<-
					confSuspCell.w + confTrustValCell + confNotValTrustCell
			}
			## For biovolumes
			if ("BioWeight" %in% names(data)) {
				nBioSuspTot <- sum(data$BioWeight[corr$Suspect & !corr$OtherGp])
				nBioSuspVal <- sum(data$BioWeight[valSuspIdx])
				nBioTrustVal <- sum(data$BioWeight[valTrustIdx])
				confSuspValBio <- xtabs(data$BioWeight[valSuspIdx] ~
			        corr$Actual[valSuspIdx] + corr$Predicted[valSuspIdx],
					exclude = c(NA, NaN))
				confTrustValBio <- xtabs(data$BioWeight[valTrustIdx] ~
			        corr$Actual[valTrustIdx] + corr$Predicted[valTrustIdx],
					exclude = c(NA, NaN))
				confSuspBio.w <- confSuspValBio / nBioSuspVal * nBioSuspTot
				confNotValTrustBio <- xtabs(data$BioWeight[notValTrustIdx] ~
					corr$Actual[notValTrustIdx] + corr$Predicted[notValTrustIdx],
					exclude = c(NA, NaN))
				bioweight.confusion <<-
					confSuspBio.w + confTrustValBio + confNotValTrustBio
			}
			error.valid.history[[step + 1]] <<- testset$Actual != testset$Predicted
			if  (nsuspect > 0) {
				error.suspect.history[[step + 1]] <<-
					testset[testset$Suspect, ]$Actual !=
					testset[testset$Suspect, ]$Predicted
			}
			if (ntrusted > 0) {
				error.trusted.history[[step + 1]] <<-
					testset[!testset$Suspect, ]$Actual !=
					testset[!testset$Suspect,]$Predicted
			}
		}
	}
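	## In short, for step > 0 the weighting above amounts to (comment sketch):
	##   corr.confusion = confSuspVal * (nSuspTot / nSuspVal)
	##                  + confTrustVal + confNotValTrust
	## i.e. validated suspects are extrapolated to all suspects, while trusted
	## items contribute either their validated or their predicted class.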
	## Compute the corrected coefficients for particles, cells, biovolume
#   estimateCoeffs <- function () {
#     ## For particles (colonies)
#     col.confusion <- table(corr$Predicted[corr$Validated], corr$Actual[corr$Validated], useNA = "no") # remove NAs
#     corr.coeffs <- ifelse(!colSums(col.confusion), rowSums(col.confusion),
#                           rowSums(col.confusion)/colSums(col.confusion))
#     ## For cells
#     if ("Nb_cells" %in% names(data)) {
#       cell.confusion <- xtabs(data$Nb_cells[corr$Validated] ~
#                                 corr$Predicted[corr$Validated] +
#                                 corr$Actual[corr$Validated], exclude = c(NA, NaN))
#       corr.coeffs <- cbind(corr.coeffs, ifelse(!colSums(cell.confusion), rowSums(cell.confusion),
#                                              rowSums(cell.confusion)/colSums(cell.confusion)))
#     }
#
#     ## For biovolumes
#     if ("BioWeight" %in% names(data)) {
#       bioweight.confusion <- xtabs(data$BioWeight[corr$Validated] ~
#                                      corr$Predicted[corr$Validated] +
#                                      corr$Actual[corr$Validated], exclude = c(NA, NaN))
#       corr.coeffs <- cbind(corr.coeffs, ifelse(!colSums(bioweight.confusion), rowSums(bioweight.confusion),
#                                              rowSums(bioweight.confusion)/colSums(bioweight.confusion)))
#     }
#     corr.coeffs
#   }
	## Estimate error and abundance
	## Update Validated, training set and histories
	correct <- function () {
		getTest()
		curr.validated.ids <- corr$Step == step
		corr$Validated[curr.validated.ids] <<- TRUE
		corr$Error[corr$Validated & (corr$Actual == corr$Predicted)] <<- "Correct"
		corr$Actual2 <<- corr$Actual
		if (step > 0) {
			ids <- !corr$Validated & corr$Suspect
			corr$Actual2[ids] <<- corr$Predicted2[ids]
		}
		estimateError()
		estimateAbundance()
		#estimateCoeffs()
		validated.fracs <<- c(validated.fracs, sample.size)
		correction.history <<- cbind(correction.history,
			rowSums(corr.confusion))
		if ("Nb_cells" %in% names(data)) {
			correctionCell.history <<- cbind(correctionCell.history,
		        rowSums(cell.confusion))
		}
		if ("BioWeight" %in% names(data)) {
			correctionBio.history <<- cbind(correctionBio.history,
		        rowSums(bioweight.confusion))
		}
		manual.history <<- cbind(manual.history, table(corr$Actual))
		manual2.history <<- cbind(manual2.history, table(corr$Actual2))
		setSampleSize() # Set the next subsample size
	}
	process <- function () {
		if (!step.manual) {
			if (step > 0) {
				susp <- suspect(attr(classifier, "calc.vars")(data), proba, error = corr$Error,
					subset = corr$Validated, predicted = predicted,
					confusion = corr.confusion, algorithm = algorithm)
				## Update Suspect column
				corr$Suspect <<- susp == "Error"
				## Keep a track of the detection to evaluate performance
				suspect.history[[step]] <<- susp
				validated.history[[step]] <<- corr$Validated
			}
			prepareTest()
			step.manual <<- TRUE
		} else if (testset.validated) {
			step.manual <<- FALSE
if (mode == "stat") {
	getTest()
	correct()
}
			cat(paste("Step", step + 1, "finished \n"))
			step <<- step + 1
		} else warning("You have to complete the validation first \n")
		flush.console()
	}
	## TODO: process called twice, and it seems that the step.manual flag is
	##       indeed triggering something else each time... This is *crazy*
	##       Do change this!!!
	processValidation <- function () {
		if (sample.size > 0) {
			#if (!isTRUE(step.manual)) {
			#	process()
			#} else {
			#	#validate()
			#	process()
			#}
			process()
		} else cat("Correction done!\n")
	}
	processDemo <- function () {
		if (sample.size > 0) {
#PhG			process()
#PhG			validate()
			process()
		} else cat("Correction done!\n")
	}
	processStats <- function () {
		repeat {
			if (sample.size > 0) {
				process()
				validate()
				process()
			} else {
				cat("Correction done!\n")
				break
			}
		}
	}
	## Accessors
	iterate <- function () {
		if (step < 0) initialize()
		switch(mode,
			validation = processValidation(),
			demo = processDemo(),
			stat = processStats(),
			stop("'mode' must be 'validation', 'demo', or 'stat'"))
	}
	setValidation <- function (validation.data = NULL) {
		validation.data <<- validation.data
		validate()
		correct()
		## Recreate the page with current sorting by default
		Zidb <- zidbLink(zidb)
		imgext <- Zidb[[".ImageType"]]
		## Create all directories of groups
		path <- attr(classifier, "path")
		names(path) <- basename(path)
		## Recreate previous page with sorter items
		oldPage <- file.path(testdir, paste0("step", step + 1),
			"planktonSorter.html")
		oldItems <- corr$Step == step
		oldVigns <- paste(data[oldItems, "Id"], imgext, sep = ".")
		oldNames <- as.character(corr[oldItems, "Actual"])
		oldNames[is.na(oldNames)] <- "[other]"
		names(oldVigns) <- oldNames
		res <- planktonSorterPage(groups = path, vigns = oldVigns,
			title = sorter.title, id = sorter.id,
			step = step + 1, file = oldPage)
		## Create the diagnostic graphs
		reportdir <- file.path(testdir, paste0("step", step + 1))
		reportplot <- file.path(reportdir, "ReportError.png")
		unlink(reportplot)
		png(reportplot, width = 800, height = 500)
		plotResult("b")
		dev.off()
		## Create a page displaying the current state of correction process
		reportfile <- file.path(reportdir, "planktonSorterResults.html")
		res <- planktonSorterReport(title = sorter.title, id = sorter.id,
			step = step + 1, file = reportfile)
		## This is a trick to avoid using cached version in memory!
		browseURL(paste0("file://", reportfile)) # Does not work on linux, "?v=", round(runif(1, max = 100000))))
		process()
		return(reportfile)
	}
	finish <- function () {
		cat("Validation done...\n")
		abd <-  getAbundance()
		print(abd)
		## Create an object with these results...
		test <- data.frame(Id = makeId(data), data, Class = corr$Actual,
			Validated = corr$Validated, Suspect = corr$Suspect)
		#test <- data.frame(Id = makeId(data), data, Class = corr$Actual)
		attr(test, "path") <- attr(classifier, "path")
		class(test) <- unique(c("ZI3Test", "ZITest", class(data)))
		assign(result, test, envir = envir)
		cat("Object `", result, "` created with validation results!\n", sep = "")
		## Erase the temporary directory on disk...
		unlink(testdir, recursive = TRUE)
		## Return abundances in this file
		abd
	}
	## Return the estimated abundance
	getAbundance <- function ()
		rowSums(corr.confusion)
	## Return the estimated error
	getErrorEstimation <- function ()
		error.estim
	## Evaluate the quality of the classifier for the suspects
	evaluateDetection <- function () {
		if (!length(suspect.history)) return(NULL)
		error <- factor(rep("Error", nrow(data)), levels = c("Correct", "Error"))
		error[validated == corr$Predicted] <- "Correct"
		conf <- list()
		for (i in 1:length(suspect.history)) {
			validated <- validated.history[[i]]
			conf[[paste0("Step", i)]] <-
				summary(confusion(x = error[!validated],
				y = as.factor(suspect.history[[i]][!validated])))
		}
		list(error = error, confusion = conf)
	}
	## Compare graphically the result with manual validation
	## and manual validation + second identification
	plotResult <- function (type = c("dissimilary", "partition", "barplot")) {
		#if (mode == "validation")
		#	stop("This function is not available in validation mode.")
		type <- match.arg(type[1], c("dissimilarity", "partition", "barplot"))
		if (type == "dissimilarity") {
			if (is.null(validated)) {
				## In case we are in validation mode, we don't have this!...
				## We start from original prediction
				abundances <- as.vector(table(predicted[!corr$OtherGp]))
			} else {
				abundances <- as.vector(table(validated[!corr$OtherGp]))
			}
			error1 <- dissimilarity(abundances, manual.history, na.rm = TRUE) * 100
			error3 <- dissimilarity(abundances, correction.history, na.rm = TRUE) * 100
			par(mar = c(5, 4, 4, 4) + 0.1)
			plot(cumsum(validated.fracs) / nrow(corr) * 100, error1,
				type = "l", xlab = "Validated fraction (%)",
				ylab = "Dissimilarity (%)", col = "green", xlim = c(0, 100),
				ylim = c(0, max(error1, error3) * 1.02),
				main = "Dissimilarity at each iteration")
			## Baseline for manual validation
			lines(c(0, 100), c(error1[1], 0), col = 1, lty = 2)
			## Line for correction
			lines(cumsum(validated.fracs) / nrow(corr) * 100, error3, col = "red")
			grid()
			legend("topright", legend = c("Random validation",
				"Suspects validation", "Error correction"),
				col = c("black", "green","red"), lty = c(2, 1, 1))
		} else if (type == "partition") {
			fracs <- cumsum(validated.fracs[-1]) / nrow(corr)
			errByFrac <- sapply(error.valid.history, sum, na.rm = TRUE) /
				validated.fracs[-1]
			suspByFrac <- nsuspect.tovalid.history / validated.fracs[-1]
			suspByFrac[1] <- 0
			par(mar = c(5, 4, 4, 4) + 0.1)
			plot(fracs * 100, errByFrac * 100, type = "l", xlab = "Validated fraction (%)",
				ylab = "Suspect and error (%)", xlim = c(0, 100), ylim = c(0, 100), col = "red",
				main = "Suspects and error at each iteration")
			lines(fracs * 100, suspByFrac * 100, col = "black")
			grid()
			legend("topright", legend = c("Suspect", "Error"),
				col = c("black", "red"), cex = 0.8, lwd = 2)
		} else { # Should be type == "barplot"
			thresholdDiffDiss <- 5  # Differential dissimilarity <= 5%
			nbStep <- ceiling(nrow(data) / validated.fracs[-1][1])
			errByFrac <- sapply(error.valid.history, sum, na.rm = TRUE) /
				validated.fracs[-1]
			suspByFrac <- nsuspect.tovalid.history / validated.fracs[-1]
			#suspByFrac[1] <- 0
			## case 1 item => projection, case more => another projection...
			dat <- rbind(suspByFrac * 100, errByFrac * 100)
			diffDiss <- sapply(2:ncol(correction.history), function (x)
				dissimilarity(correction.history[, x - 1], correction.history[, x],
				na.rm = TRUE) * 100
			)
			xcoord <-
				seq(0.7, ceiling(nrow(data) / validated.fracs[-1][1]) * 1.2, by = 1.2)
			if (step < 1) {
				suspRemain <- NA
				stepSD <- round((errByFrac*nsuspect.history -
					errByFrac*nsuspect.tovalid.history) /
                    nsuspect.tovalid.history) + (step+1)
				idxStepSD <- stepSD
				coordStepSD <- mean(c(xcoord[idxStepSD], xcoord[idxStepSD + 1]))
			} else {
				suspRemain <- c(NA, nsuspect.history[2:(step+1)] -
					nsuspect.tovalid.history[2:(step+1)])
				stepSD <- round(suspRemain / nsuspect.tovalid.history) + 1:(step+1)
				if (length(which(suspRemain == 0)) > 0) {
					idxStepSD <- which(suspRemain == 0)[1]
				} else {
					idxStepSD <- tail(stepSD,1)
				}
				coordStepSD <- mean(c(xcoord[idxStepSD], xcoord[idxStepSD + 1]))
			}
			par(mfrow = c(2, 1), mar = c(4, 4, 1, 4) + 0.1)
			bp1 <- barplot(suspRemain, #xlab = "Validation step",
				ylab = "Nb remaining suspects", xlim = c(0.2,
				xcoord[ceiling(idxStepSD + (length(xcoord) - idxStepSD) / 3)]),
				ylim = c(0, max(suspRemain, diffDiss, na.rm = TRUE)), yaxs = "r",
				col = "grey10", cex.axis = .7, cex.main = 1, ann = FALSE,
				yaxt = "n") #main = "Remaining suspects and differential dissimilarity"
			title(expression(bold("Remaining suspects") *
				phantom("\tand\tdifferential dissimilarity")),
				col.main = "grey10", cex.main = 1)
			title(expression(phantom("Remaining suspects\t") * "and" *
				phantom("\tdifferential dissimilarity")),
				col.main = "black", cex.main = 1)
			title(expression(phantom("Remaining suspects\tand\t") *
				bold("differential dissimilarity")),
				col.main = "blue", cex.main = 1)
# 			legend("top", legend = c("Remaining suspects","Diff dissimilarity"),
# 			     fill = c("grey20","blue"), cex = .6, bty = "n", adj = c(0,0))
			axis(side = 1, at = seq(bp1[1], by = 1.2, length.out = nbStep),
				labels = 1:nbStep, cex.axis = .7)
			if (step > 0) axis(side = 2, cex.axis = .7)
			par(new = TRUE)
			plot(bp1, diffDiss, type = "o", col = "blue", ylim = c(0, 100),
				xlim = c(0.2, xcoord[ceiling(idxStepSD + (length(xcoord) -
				idxStepSD) / 3)]), lwd = 3, axes = FALSE, ann = FALSE)
			axis(side = 4, col = "blue", col.axis = "blue", cex.axis = .7)
			mtext("Differential dissimilarity (%)", side = 4, line = 3,
				col = "blue")
			abline(v = coordStepSD, lwd = 2, lty = 2, col = "dimgrey")
			text(x = coordStepSD + .5, y = 90, "SD", srt = -90, pos = 4,
				cex = 0.6, col = "dimgrey")
			if (length(which(diffDiss < thresholdDiffDiss)) > 0) {
				coordStepEC <- mean(c(xcoord[which(diffDiss < thresholdDiffDiss)[1]],
					xcoord[which(diffDiss < thresholdDiffDiss)[1]+1]))
				abline(v = coordStepEC, lwd = 2, lty = 2, col = "darkgoldenrod")
				text(x = coordStepEC + .5, y = 90, "EC", srt = -90, pos = 4,
					cex = 0.6, col = "darkgoldenrod")
			}
			grid()
			box()
			bp2 <- barplot(dat, xlab = "Validated step", beside = TRUE,
				ylab = "Suspect and corrected error (%)",
				xlim = c(0.2, xcoord[ceiling(idxStepSD + (length(xcoord) -
					idxStepSD) / 3)]),
				ylim = c(0, 100), col = c("#dddddd", "#dd0000"),
				cex.axis = .7, cex.main = 1, width = .5,
				space = c(0, .4)) #main = "Suspects and error corrected at each iteration"
			title(expression(bold("Nbr of suspects") *
				phantom("\tand\tcorrected error")),
				col.main = "gray40", cex.main = 1)
			title(expression(phantom("Nbr of suspects\t") * "and" *
				phantom("\tcorrected error")),
				col.main = "black", cex.main = 1)
			title(expression(phantom("Nbr of suspects\tand\t") *
				bold("corrected error")),
				col.main = "#dd0000", cex.main = 1)
# 			legend("topright", legend = c("Nbr of suspect", "Corrected error"),
# 			    fill = c("#dddddd", "#dd0000"), cex = .6, bty = "n", adj = c(0, 0))
			axis(side = 1, at = seq(mean(bp2[, 1]), by = 1.2, length.out = nbStep),
				labels = 1:nbStep, cex.axis = .7)
			axis(side = 4, cex.axis = .7)
			grid()
			box()
		}
	}
	getEnv <- function ()
		environment(getEnv)
	invisible(list(iterate = iterate, validate = setValidation,
		done = finish, abundance = getAbundance,
		error = getErrorEstimation, evaluate = evaluateDetection,
		plot = plotResult, environment = getEnv))
}
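## Illustrative session (a sketch with a hypothetical ZIDat object 'df' and
## ZIClass classifier 'clf'; 'demo' mode requires a 'Class' column in 'df'):
## ec <- errorCorrection(df, clf, mode = "demo", zidb = "sample.zidb")
## ec$iterate()   # validate one subsample per call
## ec$error()     # current global error estimation
## ec$abundance() # corrected abundances per group
## ec$done()      # finish and create the `.last_valid` object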
## Validated vs Suspect???
 
/scratch/gouwar.j/cran-all/cranData/zooimage/R/correction.R
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Transform a file extension into a pattern for case-insensitive matching:
## given an extension (with or without the leading dot), return a regular
## expression pattern that matches file names ending with this extension
extensionPattern <- function (extension = "r",
add.dot = !grepl("[.]", extension))
{
	extensionLetters <- substring(extension, 1:nchar(extension),
		1:nchar(extension))
	parts <- ifelse(extensionLetters %in% c(letters, LETTERS),
		paste("[", extensionLetters, casefold(extensionLetters, upper = TRUE),
		"]", sep = ""), extensionLetters)
	pattern <- paste(parts, collapse = "")
	if (isTRUE(as.logical(add.dot)))
		pattern <- paste(".", pattern, sep = "")
	pattern <- gsub( "[.]", "[.]", pattern)
	paste(pattern, "$", sep = "")
}
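## E.g., extensionPattern("zidb") returns "[.][zZ][iI][dD][bB]$", which
## matches the extension case-insensitively at the end of a file name:
## grepl(extensionPattern("zidb"), "Sample.ZIDB") # TRUE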
## Checks if the file has the given extension (used at different places...)
hasExtension <- function (file, extension = "r",
pattern = extensionPattern(extension))
	grepl(pattern, file)
## Get the name of a file, without its extension
noExtension <- function (file)
	sub("\\.[^.]+$", "", basename(file))
## List files with given extension
listFilesExt <- function (dir, extension = "r",
pattern = extensionPattern(extension), ... )
{
	if (!checkDirExists(dir)) return(character(0))
	list.files(dir, pattern = pattern , ...)
}
zimList <- function (dir, ...)
	listFilesExt(dir, extension = "zim", ...)
zimDatList <- function (dir, ...)
	listFilesExt(dir, extension = "_dat[135].zim", ...)
zipList <- function (dir, ...)
	listFilesExt(dir, extension = "zip", ...)
zidList <- function (dir, ...)
	listFilesExt(dir, extension = "zid", ...)
zidbList <- function (dir, ...)
	listFilesExt(dir, extension = "zidb", ...)
jpgList <- function (dir, ...)
	listFilesExt(dir, extension = "jpg", ...)
pngList <- function (dir, ...)
	listFilesExt(dir, extension = "png", ...)
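## Illustrative uses of these helpers:
## noExtension("path/to/Sample01.zidb") # "Sample01"
## hasExtension("Sample01.ZIDB", "zidb") # TRUE
## zidbList(".") # all *.zidb files in the current directory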
## Check if a file exists
checkFileExists <- function (file, extension, message = "file not found: %s",
force.file = FALSE)
{
	## Does this file exists?
	if (!all(file.exists(file))) {
		warning(sprintf(message, file))
		return(FALSE)
	}
	## Make sure it is not a directory
	if (force.file && any(file.info(file)$isdir)) {
		warning("one or more files are directories")
		return(FALSE)
	}
	## Check its extension
	if (!missing(extension) && !all(hasExtension(file, extension))) {
		warning(sprintf("one or more files are not '%s' file", extension))
		return(FALSE)
	}
	## Everything is fine!
	return(TRUE)
}
## Checks if a directory exists
checkDirExists <- function (dir,
message = 'Path "%s" does not exist or is not a directory')
{
	if (!all(file.exists(dir)) || !all(file.info(dir)$isdir)) {
		warning(sprintf(message, dir))
		FALSE
	} else {
		## Everything is fine...
		TRUE
	}
}
## Check if a directory is empty (used in prepareTrain())
checkEmptyDir <- function (dir, message = 'dir "%s" is not empty')
{
	## Works only on a single dir (not vectorized code)
	dir <- as.character(dir)[1]
	if (file.exists(dir)) {
		Files <- list.files(dir, all.files = TRUE)
		Files <- Files[!Files %in% c(".", "..")]
		if (length(Files) > 0) {
			warning(sprintf(message, dir))
			return(FALSE)
		} else return(TRUE)
	} else forceDirCreate(dir)
}
## Force creation of a directory
forceDirCreate <- function (dir)
{
	## If it exists, make sure it is a directory
	if (file.exists(dir)) {
		if (!file.info(dir)$isdir) {
			warning(sprintf('"%s" is not a directory', dir))
			FALSE
		} else TRUE
	} else if (!dir.create(dir, showWarnings = FALSE)) {
		warning(sprintf('could not create directory "%s"', dir))
		FALSE
	} else TRUE
}
## Checks the first line of a file against some expected content
checkFirstLine <- function (file, expected = c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"),
message = 'file "%s" is not a valid ZooImage version <= 5 file')
{
	Line1 <- scan(as.character(file)[1], character(), nmax = 1, quiet = TRUE)
	res <- Line1 %in% expected
	if (!res) warning(sprintf(message, file))
	return(res)
}
## This is a copy of the unexported function tools:::mime_canonical_encoding
.mimeEncoding <- function (encoding)
{
    encoding[encoding %in% c("", "unknown")] <- utils::localeToCharset()[1L]
    encoding <- tolower(encoding)
    encoding <- sub("iso_8859-([0-9]+)", "iso-8859-\\1", encoding)
    encoding <- sub("iso8859-([0-9]+)", "iso-8859-\\1", encoding)
    encoding[encoding == "latin1"] <- "iso-8859-1"
    encoding[encoding == "latin2"] <- "iso-8859-2"
    encoding[encoding == "latin3"] <- "iso-8859-3"
    encoding[encoding == "latin4"] <- "iso-8859-4"
    encoding[encoding == "cyrillic"] <- "iso-8859-5"
    encoding[encoding == "arabic"] <- "iso-8859-6"
    encoding[encoding == "greek"] <- "iso-8859-7"
    encoding[encoding == "hebrew"] <- "iso-8859-8"
    encoding[encoding == "latin5"] <- "iso-8859-9"
    encoding[encoding == "latin6"] <- "iso-8859-10"
    encoding[encoding == "latin8"] <- "iso-8859-14"
    encoding[encoding == "latin-9"] <- "iso-8859-15"
    encoding[encoding == "latin10"] <- "iso-8859-16"
    encoding[encoding == "utf8"] <- "utf-8"
    encoding[encoding == "ascii"] <- "us-ascii"
    encoding
}
 
/scratch/gouwar.j/cran-all/cranData/zooimage/R/fileutils.R
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>
ZIDlg <- function ()
{
	## In this version, we use a simpler implementation, using svDialogs
	## and menus added to RGui, JGR or ctxmenu
	ZIname <- getTemp("ZIname")
	menuDel(ZIname)
	menuAdd(ZIname)
	menuAddItem(ZIname, "Load objects", "loadObjects()")
	menuAddItem(ZIname, "Save objects", "saveObjects()")
	menuAddItem(ZIname, "List objects", "listObjects()")
	menuAddItem(ZIname, "Remove objects", "removeObjects()")
	menuAddItem(ZIname, "-", "")
	menuAddItem(ZIname, "Interactive UI", "ZIUI()")
	menuAddItem(ZIname, "--", "")
	menuAddItem(ZIname, "Online help", 'help("zooimage")')
	menuAddItem(ZIname, "Manual (English version)", "viewManual()")
	menuAddItem(ZIname, "Manual (French version)", "viewFrenchManual()")
	menuAddItem(ZIname,
		"Web site", 'browseURL("http://www.sciviews.org/zooimage")')
	menuAddItem(ZIname, "--", "")
	menuAddItem(ZIname, "About...", "aboutZI(TRUE)")
	menuDel("Analyze")
	menuAdd("Analyze")
	menuAddItem("Analyze", "Acquire images...", "acquireImg()")
	menuAddItem("Analyze", "Import images...", "importImg()")
	menuAddItem("Analyze", "Process images...", "processImg()")
	#menuAddItem("Analyze", "Make .zid files...", "makeZid()")
	menuAddItem("Analyze", "Make .zidb files...", "makeZidb()")
	menuAddItem("Analyze", "Make .zidb files from FlowCAM data...", "makeZidbFlowCAM()")
	menuAddItem("Analyze", "-", "")
	menuAddItem("Analyze", "Make training set...", "makeTrain()")
	menuAddItem("Analyze", "Add vignettes to training set", "addVigsToTrain()")
 	menuAddItem("Analyze", "Differences between two training sets", "compTrain()")
	menuAddItem("Analyze", "Count cells in colonies...", "countCellsGUI()")
	menuAddItem("Analyze", "Read training set...", "collectTrain()")
	menuAddItem("Analyze", "Make classifier...", "makeClass()")
	menuAddItem("Analyze", "Analyze classifier...", "analyzeClass()")
	menuAddItem("Analyze", "Automatic classification of vignettes...",
		"vignettesClass()")
	menuAddItem("Analyze", "Validate classification...", "validClass()")
	menuAddItem("Analyze", "Active learning...", "activeLearningGUI()")
	menuAddItem("Analyze", "--", "")
	menuAddItem("Analyze", "Edit samples description", "editDescription()")
	menuAddItem("Analyze", "Process samples...", "processSamples()")
	menuAddItem("Analyze", "Process samples with cells counting...", "processSamplesWithCells()")
	menuAddItem("Analyze", "View results...", "viewResults()")
	menuAddItem("Analyze", "Export results...", "exportResults()")
	## Menu 'Functions' not added yet!
	menuDel("Utilities")
	menuAdd("Utilities")
	menuAddItem("Utilities", "Calibrate grayscale (16bit)", "calib()")
	menuAddItem("Utilities", "Biomass conversion specification",
		"fileEdit(file.path(getTemp('ZIetc'), 'Conversion.txt'))")
	menuAddItem("Utilities", "-", "")
	menuAddItem("Utilities", "Image viewer( XnView)", 'startPgm("ImageViewer")')
	menuAddItem("Utilities", "Image analyzer (ImageJ)",
		'startPgm("ImageEditor", switchdir = TRUE, iconize = TRUE)')
	menuAddItem("Utilities", "Metadata editor",
		'fileEdit(selectFile("ZimZis"))')
	menuAddItem("Utilities", "Simple acquisition (Vuescan)",
		'startPgm("VueScan", switchdir = TRUE)')
	menuAddItem("Utilities", "--", "")
	menuAddItem("Utilities", "New R graph", "dev.new()")
	menuAddItem("Utilities", "Activate next graph",
		"{dev.set(); if (isRgui()) bringToTop()}")
	menuAddItem("Utilities", "Close all graphs", "graphics.off()")
	menuAdd("Utilities/Options")
	menuAddItem("Utilities/Options", "Change active dir...",
		"setwd(dlgDir()$res)")
	menuAddItem("Utilities/Options", "-", "")
	menuAddItem("Utilities/Options", "Define decimal separator",
		"optInOutDecimalSep()")
#	## This is the old implementation using svWidgets
#	# If the window is already created, just activate it...
#	if ("ZIDlgWin" %in% WinNames()) {
#		ZIDlgWin <- WinGet("ZIDlgWin")
#		tkfocus(ZIDlgWin)  	# Doesn't work with Rgui.exe, but next command does
#		tkwm.deiconify(ZIDlgWin)
#    	return(invisible())
#	}
#
#	# Construct the window
#	tkWinAdd("ZIDlgWin", title = paste(getTemp("ZIname"), "assistant"),
#		pos = "-100+10")
#	ZIDlgWin <- WinGet("ZIDlgWin")
#
#	# Do not show it until it is completely constructed!
#	tkwm.withdraw(ZIDlgWin)
#	on.exit(tkwm.deiconify(ZIDlgWin))
#
#	# Change the icon of that window (if under Windows)
#	if (isWin()) tk2ico.set(ZIDlgWin, getTemp("ZIico"))
#
#	# Add a menu (load it from a spec file)
#	Pkg <- getTemp("ZIguiPackage", default = "zooimage")
#	MenuReadPackage(Pkg, file = "MenusZIDlgWin.txt")
#
#	# Add a toolbar (read it from file 'ToolbarZIDlgWin.txt')
#	ToolRead(file.path(getTemp("ZIgui"), "ToolbarsZIDlgWin.txt"))
#
#	# Add a statusbar with a text and a progressbar
#	status <- tk2frame(ZIDlgWin)
#	statusText <- tk2label(status, text = paste("Ready -", getwd()),
#		justify = "left", anchor = "w", width = 60)
#	statusProg <- tk2progress(status, orient = "horizontal", maximum = 100)
#	tkpack(statusProg, side = "right")
#	tkpack(statusText, side = "left", fill= "x")
#	tkpack(status, side = "bottom", fill = "x")
#	tkpack(tk2separator(ZIDlgWin), side = "bottom", fill = "x")
#
#	# Keep track of statusText / statusProg
#	assignTemp("statusText", statusText)
#	assignTemp("statusProg", statusProg)
#	## Change value of the progressbar
#	#tkconfigure(getTemp("statusProg"), value = 50)
#	## Change text of the statusbar
#	#tkconfigure(getTemp("statusText"), text = paste("Ready -", getwd()))
#	## Add a function for progress() to update the progressbar in this window
#	assignTemp(".progress", list(progressZIGUI = function (value, max.value) {
#		if (!"ZIDlgWin" %in% WinNames()) return()
#
#		if (is.null(max.value)) {
#		    max.value <- 100
#		    percent <- TRUE
#		} else percent <- FALSE
#
#		if (value > max.value) { # Erase progressmeter
#			rmTemp("statusBusy")
#			tkconfigure(getTemp("statusProg") , value = 0)
#			tkconfigure(getTemp("statusText") , text = paste("Ready -", getwd()))
#		} else { # Show progress
#			assignTemp("statusBusy", TRUE)
#			## Calculate fraction and show it in the progress bar
#			if (!percent) value <- value / max.value * 100
#			tkconfigure(getTemp("statusProg"), value = value)
#			## Display the progress text also in the statusbar
#			tkconfigure(getTemp("statusText"), text = message)
#		}
#		.Tcl("update idletasks")
#	}))
#
#	if (!isWin()) {
#		# The activate R console & R graph do not work elsewhere
#        MenuStateItem("$Tk.ZIDlgWin/Apps", "&R Console", FALSE)
#		MenuStateItem("$Tk.ZIDlgWin/Apps", "Active R &Graph", FALSE)
#	}
#
#	# For each of the six external programs, look if they are accessible,
#	# otherwise, inactivate
#	if (is.null(getOption("fileEditor")))
#         MenuStateItem("$Tk.ZIDlgWin/Apps", "&Metadata editor", FALSE)
#    if (is.null(getOption("ImageEditor")))
#         MenuStateItem("$Tk.ZIDlgWin/Apps", "Image &analyzer (ImageJ)", FALSE)
#    if (is.null(getOption("ImageViewer")))
#         MenuStateItem("$Tk.ZIDlgWin/Apps", "Image &viewer (XnView)", FALSE)
#    if (is.null(getOption("VueScan")))
#         MenuStateItem("$Tk.ZIDlgWin/Apps", "Simple acquisition (&VueScan)", FALSE)
#
#	# Change the window to non-resizable and topmost (if under Windows)
#	if (isWin()) tcl("wm", "attributes", ZIDlgWin, topmost = 1)
#	tkwm.resizable(ZIDlgWin, 0, 0)
#	# Focus on that window
#	tkfocus(ZIDlgWin)	# Doesn't work with Rgui.exe, but tkwm.deiconify does
}
## Function for the RGui menu
aboutZI <- function (graphical = FALSE)
{
	msg <- getTemp("ZIverstring")
	### TODO: add more information here (copyright, authors, ...)
	if (isTRUE(as.logical(graphical))) {
		dlgMessage(message = msg, title = "About...", icon = "info",
			type = "ok")
	} else cat(msg, "\n")
}
exitZI <- function ()
{
	## This is useful to allow updating the package!
	detach("package:zooimage", unload = TRUE)
	message("zooimage package unloaded; To restart it, issue:\n> library(zooimage)")
}
## Functions for the assistant menu
closeAssistant <- function ()
{
	try(menuDel(getTemp("ZIname")), silent = TRUE)
	try(menuDel("Analyze"), silent = TRUE)
	try(menuDel("Utilities"), silent = TRUE)
	## Destroy the ZooImage Tk window, if it is currently displayed
	#tkWinDel("ZIDlgWin")
	## Eliminate the function to update the progressmeter in that window
	#assignTemp(".progress", list())
}
closeZooImage <- function ()
{
	closeAssistant()
	exitZI()
}
viewManual <- function ()
{
	manual <- file.path(getTemp("ZIetc"), "ZooImageManual.pdf")
	pdfviewer <- getOption("pdfviewer")
	if (!is.null(pdfviewer)) {
		if (.Platform$OS.type == "windows") {
            shell.exec(manual)
        } else {
			system(paste(shQuote(pdfviewer), shQuote(manual)),
				wait = FALSE)
		}
	} else browseURL(manual)
}
viewFrenchManual <- function ()
{
  manual <- file.path(getTemp("ZIetc"), "ZooImageManual_french.pdf")
  pdfviewer <- getOption("pdfviewer")
  if (!is.null(pdfviewer)) {
    if (.Platform$OS.type == "windows") {
      shell.exec(manual)
    } else {
      system(paste(shQuote(pdfviewer), shQuote(manual)),
             wait = FALSE)
    }
  } else browseURL(manual)
}
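## viewManual() and viewFrenchManual() duplicate the same viewer logic; a
## possible refactoring, only a sketch ('.viewPdf' is not part of the
## package API):
if (FALSE) {
	.viewPdf <- function (pdf) {
		pdfviewer <- getOption("pdfviewer")
		if (is.null(pdfviewer)) {
			browseURL(pdf)
		} else if (.Platform$OS.type == "windows") {
			shell.exec(pdf)
		} else {
			system(paste(shQuote(pdfviewer), shQuote(pdf)), wait = FALSE)
		}
	}
	.viewPdf(file.path(getTemp("ZIetc"), "ZooImageManual.pdf"))
}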
focusR <- function ()
{
	## Switch the focus to the R console
	### TODO: notify this command is not available elsewhere (inactivate menu?)
	if (isRgui()) bringToTop(-1) else
		stop("Not implemented in this environment")
}
focusGraph <- function ()
{
	## Focus to the active R graph (create one if there is no graph device)
	### TODO: notify this command is not available elsewhere (inactivate menu?)
	if (is.null(dev.list())) {
		device <- match.fun(getOption("device"))
		device()
	} else {
		## Activate current graph window
		if (isRgui()) bringToTop() else
			stop("Not implemented in this environment")
	}
}
## Show an assistant dialog box allowing the user to choose between VueScan and
## another acquisition program... and remember that setting in the options
acquireImg <- function ()
{
	## First read the options to determine which software is recorded there...
 	Asoft <- getOption("ZI.AcquisitionSoftware", "VueScan")
	if (Asoft == "VueScan") {
		opts <- c("VueScan", "Another software...")
		othersoft <- ""
		defval <- "VueScan"
	} else {
		othersoft <- Asoft
       	defval <- basename(othersoft)
		opts <- c("VueScan", defval, "Another software...")
	}
	## Then, show the dialog box
 	#res <- modalAssistant(paste(getTemp("ZIname"), "picture acquisition"),
	#	c("To acquire digital plankton images,",
	#	"you can use a specialized equipment or",
	#	"a digital camera on top of a binocular or",
	#	"a flatbed scanner, ...",
	#	"",
	#	"To pilot a scanner or rework RAW digicam images",
	#	"you can use 'Vuescan'.",
	#	"You can also specify to use your own software.",
	#	"", "", "Use:", ""), init = defval,
	#	options = opts, help.topic = "acquireImg")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	res <- dlgList(opts, preselect = defval, multiple = FALSE,
		title = "Acquire images with:")$res
	if (!length(res)) return(invisible())
	## Did we selected "Another software..."?
	if (res == "Another software...") {
		## Ask for selecting this software
		Asoft <- dlgOpen(title = "Select a program...", multiple = FALSE)$res
		if (!length(Asoft)) return(invisible(NULL)) # Cancelled dialog box
	}
	## Did we selected "VueScan"
	if (res == "VueScan") {
		startPgm("VueScan", switchdir = TRUE)
		options(ZI.AcquisitionSoftware = "VueScan")
		return(invisible(NULL))
	}
	## We should have selected a custom software...
	if (!file.exists(Asoft))
		stop("Program '", Asoft, "' not found!")
	## Start the program
	system(paste('"', Asoft, '"', sep = ""), wait = FALSE)
	## Record it in the options
    options(ZI.AcquisitionSoftware = Asoft)
}
importImg <- function ()
{
	# Import images... Basically, you can select a series of images in a
	# directory and the program asks to write the associated .zim files,
	# or you can use other processes that automatically build .zim files
	# and/or import images/data, including custom processes defined in
	# separate 'ZIEimport' objects (see the FlowCAM import routine for an
	# example)
	Images <- selectFile("Img", multiple = TRUE, quote = FALSE,
		title = "Select data to import...")
	## Look if there is at least one image selected
	if (!length(Images)) return(invisible(FALSE))
	dir <- dirname(Images[1])
	ImagesFiles <- basename(Images)
	has <- function (file, pattern)
		grepl(pattern, file)
	## Determine which kind of data it is
	if (has(ImagesFiles[1], pattern = "^Import.*[.]zie$")) {
		if (length(Images) > 1)
			warning("you cannot select more than one ZIE file; using the first one")
		return(invisible(zieMake(path = dir, Filemap = ImagesFiles[1],
		  check = TRUE)))
	} else if (has(ImagesFiles[1], "[.]txt$")) {
		## Special Case for FlowCAM images
		if (length(Images) > 1)
			warning("you cannot select more than one TXT file; using the first one")
		## I also need the "ImportTemplate.zie" file in the same path
		txtFile <- Images
		zieTemplate <- file.path(dir, "ImportTemplate.zie")
		if (!checkFileExists(zieTemplate, "zie", force.file = TRUE)) {
			message("Missing ImportTemplate.zie file in the same directory")
		  return(invisible(FALSE))
		}
		## Create .zim files + FitVisParameters.csv file for the FlowCAM
		res <- zieCompileFlowCAM(path = dirname(txtFile), Tablefile = txtFile,
			Template = zieTemplate, check.names = FALSE)
		return(invisible(res))
	} else if (has(ImagesFiles[1], "[.][tT][iI][fF]$")) {
		pattern <- extensionPattern(".tif")
	} else if (has(ImagesFiles[1], "[.][jJ][pP][gG]$")) {
        pattern <- extensionPattern("jpg")
	} else {
		warning("unrecognized data type!")
		return(invisible(FALSE))
	}
	## If there is no special treatment, just make all required .zim files
	## for currently selected images
	invisible(zimMake(dir = dir, pattern = pattern, images = ImagesFiles))
}
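## The routing above is driven by regular expressions applied to the first
## selected file name; a quick illustration with hypothetical file names:
if (FALSE) {
	grepl("^Import.*[.]zie$", "Import_Table.zie")  # TRUE -> zieMake()
	grepl("[.]txt$", "sample01.txt")               # TRUE -> FlowCAM import
	grepl("[.][tT][iI][fF]$", "Scan001.TIF")       # TRUE -> .zim files for TIFFs
	grepl("[.][jJ][pP][gG]$", "photo.jpg")         # TRUE -> .zim files for JPEGs
}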
## TODO: the text appears only on one line on the Mac???
processImg <- function ()
{
	## Display a dialog box telling how to process images using ImageJ
	## When the user clicks on 'OK', ImageJ is started... + the checkbox 'close R'
	#res <- modalAssistant(paste(getTemp("ZIname"), "picture processing"),
	#	c(paste("Once images are acquired and imported into", getTemp("ZIname")),
	#	"(they have correct associated metadata), they must be",
	#	"processed.",
	#	"",
	#	"To do so, start 'ImageJ' (just click 'OK') and select",
	#	paste("the method for your images in 'Plugins -> ", getTemp("ZIname"),
	#		"'.", sep = ""),
	#	"",
	#	"For very large images, or on computers with limited",
	#	"RAM memory, it is advised to close all other programs.",
	#	"Check the option below to close R in this case.", "", ""),
	#	init = "0", check = "Close R before running 'ImageJ'",
	#	help.topic = "processImg")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	res <- dlgMessage(paste("You will switch now to ImageJ to process your",
		"images. Do you want to continue?"), type = "okcancel")$res
	if (res == "cancel") return(invisible(NULL))
 	## Start ImageJ
	if (!is.null(getOption("ImageEditor")))
		startPgm("ImageEditor", switchdir = TRUE, iconize = TRUE)
	## Do we have to close R?
	#if (res == "1") q()
}
makeZid <- function ()
{
	## Create ZID files, possibly processing images first
	## TODO: get the list of all available processes
	## and select it automatically from the ZIM file
	defval <- "Scanner_Gray16"
	## Calls the class org.sciviews.zooimage.ZooImageProcessList to get
	## the list of available processes
	getProcessList <- function () {
		cmd <- sprintf('java -cp .:"%s":"%s" org.sciviews.zooimage.ZooImageProcessList',
			system.file("imagej", "ij.jar", package = "zooimage"),
			system.file("imagej", "plugins", "_zooimage.jar",
			package = "zooimage"))
		system(cmd, intern = TRUE)
	}
	processes <- getProcessList()
	opts <- c( processes, "-- None --")
	## Then, show the dialog box
 	#plugin <- modalAssistant(paste(getTemp("ZIname"), "process images"),
	#	c("Process images with associated metadata (ZIM files)",
	#	"in batch mode from one directory and make ZID files.",
	#	"", "Select an image processor:", ""), init = defval,
	#	options = opts, help.topic = "processIJ")
	## Analyze result
	#if (plugin == "ID_CANCEL") return(invisible())
	plugin <- dlgList(opts, preselect = defval, multiple = FALSE,
		title = "Select a batch image processor:")$res
	if (!length(plugin)) return(invisible(NULL))
	## Select zim file or directory
	dir <- dlgDir()$res
	if (!length(dir)) return(invisible(NULL))
	## Do we need to process the images with ImageJ?
	if (plugin != "-- None --") {
		ijplugin <- function (zimfile, ij.plugin = c("Scanner_Gray16",
			"MacroPhoto_Gray16", "Scanner_Color", "Microscope_Color")){
			ij.plugin <- match.arg(ij.plugin)
			cmd <- sprintf('java -Xmx900m -cp .:"%s":"%s" org.sciviews.zooimage.ZooImage %s "%s"',
				system.file("imagej", "ij.jar", package = "zooimage"),
				system.file("imagej", "plugins", "_zooimage.jar",
					package = "zooimage"), ij.plugin,
					tools::file_path_as_absolute(zimfile))
			return(invisible(system(cmd, intern = TRUE)))
		}
		## TODO: update a progress bar from ImageJ (using sockets ?)
		ijplugin(dir, ij.plugin = plugin)
	}
	## Finalize .zid files (and possibly also .zip files by updating their comment)
#    res <- modalAssistant(paste(getTemp("ZIname"), "data processing"),
#		c("You should have processed all your images now.",
#		"The next step is to finalize the .zid files (ZooImage",
#		"Data files). There will be one data file per sample and",
#		"it is all you need for the next part of your work...",
#		"",
#		"Once this step succeed, you can free disk space by",
#		"transferring all files from the _raw subdirectory to",
#		"archives, for instance, DVDs (Apps -> CD-DVD burner).",
#		"",
#        "Warning: the whole _work subdirectory with intermediary",
#		"images will be deleted, and all .zim files will be",
#		"moved to the _raw subdirectory.",
#		"At the end, you should have only .zid files remaining",
#		"in your working directory.", "",
#		"Click 'OK' to proceed (select working directory)...", ""),
#		init = "1", check = "Check vignettes", help.topic = "makeZid")
#	# Analyze result
#	if (res == "ID_CANCEL") return(invisible())
#	# Confirm the directory to process...
#	dir <- dlgDir()$res
#	if (length(dir) == 0) return(invisible())
 	## Do we check the vignettes (only if images were not processed)?
 	check.vignettes <- (plugin == "-- None --")
	## Make .zid files
    cat("\n")
	## TODO: combine the log from ImageJ with this one!
	zidCompressAll(path = dir, check.vignettes = check.vignettes,
		replace = TRUE, delete.source = TRUE)
}
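## Note: the hard-coded ':' classpath separator in the java calls above is
## POSIX-specific; Java expects ';' on Windows. A portable sketch, assuming
## the same jar layout (not run):
if (FALSE) {
	sep <- .Platform$path.sep  # ":" on unix-alikes, ";" on Windows
	cp <- paste(".",
		system.file("imagej", "ij.jar", package = "zooimage"),
		system.file("imagej", "plugins", "_zooimage.jar", package = "zooimage"),
		sep = sep)
	cmd <- sprintf('java -cp "%s" org.sciviews.zooimage.ZooImageProcessList', cp)
	system(cmd, intern = TRUE)
}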
makeZidb <- function ()
{
	## Get the sample directory
	dir <- getwd()
	smpdir <- dlgDir(default = dir, title = "Select a sample base dir")$res
	## Make sure smpdir does not end with /
	smpdir <- sub("/$", "", smpdir)
	if (!length(smpdir) || !file.exists(smpdir) || !file.info(smpdir)$isdir)
		return(invisible(NULL))
  ## Check if this is a sample where we also need to create vignettes
  workdir <- file.path(smpdir, "_work")
  if (file.exists(workdir) && file.info(workdir)$isdir &&
      length(dir(workdir, pattern = "_dat5\\.zim$"))) {
    #cleanit <- (dlgMessage("Keep temporary files?\n(only answer <No> if you think you could be limited in disk space!)", type = "yesno")$res == "no")
    cleanit <- FALSE
    res <- makeZIVignettes(orig.dir = workdir, target.dir = smpdir, clean.work = cleanit)
  } else {
    ## Old ZI1-3 approach: call zidbMake() function
    #### TODO: create zim and _dat1.zim files
    res <- zidbMake(smpdir, type = "ZI1", check = TRUE,
      check.vignettes = TRUE, replace = FALSE, delete.source = FALSE)
  }
  if (res) {# Process was correct
    # Move data from _work to _work_to_delete
    workdeldir <- file.path(smpdir, "_work_to_delete")
    if (!file.exists(workdeldir))
      dir.create(workdeldir)
    files <- dir(workdir)
    file.rename(file.path(workdir, files), file.path(workdeldir, files))
    message("You can now archive data in the '_raw' subdirectory and delete '_work_to_delete' to save disk space")
  } else {# At least one sample was not processed correctly
    message("Problem(s) when processing these samples: check error messages, apply corrections and reprocess...")
  }
}
makeZidbFlowCAM <- function ()
{
	## Get the sample directory
	dir <- getwd()
	smpdir <- dlgDir(default = dir, title = "Select a sample base dir")$res
	if (!length(smpdir) || !file.exists(smpdir) || !file.info(smpdir)$isdir)
		return(invisible(NULL))
	## Get .lst file first
	Lst <- dir(smpdir, pattern = "\\.lst$", full.names = TRUE)
	if (length(Lst)) {
		res <- try(importFlowCAM(Lst[1], rgb.vigs = FALSE, replace = FALSE),
			silent = TRUE)
		if (inherits(res, "try-error"))
			stop("Error importing sample ", basename(smpdir))
	} else stop("No .lst file found in this directory... Is this really a FlowCAM sample dir?")
}
makeTrain <- function ()
{
	## Select samples, and a grouping template... and prepare
	## for making a training set
    ## First read the options to determine which grouping is recorded there...
 	Grp <- getOption("ZI.DefaultGrouping", "[Basic]")
	## Does this point to an actual file?
	if (file.exists(Grp)) {
		defval <- basename(Grp)
		opts <- c("Basic", "Detailed", "Very_detailed", defval, "Another config...")
		otherGrp <- Grp
	} else {
		defval <- sub("^[[](.+)[]]$", "\\1", Grp)
		opts <- c("Basic", "Detailed", "Very_detailed", "Another config...")
		otherGrp <- ""
	}
	## Then, show the dialog box
 	#res <- modalAssistant(paste(getTemp("ZIname"), "prepare training set"),
	#	c("This step prepares a directory in the hard disk",
	#	"where you will have the opportunity to manually",
	#	"classify vignettes in as many taxa as you want.",
	#	"The hierarchy of the folders and subfolders can",
	#	"be used to represent various levels of classification",
	#	"that the software will be able to use subsequently.",
	#	"",
	#	"You must specify: (1) a grouping scheme to start with,",
	#	"(2) a base directory where to locate the training set,",
	#	"(3) a series of .zid files as source of vignettes.", "",
	#	"Use the following grouping scheme:", ""), init = defval,
	#	options = opts, help.topic = "makeTrain")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	res <- dlgList(opts, preselect = defval, multiple = FALSE,
		title = "Select the default classes to use to initialize your training set:")$res
	if (!length(res)) return(invisible(NULL))
	## Did we selected "Another config..."?
	if (res == "Another config...") {
		## Ask for selecting a .zic file containing the config
        otherGrp <- selectFile("Zic", multiple = FALSE, quote = FALSE,
			title = "Select a .zic file...")
		if (!length(otherGrp)) return(invisible(NULL))
		## Cancelled dialog box
		res <- otherGrp
	} else if (res %in% c("Basic", "Detailed", "Very_detailed")) {
		## Did we select a standard scheme?
		res <- paste("[", res, "]", sep = "")
	} else res <- Grp  # We should have selected the previously recorded scheme...
	## Save this config for later use
    options(ZI.DefaultGrouping = res)
	## Ask for the base directory
    dir <- dlgDir()$res
	if (!length(dir)) return(invisible(NULL))
	## Ask for a subdir for this training set
	subdir <- dlgInput("Subdirectory where to create the training set:",
		default = "_train")$res
	if (!length(subdir)) return(invisible(NULL))
	## Ask for the .zid files
    zidfiles <- selectFile(type = "ZidZidb", multiple = TRUE, quote = FALSE)
	if (!length(zidfiles)) return(invisible(NULL))
	## Prepare the training set
	prepareTrain(file.path(dir, subdir), zidfiles, template = res)
	imageViewer(file.path(dir, subdir, "_"))
	## Remember the directory...
	assignTemp("ZI.TrainDir", file.path(dir, subdir))
}
## Count cells in particles (colonies)
countCellsGUI <- function ()
{
    ## Get the training set directory
    dir <- getTemp("ZI.TrainDir")
    if (is.null(dir) || !file.exists(dir) || !file.info(dir)$isdir)
        dir <- getwd()
    traindir <- dlgDir(default = dir, title = paste("Select a", getTemp("ZIname"),
        "training set base dir"))$res
    if (!length(traindir) || !file.exists(traindir) || !file.info(traindir)$isdir)
        return(invisible(NULL))
    ## Select one class
    res <- jpgList(traindir, recursive = TRUE)
    if (!length(res))
        res <- pngList(traindir, recursive = TRUE)
    if (!length(res)) {
        warning("no PNG or JPEG vignettes found in this directory tree")
        return(invisible(FALSE))
    }
    res <- gsub("[\\]", "/", res)
    Id <- noExtension(res)
    Paths <- unique(dirname(res))
    Classes <- sort(basename(Paths))
    Class <- dlgList(Classes, multiple = FALSE,
		title = "Select one class:")$res
	if (!length(Class)) return(invisible(NULL))
    ## Ask to reset (if something is already set)
    countPath <- file.path(traindir, "_count.RData")
    reset <- FALSE
    if (file.exists(countPath)) {
        train2 <- readRDS(countPath)
        ncount <- sum(!is.na(train2$Nb_cells[train2$Class == Class]))
        if (ncount > 0) {
            if (ncount == 1) {
                msg <- "There is one vignette processed. Do you want to keep its count?"
            } else msg <- paste("There are", ncount,
                "vignettes already processed. Do you want to keep these counts?")
            res <- dlgMessage(msg, type = "yesnocancel")$res
            if (res == "cancel") return(invisible(NULL))
            reset <- (res == "no")
        }
    }
    ## Call cellCount
    invisible(cellCount(traindir = traindir, class = Class, reset = reset))
}
## Read a training set and create a ZITrain object
collectTrain <- function ()
{
	## Get a possibly saved directory as default one
	dir <- getTemp("ZI.TrainDir")
	if (is.null(dir) || !file.exists(dir) || !file.info(dir)$isdir)
		dir <- getwd()
	## Ask for a base directory of a training set...
	dir <- dlgDir(default = dir, title = paste("Select a", getTemp("ZIname"),
		"training set base dir"))$res
	if (!length(dir) || !file.exists(dir) || !file.info(dir)$isdir)
		return(invisible(NULL))
	## Ask for a name for this ZITrain object
	name <- dlgInput("Name for the ZITrain object to create in the global environment:",
		default = "ZItrain")$res
	if (!length(name)) return(invisible(FALSE))
	name <- make.names(name)	# Make sure it is a valid name!
	## Get the training set and save it in .GlobalEnv under the provided name
	res <- getTrain(dir, creator = NULL, desc = NULL, keep_ = FALSE)
	.assignGlobal(name, res)
	## Remember the object name
	assignTemp("ZI.TrainName", name)
	## Print information about this training set
	message("Manual training set data collected in '", name, "'")
	cat("\nClassification stats:\n")
	print(table(res$Class))
	cat("\nProportions per class:\n")
	print(table(res$Class) / length(res$Class) * 100)
}
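## The two tables printed above are plain base R summaries; the same
## computation on a toy vector:
if (FALSE) {
	Class <- factor(c("copepod", "copepod", "detritus", "egg"))
	table(Class)                        # counts per class
	table(Class) / length(Class) * 100  # percentages per class
}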
## Add data to an existing training set
addVigsToTrain <- function ()
{
	## Select zid or zidb files to add in the training set
	zidb <- selectFile(type = "ZidZidb", multiple = TRUE, quote = FALSE)
	if (!length(zidb)) return(invisible(NULL))
	## Select the training set in which we add new vignettes
	dir <- getTemp("ZI.TrainDir")
	if (is.null(dir) || !file.exists(dir) || !file.info(dir)$isdir)
		dir <- getwd()
	## Ask for a base directory of a training set...
	dir <- dlgDir(default = dir, title = paste("Select a", getTemp("ZIname"),
		"training set base dir"))$res
	if (!length(dir) || !file.exists(dir) || !file.info(dir)$isdir)
		return(invisible(NULL))
	## Extract vignettes in the training set in a _NewVignettesX directory
	message("Adding vignettes from these files to _ subdir...")
	addToTrain(traindir = dir, zidbfiles = zidb)
}
## Compute differences between two training sets in a text file
compTrain <- function ()
{
	## Get the first training set directory
	dir <- getwd()
	traindir1 <- dlgDir(default = dir, title =
		paste("Select a first", getTemp("ZIname"), "training set base dir"))$res
	if (!length(traindir1) || !file.exists(traindir1) ||
		!file.info(traindir1)$isdir)
		return(invisible(NULL))
	## Get the second training set directory
	traindir2 <- dlgDir(default = dir, title =
		paste("Select a second (modified)", getTemp("ZIname"),
			"training set base dir"))$res
	if (!length(traindir2) || !file.exists(traindir2) ||
		!file.info(traindir2)$isdir)
		return(invisible(NULL))
	## TODO: could be PNG too!
	## List all vignettes (with paths) in train1 and train2
	list1 <- jpgList(traindir1, recursive = TRUE)
	list2 <- jpgList(traindir2, recursive = TRUE)
	## Extract vignette ID and corresponding path from list1 and list2
	PathsVigs1 <- strsplit(list1[grepl("[.]jpg$", basename(list1))],
		"/(?=[^/]+$)", perl = TRUE)
	PathsVigs2 <- strsplit(list2[grepl("[.]jpg$", basename(list2))],
		"/(?=[^/]+$)", perl = TRUE)
	Vigs1 <- unlist(lapply(PathsVigs1, `[[`, 2))
	Vigs2 <- unlist(lapply(PathsVigs2, `[[`, 2))
	Paths1 <- unlist(lapply(PathsVigs1, `[[`, 1))
	Paths2 <- unlist(lapply(PathsVigs2, `[[`, 1))
	## Search redundant vignettes
	RedundantVigs1 <- Vigs1[duplicated(Vigs1)]
	RedundantVigs2 <- Vigs2[duplicated(Vigs2)]
	## Differences between list 1 and list2 (symmetric difference)
	Chg <- union(setdiff(list1, list2), setdiff(list2, list1))
	ChgVigs <- unique(c(RedundantVigs1, RedundantVigs2, basename(Chg)))
	if (length(ChgVigs) < 1) {
		warning("No differences between these two training sets...")
	} else {
		ChgToSave <- NULL
		for (i in 1:length(ChgVigs)) {
			nameChg <- sub(".jpg", "", ChgVigs[i])
			if (ChgVigs[i] %in% RedundantVigs1) {
				status <- "Redundant"
				pathTrain1 <- Paths1[which(Vigs1 == ChgVigs[i])]
				pathTrain2 <- ""
				ChgToSave <- rbind(ChgToSave, cbind(nameChg, status,
					pathTrain1, pathTrain2))
			}
			if (ChgVigs[i] %in% RedundantVigs2) {
				status <- "Redundant"
				pathTrain1 <- ""
				pathTrain2 <- Paths2[which(Vigs2 == ChgVigs[i])]
				ChgToSave <- rbind(ChgToSave, cbind(nameChg, status,
					pathTrain1, pathTrain2))
			}
			if (!(ChgVigs[i] %in% RedundantVigs1) &
				!(ChgVigs[i] %in% RedundantVigs2)) {
				if (length(Paths1[which(Vigs1 == ChgVigs[i])]) < 1) {
					status <- "Added"
					pathTrain1 <- "Not found"
					pathTrain2 <- Paths2[which(Vigs2 == ChgVigs[i])]
				} else if (length(Paths2[which(Vigs2 == ChgVigs[i])]) < 1) {
					status <- "Deleted"
					pathTrain1 <- Paths1[which(Vigs1 == ChgVigs[i])]
					pathTrain2 <- "Not found"
				} else {
					status <- "Moved"
					pathTrain1 <- Paths1[which(Vigs1 == ChgVigs[i])]
					pathTrain2 <- Paths2[which(Vigs2 == ChgVigs[i])]
				}
				ChgToSave <- rbind(ChgToSave, cbind(nameChg, status,
					pathTrain1, pathTrain2))
			}
		}
		colnames(ChgToSave) <- c("Vignette", "Status",
            paste("Path in ", basename(traindir1), sep = ""),
            paste("Path in ", basename(traindir2), sep = ""))
		ChgToSave <- ChgToSave[order(ChgToSave[,
			which(colnames(ChgToSave) == "Status")]), ]
		compFile <- paste(traindir2, "/", basename(traindir1), " VS ",
			basename(traindir2), ".txt", sep = "")
		cat(paste("First training set: ", basename(traindir1), "\n", sep = ""),
			file = compFile, append = TRUE)
		cat(paste("Second training set: ", basename(traindir2), "\n", sep = ""),
			file = compFile, append = TRUE)
		cat("\nSummary of changes:\n", file = compFile, append = TRUE)
		ChgSummary <- table(ChgToSave[, which(colnames(ChgToSave) == "Status")])
		write.table(ChgSummary, file = compFile, sep = "\t",
			row.names = FALSE, col.names = FALSE, append = TRUE)
		cat("\n", file = compFile, append = TRUE)
		write.table(ChgToSave, file = compFile, col.names = FALSE,
			sep = "\t", row.names = FALSE, append = TRUE)
	}
}
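## The comparison above hinges on the symmetric difference of the two
## vignette listings; a toy illustration with made-up paths:
if (FALSE) {
	list1 <- c("taxonA/v1.jpg", "taxonA/v2.jpg", "taxonB/v3.jpg")
	list2 <- c("taxonA/v1.jpg", "taxonC/v2.jpg", "taxonB/v4.jpg")
	union(setdiff(list1, list2), setdiff(list2, list1))
	# v2.jpg moved (taxonA -> taxonC), v3.jpg deleted, v4.jpg added
}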
## New version to accept variable selection and/or a new formula (v. 1.2-2)
## TODO: avoid duplication of code here
makeClass <- function ()
{
 	## Create a classifier, using a ZI1Class object (new version)
	## Ask for an algorithm + additional parameters
	## Return a ZIClass object
	defval <- "linear discriminant analysis"
	opts <- c("linear discriminant analysis",
			  "recursive partitioning (tree)",
			  "k-nearest neighbour",
			  "learning vector quantization",
			  "neural network",
			  "random forest",
			  "Variables Selection")
 	#res <- modalAssistant(paste(getTemp("ZIname"), "make classifier"),
	#	c("This is a simplified version of the classifiers",
	#	"where you just need to select one algorithm.",
	#	"Warning! Many algorithms have parameters to be",
	#	"fine-tuned before efficient use... and this must be",
	#	"done for each specific data set! Here, only default",
	#	"parameters that have proven efficient with plankton",
	#	"are applied automatically. Some methods already work",
	#	"pretty well that way.",
	#	"", "Learn using an algorithm:", ""), init = defval,
	#	options = opts, help.topic = "makeClass")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	res <- dlgList(opts, preselect = defval, multiple = FALSE,
		title = "Select an algorithm for creating your classifier:")$res
	if (!length(res)) return(invisible(NULL))
	if (res != "Variables Selection") {
		## Use default values for the classifier creation
		warnings("These defaults variables are used : logArea, Mean, StdDev, ",
			"Mode, Min, Max, logPerim., logMajor, logMinor, Circ., logFeret, ",
			"IntDen, Elongation, CentBoxD, GrayCentBoxD, CentroidsD, Range, ",
			"MeanPos, SDNorm, CV")
		## Compute algorithm from res
		algorithm <- switch(res,
			`linear discriminant analysis` = "lda",
			`recursive partitioning (tree)` = "rpart",
			`random forest` = "randomForest",
			`support vector machine` = "svm",
			`k-nearest neighbour` = "ipredknn",
			`learning vector quantization` = "mlLvq",
			`neural network` = "mlNnet")
		## Look if we have a manual training set object defined
		ZIT <- getTemp("ZI.TrainName")
		if (is.null(ZIT)) ZIT <- ""
		## Ask for a ZITrain object
		ZIT <- selectObject("ZITrain", multiple = FALSE, default = ZIT,
			title = "Choose one ZITrain objects:")
		if (!length(ZIT) || (length(ZIT) == 1 && ZIT == ""))
			return(invisible(NULL))
		## Ask for a name for this ZIClass object
		name <- dlgInput("Name for the ZIClass object to create in the global environment:",
			default = "ZIclass")$res
		if (!length(name)) return(invisible(NULL))
		name <- make.names(name)	# Make sure it is a valid name!
		## Calculate results
		res <- ZIClass(Class ~ ., data = get(ZIT, envir = .GlobalEnv),
			algorithm = algorithm)
	} else {
		## Options if 'Variables Selection' is selected v. 1.2-2
		opts <- c("linear discriminant analysis",
				"recursive partitioning (tree)",
				"k-nearest neighbour",
				"learning vector quantization",
				"neural network",
				"random forest")
		## Dialog box if 'Variables Selection' is selected v1.2-2
		#res <- modalAssistant(paste(getTemp("ZIname"), "make classifier"),
		#	c("This is a simplified version of the classifiers",
		#	"where you just need to select one algorithm.",
		#	"Warning! Many algorithms have parameters to be",
		#	"fine-tuned before efficient use... and this must be",
		#	"done for each specific data set!",
		#	"",
		#	"Here, you can select",
		#	"variables to use for the classifier creation.",
		#	"",
		#	"Warning! Select only pertinent and useful measurements.",
		#	"", "Learn using an algorithm:", ""), init = defval,
		#	options = opts, help.topic = "makeClass")
		#if (res == "ID_CANCEL") return(invisible())
		res <- dlgList(opts, preselect = defval, multiple = FALSE,
			title = "Select an algorithm for creating your classifier:")$res
		if (!length(res)) return(invisible(NULL))
		## Compute algorithm from res
		algorithm <- switch(res,
			`linear discriminant analysis` = "lda",
			`recursive partitioning (tree)` = "rpart",
			`random forest` = "randomForest",
			`support vector machine` = "svm",
			`k-nearest neighbour` = "ipredknn",
			`learning vector quantization` = "mlLvq",
			`neural network` = "mlNnet")
		## Look if we have a manual training set object defined
		ZIT <- getTemp("ZI.TrainName")
		if (is.null(ZIT)) ZIT <- ""
		## Ask for a ZITrain object
		ZIT <- selectObject("ZITrain", multiple = FALSE, default = ZIT,
			title = "Choose one ZITrain objects:")
		if (length(ZIT) == 0 || (length(ZIT) == 1 && ZIT == ""))
			return(invisible(NULL))
		## Ask for a name for this ZIClass object
		name <- dlgInput("Name for the ZIClass object to create:",
			title = "Creating a classifier", default = "ZIclass")$res
		if (!length(name)) return(invisible(NULL))
		name <- make.names(name)	# Make sure it is a valid name!
		## Calculate formula using variables of the training set
### TODO: do not return a formula, but a list of variables to select + "Class"!
		## Variable selection for the classifier
		selectVars <- function (ZITrain,
		calc.vars = getOption("ZI.calcVars", "calcVars")) {
			## ZITrain must be a ZItrain object
			if (!inherits(ZITrain, "ZITrain")) {
				warning("'ZITrain' must be a 'ZITrain' object")
				return(character(0))
			}
			calcfun <- match.fun(as.character(calc.vars)[1])
			## Parameters measured on particles and new variables calculated
			mes <- as.vector(colnames(calcfun(ZITrain)))
			presel <- c("Id", "FIT_Cal_Const", "Item", "FIT_Raw_Area",
				"FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
				"FIT_Raw_Perim", "FIT_Raw_Convex_Perim", "FIT_Feret_Max_Angle",
				"FIT_Feret_Min_Angle", "FIT_Avg_Red", "FIT_Avg_Green", "FIT_Avg_Blue",
				"FIT_PPC", "FIT_Ch3_Peak", "FIT_Ch3_TOF", "FIT_Ch4_Peak", "FIT_Ch4_TOF",
				"FIT_SaveX", "FIT_SaveY", "FIT_PixelW", "FIT_PixelH", "FIT_CaptureX",
				"FIT_CaptureY", "FIT_Edge_Gradient", "FIT_Timestamp1", "FIT_Timestamp2",
				"FIT_Source_Image", "FIT_Calibration_Image", "FIT_High_U32",
				"FIT_Low_U32", "FIT_Total", "FIT_Red_Green_Ratio",
				"FIT_Blue_Green_Ratio", "FIT_Red_Blue_Ratio", "FIT_Ch2_Ch1_Ratio",
				"X.Item.1", "X", "Y", "XM", "YM", "BX", "BY", "Width", "Height",
				"Angle", "XStart", "YStart", "Count",  "Label", "Dil", "Class")
			DontKeep <- dlgList(mes, preselect = presel, multiple = TRUE,
				title = "Select variables you don't want to use in the classification")$res
			## Selection of features for the creation of the classifier
		#	keep <- dlgList(mes, preselect = c("ECD", "FIT_Area_ABD",
		#		"FIT_Diameter_ABD", "FIT_Volume_ABD", "FIT_Diameter_ESD",
		#		"FIT_Volume_ESD", "FIT_Length", "FIT_Width", "FIT_Aspect_Ratio",
		#		"FIT_Transparency", "FIT_Intensity", "FIT_Sigma_Intensity",
		#		"FIT_Sum_Intensity", "FIT_Compactness", "FIT_Elongation",
		#		"FIT_Perimeter", "FIT_Convex_Perimeter", "FIT_Roughness",
		#		"FIT_Ch1_Peak", "FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF",
		#		"Area", "Mean", "StdDev", "Mode", "Min", "Max", "Perim.", "Width",
		#		"Height", "Major", "Minor", "Circ.", "Feret", "IntDen", "Median",
		#		"Skew", "Kurt", "Elongation", "CentBoxD", "GrayCentBoxD", "CentroidsD",
		#		"Range", "MeanPos", "SDNorm", "CV", "logArea", "logPerim.", "logMajor",
		#		"logMinor", "logFeret"),
		#		multiple = TRUE, title = "Select variables to use for classification")$res
			## Creation of one formula for classifier calculation
			keep <- mes[!mes %in% DontKeep]
			res <- as.formula(paste("Class ~ ", paste(keep, collapse = "+")))
			return(res)
		}
		form <- selectVars(get(ZIT, envir = .GlobalEnv, inherits = FALSE))
		## Calculate results using formula created by variables selection
		res <- ZIClass(form, data = get(ZIT, envir = .GlobalEnv), algorithm = algorithm)
	}
	## Store the resulting object
	.assignGlobal(name, res)
	## Print results
	print(res)
	cat("\n")
	## Remember that ZIClass object
    assignTemp("ZI.ClassName", name)
}
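## The formula assembled by selectVars() is plain string manipulation; a
## standalone illustration:
if (FALSE) {
	keep <- c("logArea", "Mean", "StdDev", "Circ.")
	as.formula(paste("Class ~ ", paste(keep, collapse = "+")))
	# Class ~ logArea + Mean + StdDev + Circ.
}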
## Analyze confusion matrix
analyzeClass <- function ()
{
	## Analyze a classifier, using a ZI1Class object (new version)
	## Ask for an option of analysis
 	defval <- "Print Confusion Matrix"
	opts <- c("Print Confusion Matrix", "Summarize", "Plot Confusion Matrix",
		"Plot F-score", "Plot Dendrogram", "Plot Precision/recall")
	## Then, show the dialog box
 	#res <- modalAssistant(paste(getTemp("ZIClass"), "Analyze a classifier"),
	#	c("This is a simplified version of the analysis of classifiers",
	#	"where you just need to select one classifier.",
	#	"These options provide some tools to analyze your classifiers.",
	#	"", "Select a classifer and a tool:", ""), init = defval,
	#	options = opts)
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible()) # not error message is 'cancel'
	res <- dlgList(opts, preselect = defval, multiple = FALSE,
		title = "Select an analysis for the classifier:")$res
	if (!length(res)) return(invisible(NULL))
 	## Analyze a classifier: all these analyses are based on its
	## confusion matrix
	ZIC <- selectObject("ZIClass", multiple = FALSE,
		title = "Choose one ZIClass object:")
	if (!length(ZIC))
		stop("No classifier. Please, create one first!")
	ZIC <- get(ZIC, envir = .GlobalEnv)
	conf <- confusion(ZIC)
	switch(res,
		`Print Confusion Matrix` = print(conf),
		`Summarize` = print(summary(conf)),
		`Plot Confusion Matrix` = plot(conf, type = "image"),
		`Plot F-score` = plot(conf, type = "barplot"),
		`Plot Dendrogram` = plot(conf, type = "dendrogram"),
		`Plot Precision/recall` = plot(conf, type = "stars"))
	return(invisible(conf))
}
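## A non-interactive sketch of the same analyses (assuming 'ZIclass' is an
## existing ZIClass object in the workspace):
if (FALSE) {
	conf <- confusion(ZIclass)
	print(conf)                  # confusion matrix
	summary(conf)                # per-class statistics
	plot(conf, type = "image")   # heatmap of the matrix
	plot(conf, type = "barplot") # F-score per class
}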
## Extract vignettes from zid files to respective directories
vignettesClass <- function ()
{
	## Ask for the base directory
    defdir <- getTemp("ZI.BaseDir", default = getwd())
	basedir <- dlgDir(default = defdir,
		title = "Select the base directory for the test set")$res
	if (!length(basedir)) return(invisible(NULL))
	## Ask for a subdir for this training set
	subdir <- dlgInput("Subdirectory where to create the test set:",
		default = "_test")$res
	if (!length(subdir)) return(invisible(NULL))
	testdir <- file.path(basedir, subdir)
	if (file.exists(testdir))
		stop("The directory '", testdir,
			"' already exists! Please, restart and specify a new one")
	## Select .zid files to be classified
	zid <- selectFile(type = "ZidZidb", multiple = TRUE, quote = FALSE)
	if (!length(zid)) return(invisible(NULL))
	## Look if we have a classifier object defined
	zic <- getTemp("ZI.ClassName", default = "")
	zic <- selectObject("ZIClass", multiple = FALSE, default = zic,
		title = "Choose a classifier (ZIClass object):")
	if (!length(zic)) return(invisible(FALSE))
	## Save this choice for later reuse
	assignTemp("ZI.ClassName", zic, replace.existing = TRUE)
	zicObj <- get(zic, envir = .GlobalEnv)
	## Sort vignettes in the different directories, as predicted by the classifier
	prepareTest(testdir, zid, template = zicObj, classes = zicObj)
	## Remember the directory...
	assignTemp("ZI.BaseDir", basedir)
	assignTemp("ZI.TestDir", testdir)
	## Explain what to do next...
	message("Vignettes classified in '", testdir, "'")
	message("View them in your favorite file browser (and possibly correct classification manually)")
	## Classify vignettes
#	if (length(zid) > 1) {
#		classVignettesAll(zidfiles = zid, Dir = "_manuValidation",
#			ZIClass = zicObj)
#	} else { # Possibly apply a filter
#		## Give a name for the final directory
#		finalDir <- dlgInput("Name for the automatic classification directory:",
#			default = noExtension(zid), title = "Parameter filter")$res
#		if (!length(finalDir)) return(invisible(NULL))
#
#		## Read the zid file
#		ZIDat <- zidDatRead(zid)
#
#		## Select a parameter to use for the threshold
#		threshold <- createThreshold(ZIDat = ZIDat)
#		if (length(threshold)) {
#			classVignettes(zidfile = zid, Dir = finalDir,ZIClass = zicObj,
#				ZIDat = ZIDat, Filter = threshold)
#		} else {
#			classVignettes(zidfile = zid, Dir = finalDir, ZIClass = zicObj)
#		}
#	}
}
## Perform classification validation
validClass <- function ()
{
	## Select one .zid or zidb file to be validated
	zid <- selectFile(type = "ZidZidb", multiple = FALSE, quote = FALSE)
	if (!length(zid)) return(invisible(NULL))
	## Look if we have a classifier object defined
	zic <- getTemp("ZI.ClassName", default = "")
	zic <- selectObject("ZIClass", multiple = FALSE, default = zic,
		title = "Choose a classifier (ZIClass object):")
	if (!length(zic)) return(invisible(FALSE))
	## Save this choice for later reuse
	assignTemp("ZI.ClassName", zic, replace.existing = TRUE)
	zicObj <- get(zic, envir = .GlobalEnv)
	## If we have a zid file, convert it into zidb
	if (hasExtension(zid, "zid")) {
		message("Converting data into zidb format...")
		if (!zidToZidb(zid))
			stop("problem while converting '", zid, "' into a zidb file")
		zid <- paste0(zid, "b") # The zidb file
		if (!file.exists(zid))
			stop("the created zidb file '", zid, "' is not found")
	}
	## Start validation of this sample
	correctError(zid, zicObj) # This is using default parameters!
}
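## The zid -> zidb conversion used above, as a standalone sketch (assuming
## 'mysample.zid' exists in the working directory):
if (FALSE) {
	zid <- "mysample.zid"
	if (zidToZidb(zid)) {
		zidb <- paste0(zid, "b")  # zidToZidb() creates 'mysample.zidb'
		file.exists(zidb)         # should now be TRUE
	}
}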
## Augmentation of the training set by active learning
activeLearningGUI <- function ()
{
	## Active learning (adaptation of the training set with contextual items)
	## Select one .zid or zidb file to be validated
	zid <- selectFile(type = "ZidZidb", multiple = FALSE, quote = FALSE)
	if (!length(zid)) return(invisible(NULL))
	## Look if we have a training set object defined
	ZIT <- getTemp("ZI.TrainName")
	if (is.null(ZIT)) ZIT <- ""
	## Ask for a ZITrain object
	ZIT <- selectObject("ZITrain", multiple = FALSE, default = ZIT,
        title = "Choose one ZITrain objects:")
	if (!length(ZIT) || (length(ZIT) == 1 && ZIT == ""))
		return(invisible(NULL))
	## Call activeLearning (for augmentation of training set)
	train <- activeLearning(train = get(ZIT, envir = .GlobalEnv),
	                        add.mode = "SV+NSV", threshold = NA)
# 	## Call contextSelection (for selection of contextual samples)
# 	CtxSmp <- contextSelection()
# 	if (length(CtxSmp) < 1) {
# 		warning("No contextual samples selected! Initial training set will be used...")
# 	} else {
# 		## Call addItemsToTrain (for augmentation of the training set)
# 		train <- addItemsToTrain(train = get(ZIT, envir = .GlobalEnv),
# 			CtxSmp = CtxSmp, add.mode = "SV+NSV", threshold = NA,
# 			dropItemsToTrain = dropItemsToTrain)
# 	}
	#.assignGlobal(ZIT, train)
	classifier <- ZIClass(Class ~ ., data = train[!(names(train) %in% "AddedItems")],
        method = "mlRforest", calc.vars = calcVars, ntree = 200, cv.k = 10)
	attr(classifier, "path") <- attr(train, "path")
	## If we have a zid file, convert it into zidb
	if (hasExtension(zid, "zid")) {
		message("Converting data into zidb format...")
		if (!zidToZidb(zid))
			stop("problem while converting '", zid, "' into a zidb file")
		zid <- paste0(zid, "b") # The zidb file
		if (!file.exists(zid))
			stop("the created zidb file '", zid, "' is not found")
	}
	## Start validation of this sample
	correctError(zid, classifier) # This is using default parameters!
}
## Edit a samples description file... or create a new one!
editDescription <- function ()
{
	#res <- modalAssistant(paste(getTemp("ZIname"), "edit samples description"),
	#	c("Samples are about to be analyzed and collected together",
	#	"to form a series.",
	#	paste(getTemp("ZIname"), "needs to know which samples should be"),
	#	"collected into the same series and you must provide",
	#	"metadata information (especially date and time of",
	#	"collection, location of the sampling stations, or",
	#	"possibly temperature, salinity, turbidity, etc. that",
	#	"were recorded at the same time as these samples).",
	#	"",
	#	"A .zis file (by default, Description.zis) needs to be",
	#	"created and edited for each of the considered series.",
	#	"You can here edit, or create a new samples description",
	#	"file from the template.", "",
	#	"Click 'OK' to edit a samples description file now...", ""),
	#	init = "1", check = "New description file from template.",
	#	help.topic = "editDescription")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	res <- dlgMessage(paste("Create a new description file from scratch?"),
		type = "yesnocancel")$res
	if (res == "cancel") return(invisible(NULL))
	## Edit/create the description file...
	if (res == "yes") {	# Create a Zis file ()take care: was "1" for modalAssistant!
		res <- dlgSave(default = "Description.zis",
			title = "Create a new ZIS file",
			filters = matrix(c("ZooImage samples description", ".zis"),
			ncol = 2, byrow = TRUE))$res
		if (!length(res)) return(invisible(NULL))
		if (regexpr("[.][zZ][iI][sS]$", res) < 0) res <- paste(res, ".zis",
			sep = "")
		zisfile <- zisCreate(res)
	} else { # Edit a Zis file
	    zisfile <- zisEdit(NULL)
	}
	## Remember the last zis file
    assignTemp("ZI.LastZIS", zisfile)
}
processSamplesWithCells <- function()
{
	## Ask for a "cells" file with data required to compute the number of cells
	## per colonies (per particles)
	cells <- getTemp("ZI.LastCells")
	if (is.null(cells) || !file.exists(cells))
		cells <- ""
	## Ask for the cells file
	if (cells != "") {
		cells <- dlgOpen(default = cells, title = "Select a cells counting RDS file",
			filters = matrix(c("Cells counting RDS file", ".rds"),
			ncol = 2, byrow = TRUE))$res
	} else if (file.exists(file.path(getwd(), "cells.rds"))) {
		cells <- dlgOpen(default = file.path(getwd(), "cells.rds"),
			title = "Select a cells counting RDS file",
			filters = matrix(c("Cells counting file", ".rds"),
			ncol = 2, byrow = TRUE))$res
	} else {
		cells <- dlgOpen(title = "Select a cells counting RDS file",
			filters = matrix(c("Cells counting file", ".rds"),
			ncol = 2, byrow = TRUE))$res
	}
	if (!length(cells)) return(invisible(NULL))
	## Remember last file used
	assignTemp("ZI.LastCells", cells)
	## Call .processSamples() using this file...
	.processSamples(cells)
}
## Just delegate to .processSamples() without providing a cells file to do
## the computation without cells counting
processSamples <- function ()
	.processSamples()
.processSamples <- function(cells = NULL)
{
	## Ask for a description.zis file, look at all samples described there
	## Calculate abundances, total and partial size spectra and possibly number
	## of cells per colonies (per particles) and biomasses
	## Get the last edited description.zis file
	## Get a possibly saved directory as default one
	zisfile <- getTemp("ZI.LastZIS")
	if (is.null(zisfile) || !is.character(zisfile) || !file.exists(zisfile))
		zisfile <- ""
	## Ask for a file
	if (zisfile != "") {
		zisfile <- dlgOpen(default = zisfile, title = "Select a ZIS file",
			filters = matrix(c("ZooImage samples description", ".zis"),
			ncol = 2, byrow = TRUE))$res
	} else if (file.exists(file.path(getwd(), "Description.zis"))) {
		zisfile <- dlgOpen(default = file.path(getwd(), "Description.zis"),
			title = "Select a ZIS file",
			filters = matrix(c("ZooImage samples description", ".zis"),
			ncol = 2, byrow = TRUE))$res
	} else {
		zisfile <- dlgOpen(title = "Select a ZIS file",
			filters = matrix(c("ZooImage samples description", ".zis"),
			ncol = 2, byrow = TRUE))$res
	}
	if (!length(zisfile)) return(invisible(NULL))
	## Added by Kevin to use manual validation (2010-08-03)
	## Option dialog box
	#res <- modalAssistant(paste(getTemp("ZIname"), "samples processing"),
	#	c(
	#		"Each sample registered in the description.zis file",
	#		"will be processed in turn to extract ecological",
	#		"parameters (abundances, biomasses, size spectra).",
	#		"",
	#		"If you want to save calculation done on each",
	#		"particle individually, check the option below.",
	#		"",
	#		"Click 'OK' to proceed...", ""
	#	), init = "0",
	#	options = "Manual Validation", check = "Save individual calculations",
	#	help.topic = "processSamples")
	## Analyze result
	#if (res == "ID_CANCEL") return(invisible())
	## PhG: this seems not to work => disable the dialog box for now
	exportdir <- NULL
	#res <- dlgMessage(paste("Save also calculations done on each particle individually?"),
	#	type = "yesnocancel")$res
	#if (res == "cancel") return(invisible(NULL))
	## Do we save individual calculations?
	#if (res == "yes")	# Note that for modalAssistant, it was "1"!
	#	exportdir <- dirname(zisfile) else exportdir <- NULL
	## Still used? Commented out for the moment!
	ManValid <- FALSE
	## Added by Kevin for semi auto classif
	## Do we use Semi automatic classification?
#	if (res == "Manual Validation") {
		#res <- modalAssistant(paste(getTemp("ZIname"), "samples processing"),
		#c(
		#	"Each sample registered in the description.zis file",
		#	"will be processed in turn to extract ecological",
		#	"parameters (abundances, biomasses, size spectra)",
		#	"after manual validation of automatic predictions",
		#	"done in the '_manualValidation' directory",
		#	"",
		#	"If you want to save calculation done on each",
		#	"particle individually, check the option below.",
		#	"",
		#	"Click 'OK' to proceed...", ""
		#), init = "0",
		#check = "Save individual calculations", help.topic = "processSamples")
		## Analyze result
		#if (res == "ID_CANCEL") return(invisible())
#		res <- dlgMessage(paste("Save also calculations done on each particle individually?"),
#			type = "yesnocancel")$res
#		if (res == "cancel") return(invisible(NULL))
#		## Do we save individual calculations?
#		if (res == "yes") # Note that for modalAssistant, it was "1"!
#			exportdir <- dirname(zisfile) else exportdir <- NULL
## TODO: change this!
#		## Select the directory where manual validation is done
#		dir <- getTemp("ZI.TrainDir")
#		if (is.null(dir) || !file.exists(dir) || !file.info(dir)$isdir)
#			dir <- getwd()
#		## Ask for a base directory of a training set...
#		dir <- dlgDir(default = dir, title = paste("Select a",
#			getTemp("ZIname"), "Manual validation base dir"))$res
#		if (!length(dir) || !file.exists(dir) || !file.info(dir)$isdir)
#			return(invisible(NULL))
#		## Read the directory
#		ZIManTable <- ZIManRead(dir)
#		message("Read the manual validation directory...\n-- Done --")
#		ManValid <- TRUE
#	} else {
#		## Classification without any manual validation
#		ManValid <- FALSE
#	}
	## Get a list of samples from the description file
	smpdesc <- zisRead(zisfile)
	smplist <- listSamples(smpdesc)
	if (!length(smplist) || all(smplist == ""))
		stop("No sample found in the description file!")
	## Are there corresponding .zidb files for all samples?
	zisdir <- dirname(zisfile)
	if (zisdir == ".") zisdir <- getwd()
	zidbfiles <- file.path(zisdir, paste(smplist, ".zidb", sep = ""))
	if (!all(file.exists(zidbfiles)) ||
		!all(regexpr("[.][zZ][iI][dD][bB]$", zidbfiles) > 0)) {
		zidbfiles <- file.path(zisdir, paste(smplist, ".zid", sep = ""))
		if (!all(file.exists(zidbfiles)) ||
			!all(regexpr("[.][zZ][iI][dD][bB]$", zidbfiles) > 0)) {
			stop("One or more .zidb/.zid files do not exist or is/are invalid, or mix of .zidb and .zid files!")
		}
	}
	## Get a classifier
	ZIC <- getTemp("ZI.ClassName")
	if (is.null(ZIC)) ZIC <- ""
	ZIC <- selectObject("ZIClass", multiple = FALSE, default = ZIC,
		title = "Choose a classifier (ZIClass object):")
	if (!length(ZIC) || (length(ZIC) == 1 && ZIC == ""))
		return(invisible(NULL))
	ZICobj <- get(ZIC, envir = .GlobalEnv)
	## Read a conversion table from disk (from /etc/Conversion.txt)
	## or another location
	## First read the options to determine which file to use...
	ConvFile <- getOption("ZI.ConversionFile", file.path(getTemp("ZIetc"),
		"Conversion.txt"))
	## Does this file exists?
	if (!file.exists(ConvFile) || ConvFile == "")
		ConvFile <- file.path(getTemp("ZIetc"), "Conversion.txt")
	## Ask for selecting a Conversion file
	ConvFile2 <- dlgOpen(default = ConvFile,
		title = "Select a conversion file...", multiple = FALSE,
		filters = matrix(c("Biomass Conversion table (*Conversion.txt)", "Conversion.txt"),
		ncol = 2, byrow = TRUE))$res
	if (!length(ConvFile2)) return(invisible(NULL)) # Cancelled dialog box
	## Read the data from this table
	conv <- read.table(ConvFile2, header = TRUE, sep = "\t")
	## Save this config for later use
	options(ZI.ConversionFile = ConvFile2)
	## Get class breaks for size spectra
	brks <- dlgInput("Breaks for size spectrum classes (empty for no spectrum):",
		default = "seq(0.2, 2, by = 0.1)")$res
 	if (!length(brks)) return(invisible(NULL))
	brks <- eval(parse(text = brks))
	## Get a name for the variable containing results
	name <- dlgInput("Name for the ZIRes object to create in the global environment:",
		default = "ZIres")$res
	if (!length(name)) return(invisible(NULL))
	name <- make.names(name)
	## Add Kevin for manual validation
	if (!isTRUE(as.logical(ManValid))) ZIManTable <- NULL
	## TODO: we need at least keep and detail
	res <- processSampleAll(path = dirname(zisfile), zidbfiles = zidbfiles,
		ZIClass = ZICobj, cells = cells, biomass = conv, breaks = brks)
	## TODO: possibly export result in a file...
	## Assign this result to the variable
	.assignGlobal(name, res)
	## Remember the name of the variable
	assignTemp("ZI.LastRES", name)
}
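## The size spectrum breaks are read as an R expression and parsed; a
## sketch of the parsing, with a hypothetical sanity check on the result:
if (FALSE) {
	brks <- eval(parse(text = "seq(0.2, 2, by = 0.1)"))
	is.numeric(brks) && !is.unsorted(brks)  # TRUE for valid breaks
}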
viewResults <- function ()
{
 	## Make graphic representations of results...
	ZIR <- getTemp("ZI.LastRES")
	if (is.null(ZIR)) ZIR <- ""
	ZIR <- selectObject("ZIRes", multiple = FALSE, default = ZIR,
		title = "Choose one ZIRes object:")
	if (!length(ZIR) || (length(ZIR) == 1 && ZIR == ""))
		return(invisible(NULL))
	## Get the object
	ZIR <- get(ZIR, envir = .GlobalEnv)
	## Ask for selecting items in the list and make these graphs
	## Compute the list of graphs
	vars <- names(ZIR)
	## Eliminate variables that cannot be plotted...
	if (inherits(ZIR, "ZI1Res")) {
    vars <- vars[-(1:25)]
  } else vars <- vars[-1]
	vars <- vars[!vars == "Note"]
	## Add the spectra graphs
	spec <- attr(ZIR, "spectrum")
	varspec <- paste("spectrum of", names(spec))
	Vars <- c(vars, varspec)
	Keep <- dlgList(Vars, multiple = TRUE, title = "Select 1 to 12 graphs:")$res
	lKeep <- length(Keep)
	if (lKeep == 0) return(invisible())
	if (lKeep > 12) {
		Keep <- Keep[1:12]
		lKeep <- 12
	}
	## If there are spectra, also ask for partial spectra
	if (any(grepl("^spectrum of ", Keep))) {
		pspec <- names(spec[[1]])
		## Replace total by [none] in this list
		pspec[pspec == "total"] <- "[none]"
		if (length(pspec) == 1) {
			Pspec <- pspec
		} else {
			Pspec <- dlgList(pspec, multiple = FALSE,
				title = "Select taxon for partial spectrum:")$res
			if (!length(Pspec))
				return(invisible(NULL))
		}
	} else Pspec <- "[none]"
	## Do the graphs
	## Determine number of rows and columns
	nc <- round((lKeep + 1) / 3)
	if (nc > 3) nc <- 3
	if (lKeep == 8) nc <- 2
	nr <- c(1, 2, 3, 2, 3, 3, 3, 4, 3, 4, 4, 4)[lKeep]
	op <- par(no.readonly = TRUE)
	on.exit(par(op))
	par(mfrow = c(nr, nc))
	for (i in 1:lKeep) {
    	## Determine if it is a x/y graph, or a size spectrum
		if (grepl("^spectrum of ", Keep[i])) { # Size spectrum
			Ser <- sub("^spectrum of ", "", Keep[i])
			plot(spec[[Ser]][["total"]], lwd = 3, col = "gray", type = "h",
				main = Ser, ylab = "Frequency")
			if (Pspec != "[none]"){
				Spec <- spec[[Ser]][Pspec, ]
				Spec[Spec == 0] <- NA
				points(Spec, lwd = 6, col = 2, type = "h")
			}
		} else { # x/y graph
			 ## If there is NA in a variable, the plot generates an error
			 #Xdat <- ZIR[, "Date"]
			 Ydat <- ZIR[, Keep[i]]
			 #if (all(is.na(Xdat)) || all(is.na(Ydat))) {
			 if (all(is.na(Ydat))) {
			    plot(0:1, 0:1, type = "n", xlab = "", ylab = "", xaxt = "n",
					yaxt = "n", main = Keep[i])
			    text(0.5, 0.5, "No data!", adj = c(0.4, 0.5))
			} else {
			 	#plot(Xdat, Ydat, xlab = "Date", ylab = Keep[i], main = Keep[i])
				plot(Ydat, xlab = "", ylab = Keep[i], main = Keep[i])
			}
		}
	}
}
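## The panel layout above maps the number of selected graphs to a
## rows x columns grid; a quick check of that mapping:
if (FALSE) {
	layoutFor <- function (lKeep) {
		nc <- round((lKeep + 1) / 3)
		if (nc > 3) nc <- 3
		if (lKeep == 8) nc <- 2
		nr <- c(1, 2, 3, 2, 3, 3, 3, 4, 3, 4, 4, 4)[lKeep]
		c(rows = nr, cols = nc)
	}
	t(sapply(1:12, layoutFor))  # grid used for 1 to 12 graphs
}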
exportResults <- function ()
{
 	## Export one or more ZIRes objects to text files...
    res <- selectObject("ZIRes", multiple = TRUE,
		title = "Choose one or more ZIRes objects:")
	if (!length(res) || (length(res) == 1 && res == ""))
		return(invisible(NULL))
	## Select a directory where to place these files
	dir <- dlgDir()$res
	if (!length(dir)) return(invisible(NULL))
	filenames <- file.path(dir, res)
	## Export them there
	for (i in 1:length(res)) {
    	## We write these tables:
		## 1) Results: [name]_AbdBio.txt
		## 2) Size spectra: [name]_Spectrum_[sample].txt
		obj <- get(res[i], envir = .GlobalEnv)
		write.table(obj,  file = paste(filenames[i], "_AbdBio.txt", sep = ""),
			quote = FALSE, sep = "\t", eol = "\n", na = "NA", dec = getDec(),
			row.names = FALSE, col.names = TRUE, qmethod = c("escape", "double"))
		spc <- attr(obj, "spectrum")
		spcnames <- names(spc)
		if (!is.null(spcnames) && length(spcnames) > 0) {
			for (j in 1:length(spcnames)) {
				## Construct a data frame
				spc1 <- spc[[spcnames[j]]]
				#breaks <- attr(spc1, "breaks")
				#breaks <- breaks[1:(length(breaks) - 1)]
				breaks <- colnames(spc1)
				spctab <- as.data.frame(spc1)
				#spctab <- spctab[ , seq(2, ncol(spctab), by = 2)]
				#names(spctab) <- names(spc1)
				#spctab <- data.frame(breaks = breaks, spctab)
				spctab <- data.frame(breaks = breaks, spctab, check.names = FALSE)
				write.table(spctab,
					file = paste(filenames[i], "_Spectrum_", spcnames[j],
					".txt", sep = ""), quote = FALSE, sep = "\t", eol = "\n",
					na = "NA", dec = getDec(), row.names = FALSE,
					col.names = TRUE, qmethod = c("escape", "double"))
			}
		}
	}
	message(i, "ZIRes object(s) exported in'", dir, "'")
}
loadObjects <- function ()
{
	file <- selectFile("RData", multiple = FALSE, quote = FALSE,
		title = "Select a RData file...")
	if (!length(file)) return(invisible(NULL)) # Cancelled dialog box
	if (file.exists(file)) load(file, envir = .GlobalEnv)
}
saveObjects <- function ()
{
	Objects <- selectObject(c("ZIDat", "ZIDesc", "ZITrain", "ZIClass", "ZIRes",
		"ZIRecode"), multiple = TRUE,
		title = paste("Choose", getTemp("ZIname"), "object(s):"))
	if (!length(Objects) || (length(Objects) == 1 && Objects == ""))
		return(invisible(FALSE))
	file <- dlgSave(default = paste(getTemp("ZIname"), ".RData", sep = ""),
		title = paste("Save", getTemp("ZIname"), "data under..."),
		multiple = FALSE, filters = matrix(c("R data", ".RData"),
		ncol = 2, byrow = TRUE))$res
	if (!length(file)) return(invisible(NULL))
	if (regexpr("[.][rR][dD][aA][tT][aA]$", file) < 0)
		file <- paste(file, ".RData", sep = "")
	save(list = Objects, file = file, compress = TRUE)
}
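## The case-insensitive extension check above normalizes the chosen file
## name; a small illustration of the same idiom:
if (FALSE) {
	addRData <- function (file) {
		if (regexpr("[.][rR][dD][aA][tT][aA]$", file) < 0)
			file <- paste(file, ".RData", sep = "")
		file
	}
	addRData("mydata")        # "mydata.RData"
	addRData("mydata.RDATA")  # unchanged (case-insensitive match)
}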
listObjects <- function ()
{
    varlist <- objects(pos = 1)
	if (!length(varlist))
		stop("No objects currently loaded in memory!")
	Filter <- NULL
	for (i in 1:length(varlist)) Filter[i] <- inherits(get(varlist[i]),
		c("ZIDat", "ZIDesc", "ZITrain", "ZIClass", "ZIRes", "ZIRecode"))
	varlist <- varlist[Filter]
	if (!length(varlist)) {
		stop("No ", getTemp("ZIname"), " objects currently loaded in memory!")
	} else {
    	print(varlist)
	}
}
removeObjects <- function ()
{
	Objects <- selectObject(c("ZIDat", "ZIDesc", "ZITrain", "ZIClass", "ZIRes",
		"ZIRecode"), multiple = TRUE,
		title = paste(getTemp("ZIname"), "object(s) to remove:"))
	if (!length(Objects) || (length(Objects) == 1 && Objects == ""))
		return(invisible(FALSE))
	rm(list = Objects, envir = .GlobalEnv)
}
calib <- function ()
{
	## Select calibration file (*.tif or *.pgm) and calculate White/Black point
	#file <- selectFile("TifPgm", multiple = FALSE, quote = FALSE,
	#	title = "Select a calibration image...")
	# Use the simpler file selector for now...
	file <- file.choose()
	if (!length(file)) return(invisible(NULL)) # Cancelled
	if (file.exists(file)) {
		message("Calibrating gray scale... [", basename(file), "]")
		flush.console()
		res <- calibrate(file)
		message("WhitePoint=", round(res["WhitePoint"]))
		message("BlackPoint=", round(res["BlackPoint"]))
		if (length(attr(res, "msg")) > 0)
			message("\nTake care:")
		message(paste(attr(res, "msg"), collapse = "\n"))
	}
}
optInOutDecimalSep <- function ()
{
	## Define the default numeric decimal separator for input and output
	Dec <- getDec()
	## Possibly ask for another one
	DecList <- c(".", ",")
	DecSel <- dlgList(DecList, preselect = Dec, multiple = FALSE,
		title = "In/Out decimal separator")$res
	## If the cancel button was pressed, or the separator is unchanged, do nothing
	if (!length(DecSel) || DecSel == Dec) return(invisible(Dec))
	## Record it in options
    options(OutDec = DecSel)
    ## Indicate change
    cat("In/Out decimal separator changed to '", DecSel, "'\n", sep = "")
 	return(invisible(DecSel))
}
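## getDec() and options(OutDec = ...) drive how numbers are formatted by
## the export functions; a sketch that restores the previous value:
if (FALSE) {
	old <- options(OutDec = ",")
	format(pi, digits = 5)  # "3,1416"
	options(old)            # restore the previous separator
}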
###### Not in menus yet! ##################
## Subpart of zid file and return a subtable corresponding to the threshold
## TODO: is this really a top-menu function... or is it supposed to be used elsewhere?
#subpartZIDat <- function ()
#{
#    ## Select files to use
#    zidFile <- selectFile(type = "Zid", multiple = FALSE, quote = FALSE)
#	if (!length(zidFile)) return(invisible(NULL))
#
#    ## Read the zid file
#    zid <- zidDatRead(zidFile)
#
#    ## Select a parameter to use for the threshold
#    threshold <- createThreshold(ZIDat = zid)
#
#    ## Apply the threshold
#    res <- subpartThreshold(ZIDat = zid, Filter = threshold)
#    return(res)
#}
################################################################################
## New User Interface using Shiny for error correction
ZIUI <- function () {
	#appdir <- system.file("gui", "errorcorrection", package = "zooimage")
	#runApp(appdir)
	res <- dlgOpen(title = "Select one R method file",
		filters = dlgFilters[c("R", "All"), ])$res
	if (length(res)) {
		source(res, chdir = TRUE)
	}
}
 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/gui.R
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## Get the name of one or several objects of a given class
selectObject <- function (class = "data.frame", default = "", multiple = FALSE,
title = paste0("Choose a ", class, ":"))
{	
	objlist <- ls(envir = .GlobalEnv)	# Get objects in .GlobalEnv
	if (!length(objlist)) {
		warning("There is no object of class '", paste(class, collapse = " "),
			"' in the user workspace!")
		return(character(0))
	}
	## Filter this list to keep only objects inheriting from a given class...
	Filter <- NULL
	for (i in 1:length(objlist))
		Filter[i] <- inherits(get(objlist[i], envir = .GlobalEnv,
			inherits = FALSE), class)
	
	## Keep only those objects
	objlist <- objlist[Filter]	
	if (!length(objlist)) {	# No such objects in .GlobalEnv
		warning("There is no object of class '", paste(class, collapse = " "),
			"' in the user workspace!")
		return(character(0))
	}
	if (default == "") default <- objlist[1]
	dlgList(objlist, preselect = default, multiple = multiple,
		title = title)$res		
}
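## Example (interactive; a hedged sketch assuming ZIDat objects exist in the
## user workspace):
#dat <- selectObject("ZIDat")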
## Get the name of one or more lists with their components of a given class
selectList <- function (class = "data.frame", default = "", multiple = FALSE,
title = paste0("Choose a list (of ", class, "s):"))
{	
	filter <- function (x) {
		item <- get(x, envir = .GlobalEnv, inherits = FALSE)
		is.list(item) && all(sapply(item, function (y) inherits(y, class)))
	}
	varlist <- Filter(filter, objects(pos = 1))	
	if (length(varlist) == 0) {
		warning("no list of '", class, "' objects in the user workspace")
		return(character(0))
	}
	if (default == "") default <- varlist[1]
	dlgList(varlist, preselect = default, multiple = multiple,
		title = title)$res		
}
## Select one or several files of a given type
selectFile <- function (type = c("ZipZid", "ZimZis", "LstZid", "ZidZidb",
"Zip", "Zid", "Zidb", "Zim", "Zis", "Zie", "Zic", "Img", "TifPgm", "RData"),
multiple = FALSE, quote = TRUE, title = NULL)
{	
	type <- match.arg(type)
	Type <- switch(type,
		ZipZid = "Zip/Zid",
		ZimZis = "Zim/Zis",
		LstZid = "Lst/Zid",
		TifPgm = "Tiff/Pgm",
		ZidZidb = "Zid/Zidb",
		type)
	
	## Provide a default title adapted to 'multiple'
	if (is.null(title)) {
		if (isTRUE(as.logical(multiple))) {
			title <- paste("Select one or several", Type, "files...")
		} else title <- paste("Select one", Type, "file...")
	}
	
	filters <- switch(type,
		ZipZid 	= c("ZooImage files"          		, ".zip",
					"ZooImage files"          		, ".zid"),
		ZimZis 	= c("ZooImage metadata files" 		, ".zim",
					"ZooImage metadata files" 		, ".zis"),
		LstZid  = c("FlowCAM list files"      		, ".lst",
					"ZooImage files"          		, ".zid"),
		ZidZidb = c("ZooImage files"          		, ".zid",
					"ZooImage databases"      		, ".zidb"),
		Zip		= c("ZooImage picture files"  		, ".zip"),
		Zid		= c("ZooImage data files"     		, ".zid"),
		Zidb    = c("ZooImage databases"      		, ".zidb"),
		Zim		= c("ZooImage metadata files" 		, ".zim"),
		Zis		= c("ZooImage sample files"   		, ".zis"),
		Zie		= c("ZooImage extension files"		, ".zie"),
		Zic     = c("ZooImage Classification Scheme",".zic" ),
		Img     = c("Tiff image files"        		, ".tif",
					"Jpeg image files"        		, ".jpg",
					"Zooimage import extensions"	,".zie",
					"Table and ImportTemplate.zie"	,".txt",
					"FlowCAM Table and ImportTemplate.zie",".txt"),
		TifPgm  = c("Tiff image files"        		, ".tif",
					"Pgm image files"         		, ".pgm"),
		RData   = c("R data"                  		, ".RData"))
	filters <- matrix(filters, ncol = 2, byrow = TRUE)
	
	res <- dlgOpen(title = title, multiple = multiple, filters = filters)$res	
	if (length(res) && res != "" && quote)
		res <- paste('"', res, '"', sep = "")
	res
}
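## Example (interactive; a hedged sketch): pick one .zidb file, unquoted
#zidbfile <- selectFile("Zidb", multiple = FALSE, quote = FALSE)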
## Select groups (taxa) from a list
## Note: from a ZIC object, use: groups <- levels(attr(ZIC, "classes"))
selectGroups <- function (groups, multiple = TRUE,
title = "Select taxa you want to plot") {
	dlgList(groups, multiple = multiple, title = title)$res
}
## Create a threshold formula
createThreshold <- function (ZIDat) {
	## Select the parameter to use
	Param <- dlgList(names(ZIDat), multiple = FALSE,
		title = "Parameter to use")$res
	## Select the threshold
	Message <- paste("Range:", "From", round(range(ZIDat[, Param])[1],
		digits = 1), "To", round(range(ZIDat[, Param])[2], digits = 1),
		";", "Indicate the threshold:")
	Threshold <- dlgInput(Message, default = paste(Param, "< 50"))$res
	if (!length(Threshold)) invisible(NULL) else Threshold
}
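## Example (interactive; a hedged sketch assuming 'zid' is a ZIDat object);
## returns a formula string such as "ECD < 50"
#filter <- createThreshold(zid)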
## Start the image viewer application on the specified dir
imageViewer <- function (dir = getwd(), pgm = getOption("ZI.ImageViewer"))
{
	if (isWin()) {
		startPgm("ImageViewer", sprintf('"%s"',
			tools::file_path_as_absolute(dir)))
	} else if (isMac()) {
		cmd <- sprintf('/Applications/Utilities/XnViewMP.app/Contents/MacOS/xnview "%s"',
			dir)
		system(cmd, wait = FALSE, ignore.stdout = TRUE, ignore.stderr = TRUE)
	} else {
		cmd <- sprintf('nautilus --geometry 600x600 "%s"', dir) 
		system(cmd, wait = FALSE, ignore.stdout = TRUE, ignore.stderr = TRUE)
	}
}
startPgm <- function (program, cmdline = "", switchdir = FALSE,
iconize = FALSE, wait = FALSE)
{
	## Look if the program path is recorded in the options
	pgmPath <- getOption(program)
	if (!is.null(pgmPath) && file.exists(pgmPath)) {
		## Do we need to switch directory?
		if (switchdir) {
			curdir <- setwd(dirname(pgmPath))
			on.exit(setwd(curdir))
		}
		## Start it
		system(paste(pgmPath, cmdline), wait = wait, ignore.stdout = TRUE,
			ignore.stderr = TRUE)
	} else stop("Program '", program, "' not found!")
	## Do we need to iconize the assistant?
#	if (iconize && !is.null(WinGet("ZIDlgWin")))
#		tkwm.iconify(WinGet("ZIDlgWin"))
}
modalAssistant <- function (title, text, init, options = NULL, check = NULL,
select.file = NULL, returnValOnCancel = "ID_CANCEL", help.topic = NULL)
{
	## TODO!!!!
	message("Modal assistant temporarily disabled!")
	return(returnValOnCancel)
	
#	## Create an assistant dialog box which behaves as a modal dialog box
#	text <- paste(text, collapse = "\n")
#	try(tkWinAdd("ZIAssist", title = title, bind.delete = FALSE))
#	ZIAssist <- WinGet("ZIAssist")
#    tkbind(ZIAssist, "<Destroy>", function () {
#		tkgrab.release(ZIAssist)
#		tkWinDel("ZIAssist")
#		#tkfocus(WinGet("ZIDlgWin"))
#	})
#	## Assign cancel by default to the return value
#    assignTemp("ZIret", returnValOnCancel)
#    ## Do not show it until it is completelly constructed!
#	tkwm.withdraw(ZIAssist)
#	## Change the icon of that window (if under Windows)
#    if (isWin()) tk2ico.set(ZIAssist, getTemp("ZIico"))
#	## This is the variable holding the result
#	resVar <- tclVar(init)
#	## Draw the dialog area
#	dlgarea <- tk2frame(ZIAssist)
#	## Place the logo to the left
#    Logo <- tklabel(dlgarea,image = ImgGet("$Tk.logo"), bg = "white")
#	## Place dialog box data
#	txtarea <- tk2frame(ZIAssist)
#	Text <- tk2label(txtarea, text = text, width = 50)
#	#### TODO: this causes a problem in Tile 0.7.2?! , justify = "left")
#	tkgrid(Text, stick = "w")
#	## Do we put options?
#	if (!is.null(options)) {
#		for (i in 1:length(options)) {
#			rb <- tk2radiobutton(txtarea)
#			tkconfigure(rb, variable = resVar, value = options[i],
#				text = options[i])
#			#### TODO: this causes a problem in Tile 0.7.2?! , justify = "left")
#			tkgrid(rb, sticky = "w")
#		}
#	}
#	## Do we have to place a checkbox?
#	if (!is.null(check)) {
#		cb <- tk2checkbutton(txtarea)
#		tkconfigure(cb, variable = resVar, text = check)
#		#### TODO: this causes a problem in Tile 0.7.2?! , justify = "left")
#		tkgrid(cb, sticky = "w")
#	}
#	## Place everything in the dialog box
#	tkgrid(Logo, txtarea)
#	tkpack(dlgarea, anchor = "nw")
#	## Place buttons
#
#    "onOK" <- function () {
#        assignTemp("ZIret", tclvalue(resVar))
#        tkgrab.release(ZIAssist)
#        tkWinDel("ZIAssist")
#		#tkfocus(WinGet("ZIDlgWin"))
#    }
#    "onCancel" <- function () {
#        tkgrab.release(ZIAssist)
#        tkWinDel("ZIAssist")
#		#tkfocus(WinGet("ZIDlgWin"))
#    }
#    butbar <- tk2frame(ZIAssist)
#    OK.but <- tk2button(butbar, text = "   OK   ", command = onOK)
#    Cancel.but <- tk2button(butbar, text = " Cancel ", command = onCancel)
#	if (is.null(help.topic)) {
#    	tkgrid(OK.but, Cancel.but, sticky = "e")
#	} else {    # Create also a help button
#		"onHelp" <- function () {
#			eval(browseURL(help(help.topic , htmlhelp=TRUE)[1] ),
#				envir = .GlobalEnv )
#		}
#        Help.but <- tk2button(butbar, text = "  Help  ", command = onHelp)
#        tkgrid(OK.but, Cancel.but, Help.but, sticky = "e")
#	}
#	tkpack(butbar, side = "bottom", fill = "x")
#	tkpack(tk2separator(ZIAssist), side = "bottom", fill = "x")
#    tkbind(ZIAssist, "<Return>", onOK)
#	if (isWin()) tcl("wm", "attributes", ZIAssist, toolwindow = 1, topmost = 1)
#	tkwm.resizable(ZIAssist, 0, 0)
#	## Focus on that window
#	tkfocus(ZIAssist)	# Doesn't work with Rgui.exe, but tkwm.deiconify does
#    tkwm.deiconify(ZIAssist)
#    tkgrab.set(ZIAssist)
#    tkwait.window(ZIAssist)
#    return(getTemp("ZIret"))
}
.assignGlobal <- function (x, value)
{
	G <- .GlobalEnv
	assign(x = x, value = value, envir = G)
}
 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/guiutils.R
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## ZooImage >= 3 importation routines
## TODO:
## - Import data with replicates as subdirs of one common dir
## - Import grayscale data from a "grey" subdir of common dir, or in a first stage,
##   do not use any subdir data that do not contain a .lst file (cf, data are
##   in subdirs of "grey" dir)
## - Warning: do not allow mixing, say, 10x and 4x in the same sample! => check this!
## - Replicates are AR.B25.2014-05-19.300A4X.01, .02, .03, ... => correct label from there?
## - Use jpeg format for non-color vignettes + check the difference in file size
##   and loading speed in R
## - Calculate default concentration values, using $Fluid$TotalVolumeML assuming
##   no dilution of the sample... SubPart is TotalVolumeML, SubCell = 1, VolIni = 1
## - Add a size scale in vignettes
## - Note: using jpeg instead of png: 10 sec instead of 14 sec, and 4.9 MB instead
##   of 14.7 MB; loading 25 vignettes is faster too.
#### Importation of FlowCAM data without image reanalysis ######################
## Read a FlowCAM .ctx file
## TODO: add label everywhere in front of each table
readFlowCAMctx <- function (ctx, stop.it = TRUE)
{
	## Check arguments
	stop.it <- isTRUE(as.logical(stop.it))
	## ctx must be an existing file
	if (!file.exists(ctx))
		if (stop.it)
			stop("'ctx' must be an existing (.ctx) file") else return(NULL)
	
	## Get the label from the directory containing the data
	label <- basename(dirname(ctx))
	if (label == ".") label <- basename(getwd())
	
	## Read .ctx data
	dat <- scan(ctx, what = character(), sep = "\t", skip = 0,
		blank.lines.skip = TRUE, flush = TRUE, quiet = TRUE, comment.char = "")
	
	## This is an .ini format
	V <- parseIni(dat, label = "")
	
	## Rework a few fields
	
	## Strings are imported as factors, but we really want characters
	factorsAsStrings <- function (x)
		as.data.frame(lapply(x, function (x)
		if (is.factor(x)) as.character(x) else x), stringsAsFactors = FALSE)
	V <- lapply(V, factorsAsStrings)
	
	## Empty strings are imported as logical with NA value
	logicalNaAsStrings <- function (x)
		as.data.frame(lapply(x, function (x)
		if (is.logical(x) && is.na(x)) "" else x), stringsAsFactors = FALSE)
	V <- lapply(V, logicalNaAsStrings)
	
	## Special conversion into POSIXct for $General$RunStartTime and $RunEndTime
	V$General$RunStartTime <- as.POSIXct(V$General$RunStartTime)
	V$General$RunEndTime <- as.POSIXct(V$General$RunEndTime)
	
	## We need these keys that may not be present in old .ctx files	
	if (is.null(V$Fluid$TotalVolumeML)) {
		## Volume calculation
		cst <- V$Fluid$CalibConstant
		if (is.null(cst)) cst <- V$Fluid$CalibrationConstant
		Height <- (V$CaptureRegion$AcceptableBottom -
			V$CaptureRegion$AcceptableTop) * cst
		Width <- (V$CaptureRegion$AcceptableRight -
			V$CaptureRegion$AcceptableLeft) * cst
		Area <- Height * Width
		## Volume of one image
		Volume <- (Area / (1e8)) * (V$Fluid$FlowCellDepth / 10000) # mL
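		## Units: Area/1e8 converts µm^2 to cm^2 and FlowCellDepth/10000
		## converts µm to cm, so Volume is in cm^3 = mL per raw image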
		V$Fluid$TotalVolumeML <- Volume * V$CaptureStats$RawImageTotal
	}
	## This is missing in 1.5.14, but can be calculated
	if (is.null(V$CameraBehavior$AutoImageRate))
		V$CameraBehavior$AutoImageRate <- V$CaptureStats$RawImageTotal /
			V$CaptureStats$ImageCaptureTotal.Seconds
	
	## In 1.5.14, no distinction between ThresholdDark and ThresholdLight
	## So, copy Threshold to both of them
	if (!is.null(V$CaptureParameters$Threshold) &&
		is.null(V$CaptureParameters$ThresholdDark)) {
		V$CaptureParameters$ThresholdDark <- V$CaptureParameters$Threshold
		V$CaptureParameters$ThresholdLight <- V$CaptureParameters$Threshold
	}
	
	## In 1.5.14, no RecalibrationIntervalMinutes but SaveIntervalMinutes
	V$Files$RecalibrationIntervalMinutes <- V$Files$SaveIntervalMinutes 
	
	## Calculated fields (wrong units or other problems)
	mins <- V$RunTermination$MaxRunTimeMinutes
	if (length(mins) == 0) mins <- 0
	secs <- V$RunTermination$MaxRunTimeSeconds
	if (length(secs) == 0) secs <- 0
	V$RunTermination$MaxRunTime <- mins * 60 + secs
	
	## Possibly read also data from _notes.txt
    notes <- sub("\\.ctx$", "_notes.txt", ctx)
	if (file.exists(notes)) {
		## TODO: parse key=value items
		notesData <- readLines(notes, warn = FALSE)	
		notesData <- paste(notesData, collapse = "\n") 
	} else notesData <- ""
	
	## TODO: check there is no Fraction, Process and Subsample entries yet!
	
	## Add Fraction data
	V$Fraction <- data.frame(Code = "", Min = -1, Max = -1)
	
	## Add Process information
	useESD <- V$CaptureParameters$UseESDForCapture
	if (is.null(useESD)) useECD <- FALSE else useECD <- useESD != 1
	V$Process <- data.frame(Version = "1.0-0", Method = "Direct VS import",
		MinSize = as.numeric(V$CaptureParameters$MinESD)/1000, # In mm
		MaxSize = as.numeric(V$CaptureParameters$MaxESD)/1000, # In mm
		UseECD = useECD)
	
	## Add Subsample information
	## TODO: get this from _notes.txt... Here, assume using 10mL / 1L
	
	V$Subsample <- data.frame(SubPart = 0.01, SubMethod = 1,
		CellPart = 1, Replicates = 1, VolIni = 1, VolPrec = 0.1)
	
	## Add Label in front of each table
	V <- lapply(V, function (x) cbind(data.frame(Label = label), x))
	
	## Return the resulting list
	V	
}
## Examples
#ctxFile <- "/Users/phgrosjean/Desktop/Intercalibration/BE.ArMix.2009-04-29.300A4X_01/BE.ArMix.2009-04-29.300A4X_01.ctx"
#readFlowCAMctx(ctxFile)
## A 1.5.14 file
#ctxFile1 <- "/Users/phgrosjean/Documents/Pgm/ZooPhytoImage_1.2-1-examples/FlowCAM-example-FIT-VIS/143-144526.ctx"
#readFlowCAMctx(ctxFile1)
## Read a FlowCAM .lst file
readFlowCAMlst <- function (lst, skip = 2, read.ctx = TRUE)
{
    ## Check arguments
	## lst must be an existing file
	if (!file.exists(lst))
		stop("'lst' must be an existing (.lst) file")
	## skip at least 2 rows, but for realtime, can skip more...
	skip <- as.integer(skip)[1]
	if (skip < 2) {
		warning("'skip' cannot be lower than 2... fixed!")
		skip <- 2
	}
	read.ctx <- isTRUE(as.logical(read.ctx))
	
	## For format 017 we have now column names hardcoded
	header <- scan(lst, what = character(), nlines = 2L, quiet = TRUE)
	if (as.integer(header[1]) >= 17 && substr(header[2], 1, 10) == "num-fields") {
		## Format >= 17. Columns names are hardcoded!
		nfields <- as.integer(strsplit(header[2], "|", fixed = TRUE)[[1]][2])
		if (!length(nfields) || is.na(nfields) || nfields < 44)
			stop("Unrecognized .lst file format: number of fields is ", nfields)
		skip <- nfields + 2
		## Read column header information
		hcol <- scan(lst, what = character(), sep = "|", skip = 2L,
			nlines = nfields, quiet = TRUE)
		if (length(hcol) != nfields * 2)
			stop("Unrecognized .lst file format: incorrect header")
		hcol <- matrix(hcol, ncol = 2, byrow = TRUE)
		cnames <- hcol[, 1]
		## Make sure all names start with FIT_ and are Capitalized
		capital <- function(x) {
			s <- strsplit(x, "_")
			sapply(s, function (x) paste(toupper(substring(x, 1, 1)),
				substring(x, 2), sep = "", collapse = "_"))
		}
		cnames <- paste("FIT", capital(cnames), sep = "_")
		## Special replacements
		cnames <- sub("Abd", "ABD", cnames)
		cnames <- sub("Esd", "ESD", cnames)
		cnames <- sub("FIT_Ch([1-9])_Width", "FIT_Ch\\1_TOF", cnames)
		## We need to replace names by their zooimage equivalent
		cnames[cnames == "FIT_Id"] <- "Id" # The only one not starting with FIT_
		cnames[cnames ==  "FIT_ABD_Area"] <-  "FIT_Area_ABD"
		cnames[cnames == "FIT_ABD_Diameter"] <- "FIT_Diameter_ABD"
		cnames[cnames == "FIT_ESD_Diameter"] <- "FIT_Diameter_ESD"
		cnames[cnames == "FIT_Raw_Perimeter"] <- "FIT_Raw_Perim"
		cnames[cnames == "FIT_Raw_Convex_Perimeter"] <- "FIT_Raw_Convex_Perim"
		cnames[cnames == "FIT_Collage_File"] <- "FIT_Filename"
		cnames[cnames == "FIT_Timestamp"] <- "FIT_Timestamp1"
		cnames[cnames == "FIT_Image_X"] <- "FIT_SaveX"
		cnames[cnames == "FIT_Image_Y"] <- "FIT_SaveY"
		cnames[cnames == "FIT_Image_W"] <- "FIT_PixelW"
		cnames[cnames == "FIT_Image_H"] <- "FIT_PixelH"
		cnames[cnames == "FIT_Src_X"] <- "FIT_CaptureX"
		cnames[cnames == "FIT_Src_Y"] <- "FIT_CaptureY"
		cnames[cnames == "FIT_Src_Image"] <- "FIT_Source_Image"
		cnames[cnames == "FIT_Cal_Image"] <- "FIT_Calibration_Image"
		## Note: in comparison to old format, we have in addition:
		#"FIT_Camera", "FIT_Fringe_Size", "FIT_Circle_Fit", "FIT_Ch1_Area",            
        #"FIT_Ch2_Area", "FIT_Ch3_Area"    
		#
		# Plus "FIT_Symmetry", "FIT_Circularity_Hu", "FIT_Intensity_Calimage",
		# "FIT_Raw_Convex_Hull_Area", "FIT_Raw_Filled_Area"
		
		## Read the data in
		tab <- read.table(lst, header = FALSE, sep = "|", dec = ".", 
            skip = skip, col.names = cnames)
		## Add missing fields from the previous versions
		tab$FIT_Ch4_Peak <- NA
		tab$FIT_Ch4_TOF <- NA
		tab$FIT_High_U32 <- NA
        tab$FIT_Low_U32 <- NA
        tab$FIT_Total <- NA
		tab$FIT_Timestamp2 <- as.character(NA)
	} else { # Older format. We have to guess column names!	
		## Determine version of the FlowCAM's table according to number of cols
		ncol <- length(read.table(lst, header = FALSE, sep = ":", dec = ".",
			skip = skip, nrows = 1))
    
		## Read .lst data
		## TODO: if export file exists, verify column names here (.csv file)
		if (ncol == 44) {  # This should be FlowCAM II
			tab <- read.table(lst, header = FALSE, sep = ":", dec = '.',
				skip = skip, col.names = c("Id", "FIT_Cal_Const", "FIT_Raw_Area",
				"FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
				"FIT_Raw_Perim", "FIT_Raw_Convex_Perim", "FIT_Area_ABD",
				"FIT_Diameter_ABD", "FIT_Length", "FIT_Width", "FIT_Diameter_ESD",
				"FIT_Perimeter", "FIT_Convex_Perimeter", "FIT_Intensity",
				"FIT_Sigma_Intensity", "FIT_Compactness", "FIT_Elongation",
				"FIT_Sum_Intensity", "FIT_Roughness", "FIT_Feret_Max_Angle",
				"FIT_Avg_Red", "FIT_Avg_Green", "FIT_Avg_Blue", "FIT_PPC",
				"FIT_Ch1_Peak", "FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF",
				"FIT_Ch3_Peak", "FIT_Ch3_TOF", "FIT_Ch4_Peak", "FIT_Ch4_TOF",
				"FIT_Filename", "FIT_SaveX", "FIT_SaveY", "FIT_PixelW",
				"FIT_PixelH", "FIT_CaptureX", "FIT_CaptureY", "FIT_High_U32",
				"FIT_Low_U32", "FIT_Total"))
        
			## Add columns present in .lst from FlowCAM III (same table for all)
			tab$FIT_Feret_Min_Angle <- NA
			tab$FIT_Edge_Gradient <- NA
			tab$FIT_Timestamp1 <- NA
			tab$FIT_Timestamp2 <- NA
			tab$FIT_Source_Image <- NA
			tab$FIT_Calibration_Image <- NA
    
		} else if (ncol == 47) { # This should be FlowCAM III
			tab <- read.table(lst, header = FALSE, sep = ":", dec = '.',
				skip = skip, col.names = c("Id", "FIT_Cal_Const", "FIT_Raw_Area",
				"FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
				"FIT_Raw_Perim", "FIT_Raw_Convex_Perim", "FIT_Area_ABD",
				"FIT_Diameter_ABD", "FIT_Length", "FIT_Width", "FIT_Diameter_ESD",
				"FIT_Perimeter", "FIT_Convex_Perimeter", "FIT_Intensity",
				"FIT_Sigma_Intensity", "FIT_Compactness", "FIT_Elongation",
				"FIT_Sum_Intensity", "FIT_Roughness", "FIT_Feret_Max_Angle",
				"FIT_Feret_Min_Angle", "FIT_Avg_Red", "FIT_Avg_Green",
				"FIT_Avg_Blue", "FIT_PPC", "FIT_Ch1_Peak", "FIT_Ch1_TOF",
				"FIT_Ch2_Peak", "FIT_Ch2_TOF", "FIT_Ch3_Peak", "FIT_Ch3_TOF",
				"FIT_Ch4_Peak", "FIT_Ch4_TOF", "FIT_Filename", "FIT_SaveX",
				"FIT_SaveY", "FIT_PixelW", "FIT_PixelH", "FIT_CaptureX",
				"FIT_CaptureY", "FIT_Edge_Gradient", "FIT_Timestamp1",
				"FIT_Timestamp2", "FIT_Source_Image", "FIT_Calibration_Image"))
        
			## Add columns present in list files from FlowCAM II
			tab$FIT_High_U32 <- NA
			tab$FIT_Low_U32 <- NA
			tab$FIT_Total <- NA
		
		} else stop("Unrecognized FlowCAM format") # TODO: adapt for the new soft
	}
	
	## New variables calculation (present in export .csv from the FlowCAM)
	## Code already checked
    tab$FIT_Volume_ABD <- (4/3) * pi * (tab$FIT_Diameter_ABD/2)^3
    tab$FIT_Volume_ESD <- (4/3) * pi * (tab$FIT_Diameter_ESD/2)^3
    tab$FIT_Aspect_Ratio <- tab$FIT_Width / tab$FIT_Length
    tab$FIT_Transparency <- 1 - (tab$FIT_Diameter_ABD/tab$FIT_Diameter_ESD)
    tab$FIT_Red_Green_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Green
    tab$FIT_Blue_Green_Ratio <- tab$FIT_Avg_Blue / tab$FIT_Avg_Green
    tab$FIT_Red_Blue_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Blue
    tab$FIT_Ch2_Ch1_Ratio <- tab$FIT_Ch2_Peak / tab$FIT_Ch1_Peak
	
	## Need label
	label <- basename(dirname(lst))
	if (label == ".") label <- basename(getwd())
	
	## Try to extract metadata from .ctx file, if it exists
    ctx <- sub("\\.lst$", ".ctx", lst)
	if (read.ctx && file.exists(ctx)) {
		ctxData <- readFlowCAMctx(ctx)
    } else { # Use minimum default metadata
		ctxData <- list(
			Fraction = data.frame(Label = label, Code = "", Min = -1, Max = -1),
			Process = data.frame(Label = label, Version = "1.0-0",
				Method = "Direct VS import", MinSize = NA, MaxSize = NA, UseECD = NA),
			Subsample = data.frame(Label = label, SubPart = 0.01, SubMethod = 1,
				CellPart = 1, Replicates = 1, VolIni = 1, VolPrec = 0.1)
		)
	}
	Sub <- ctxData$Subsample
	
	## Rework the table: rename Id to Item, prepend Label, Item and ECD
	## columns, and append a Dil column
	n <- nrow(tab)
	items <- tab$Id
	tab$Id <- NULL
	dil <- 1/(Sub$SubPart * Sub$CellPart * Sub$Replicates * Sub$VolIni)
	tab <- cbind(data.frame(Label = rep(label, n), Item = items,
		ECD = ecd(tab$FIT_Area_ABD)), tab, data.frame(Dil = rep(dil, n)))
	
	## Add metadata and change class of the object
	attr(tab, "metadata") <- ctxData
	class(tab) <- c("ZI3Dat", "ZIDat", "data.frame")
    tab
}
## Example
#lstFile <- "/Users/phgrosjean/Desktop/Intercalibration/BE.ArMix.2009-04-29.300A4X_01/BE.ArMix.2009-04-29.300A4X_01.lst"
#res <- readFlowCAMlst(lstFile)
#lstFile1 <- "/Users/phgrosjean/Documents/Pgm/ZooPhytoImage_1.2-1-examples/FlowCAM-example-FIT-VIS/143-144526.lst"
#res1 <- readFlowCAMlst(lstFile1)
## Temporary name!
importFlowCAM <- function (lst, rgb.vigs = FALSE,  type = "ZI3", replace = FALSE)
{
	## Check arguments
    rgb.vigs <- isTRUE(as.logical(rgb.vigs))    
    if (type != "ZI3") {
        warning("only 'ZI3' is currently supported for 'type'")
        return(invisible(FALSE))
    }
	
	## Read metadata
	dat <- readFlowCAMlst(lst, skip = 2, read.ctx = TRUE)
	## Check results
	if (!is.data.frame(dat) || NROW(dat) < 1)
		stop("Problem while importing FlowCAM data, or empty series")
	if (is.null(attr(dat, "metadata")))
		stop("Problem while importing FlowCAM metadata from the .ctx file")
	
	## Change dir to sample's parent directory
    sampledir <- dirname(lst)
    if (sampledir == ".") sampledir <- getwd()
	label <- basename(sampledir)
    parentdir <- dirname(sampledir)
    #odir <- setwd(sampledir)
    odir <- setwd(parentdir)
    on.exit(setwd(odir))
	
	## Compute the .zidb file name and check whether it already exists
    zidbfile <- paste(sampledir, "zidb", sep = ".")
    if (!isTRUE(as.logical(replace)) && file.exists(zidbfile)) {
        return(invisible(TRUE))
    }
	
	## Create the .zidb file
    message("Creating the ZIDB file...")
    filehashOption(defaultType = "DB1")
    unlink(zidbfile)
    dbCreate(zidbfile)
    db <- dbInit(zidbfile)
    dbInsert(db, ".ZI", 3)
	if (isTRUE(rgb.vigs)) {
		dbInsert(db, ".ImageType", "png")
	} else {
		dbInsert(db, ".ImageType", "jpeg")
	}
    
    ## Add vignettes to the .zidb file
    message("Adding vignettes to ZIDB file...")
	
#    ## TODO: change this: do not use _import dir
#    zidbdir <- file.path(dirname(sampledir), "_import", basename(sampledir))
#    if (file.exists(zidbdir) && dir(zidbdir) != 0) 
#        stop("The destination dir already exists and is not empty!")
#    dir.create(zidbdir, recursive = TRUE, showWarnings = FALSE)
    tif <- dir(sampledir, pattern = "[0-9]\\.tif$", full.names = TRUE)
    isCal <- grepl("^.*cal_image_[0-9]+\\.tif$", tif)
    calFiles <- tif[isCal]
    colFiles <- tif[!isCal]
    if (length(calFiles) == 0) 
        stop("No background calibration image found")
    if (length(colFiles) == 0) 
        stop("No collages found")
		
	## Read all background calibration images into a list
	cals <- list()
	for (i in 1:length(calFiles)) {
		cals[[i]] <- readTIFF(source = calFiles[i])
		## If the image is RGB, we got three dimensions to reduce to two
		cdim <- dim(cals[[i]])
		if (length(cdim) == 3 && cdim[3] == 3) {
			## Calculate the CIE 1931 linear luminance Y as grayscale
			## Y = 0.2126 R + 0.7152 G + 0.0722 B
			cals[[i]] <- 0.2126 * cals[[i]][, , 1] + 0.7152 * cals[[i]][, , 2] +
				0.0722 * cals[[i]][, , 3]
		}
		if (length(dim(cals[[i]])) != 2)
			stop("unrecognized calibration image type; ",
				"cannot convert it to 8-bit grayscale")	
	}
	
	## Read collages one by one and extract vignettes from them,
	## using information gathered into dat
	colFile <- "" # File of current collage
	## Since R indexing starts at 1 but FlowCAM pixel indexing starts at 0,
	## add one where it is required
	dat1 <- dat
	dat1$FIT_SaveX <- dat$FIT_SaveX + 1
	dat1$FIT_SaveY <- dat$FIT_SaveY + 1
	dat1$FIT_CaptureX <- dat$FIT_CaptureX + 1
	dat1$FIT_CaptureY <- dat$FIT_CaptureY + 1
	
	## Extract a submatrix, given coordinates X1, Y1, X2, Y2
	crop <- function (mat, coords)
		mat[coords[2]:coords[4], coords[1]:coords[3]]
	
	## Determine best gray level for background after subtraction
	gray <- attr(dat, "metadata")$CaptureParameters$ThresholdLight
	if (!length(gray)) {
		warning("Unknown threshold gray level; using 40")
		gray <- 40 # Target something like 40
	}
	gray <- gray / 255
	## Threshold = 2 * gray, since we add it once while subtracting background
	threshold <- 1 - 2 * gray
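	## e.g. with the default gray level of 40: gray = 40/255 ~ 0.157, so
	## threshold = 1 - 2 * 0.157 ~ 0.686 on the [0, 1] grayscale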
	
	## Proceed with each vignette
	nmax <- nrow(dat1)
	for (i in 1:nmax) {
		
		progress(i, nmax + 1)
		d <- dat1[i, ]
		## Do we need to load the next collage?
		if (as.character(d$FIT_Filename) != colFile) {
			filename <- as.character(d$FIT_Filename)
			collage <- readTIFF(source = file.path(sampledir, filename))
			colFile <- d$FIT_Filename
			colFiles <- colFiles[colFiles != filename]
			## If the image is RGB, we got three dimensions to reduce to two
			cdim <- dim(collage)
			if (length(cdim) == 3 && cdim[3] == 3) {
				## Calculate the CIE 1931 linear luminance Y as grayscale
				## Y = 0.2126 R + 0.7152 G + 0.0722 B
				collage <- 0.2126 * collage[, , 1] + 0.7152 * collage[, , 2] +
					0.0722 * collage[, , 3]
			}
			if (length(dim(collage)) != 2)
				stop("unrecognized collage image type; ",
					"cannot convert it to 8-bit grayscale")
		}
		
		## Get coordinates of the vignette in that collage
		size <- c(d$FIT_PixelW, d$FIT_PixelH) - 1 # Still the problem of 0 vs 1
		colCoords <- c(d$FIT_SaveX, d$FIT_SaveY)
		colCoords <- c(colCoords, colCoords + size)
		calCoords <- c(d$FIT_CaptureX, d$FIT_CaptureY)
		calCoords <- c(calCoords, calCoords + size)
			
		## Extract the vignette and corresponding background from the collage
		vig <- crop(collage, colCoords)
		## If  FIT_Calibration_Image is NA, use first one => TODO: check this!
		if (is.na(d$FIT_Calibration_Image)) d$FIT_Calibration_Image <- 1
		back <- crop(cals[[d$FIT_Calibration_Image]], calCoords)
		
		## Subtract background and save vignette
		vig2 <- 1 + vig - back - gray
		vig2[vig2 > 1] <- 1
		vig2[vig2 < 0] <- 0
		
		if (isTRUE(rgb.vigs)) {
			## Calculate mask
			mask <- matrix(1, nrow = nrow(vig2), ncol = ncol(vig2))
			mask[vig2 > threshold] <- 0
			## Do we need to fill holes?
			## TODO...
			
			## Combine grayscales and mask into a RGB image
			vig2 <- structure(c(vig2, vig2, mask), dim = c(dim(vig2), 3))
		}
		
		## Write this vignette
#		vigFile <- file.path(zidbdir,
#			sub("\\.tif$", paste0("_", i, ".png"), filename))
#		writePNG(image = vig2, target =  vigFile)
		#VigName <- sub("\\.tif$", paste0("_", i), filename)
        VigName <- paste(label, i, sep = "_")
		
		## In case we use grayscale vignettes, use jpeg, otherwise, use png
		if (isTRUE(rgb.vigs)) {
			dbInsert(db, VigName, writePNG(image = vig2, target = raw()))
		} else {
			dbInsert(db, VigName, writeJPEG(image = vig2, target = raw(),
				quality = 0.95))
		}
	}
	
	## Create zidb
	## TODO...
	#dat
	    message("Adding data from ZIM files to ZIDB file...")
#    for (i in 1:length(Zims)) {
#        Zim <- Zims[i]
#        ZimName <- sub("\\.zim$", "", basename(Zim))
#        ZimSize <- file.info(Zim)$size
#        if (is.na(ZimSize)) {
#            warning("file '", Zim, "' not found or of null length")
#            return(invisible(FALSE))
#        }
#        dbInsert(db, ZimName, readBin(Zim, "raw", ZimSize + 100))
#    }
    
    ## Adding metadata and particles' attributes to the .zidb file
    ## TODO: SampleData should come from a DESCRIPTION.zis file???
	## Here, use a default format
	smpdat <- data.frame(Label = label, Station = NA, Date = NA, Time = NA,
		TimeZone = NA, Latitude = NA, Longitude = NA, CoordsPrec = NA,
		Operator = NA, Note = NA) # TODO: add note from FlowCAM data!!!
	class(smpdat) <- c("ZIDesc", "data.frame")
	
	message("Adding sample data to ZIDB file...")
    dbInsert(db, ".SampleData", smpdat)
	
    message("Adding R data to ZIDB file...")
#    zidat <- file.path(zidir, paste0(basename(zidir), "_dat1.RData"))
#    obj <- load(zidat)
#    if (length(obj) != 1) {
#        warning("Error loading ", zidat)
#        return(invisible(FALSE))
#    }
    dbInsert(db, ".Data", dat)
#    if (isTRUE(as.logical(delete.source))) 
#        unlink(zidir, recursive = TRUE)
    message("-- Done! --")
    invisible(TRUE)
}
## Example
## Test version 2.2.1
#lstFile <- "/Users/phgrosjean/Desktop/Intercalibration/BE.ArMix.2009-04-29.300A4X_01/BE.ArMix.2009-04-29.300A4X_01.lst"
#res <- importFlowCAM(lstFile)
## Test version 1.5.14
## TODO: This does not work (incorrect number of dimensions => imports images as an array?)
#lstFile1 <- "/Users/phgrosjean/Documents/Pgm/ZooPhytoImage_1.2-1-examples/FlowCAM-example-FIT-VIS/143-144526.lst"
#res1 <- importFlowCAM(lstFile1)
 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/import.R
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## Get the directory containing the plankton sorter files
planktonSorterDir <- function ()
	system.file("planktonSorter", package = "zooimage")
## Generate a planktonSorter.html page
planktonSorterPage <- function (groups, vigns, title = "Plankton sorter/Step1",
id = title, step = 1, port = NULL, file = NULL) {
    if (is.null(port)) {
		## Make sure the R Httpd server is started
		tools <- getNamespace("tools")
		if (as.numeric(R.Version()$`svn rev`) >= 67550) {
			port <- tools::startDynamicHelp(NA)
		} else {
			port <- tools$httpdPort
		}
		if (port == 0) port <- tools::startDynamicHelp(TRUE)
		if (port == 0) stop("Impossible to start the R httpd server")
			
		e <- tools$.httpd.handlers.env
		e[["planktonSorterValidate"]] <- planktonSorterValidate
	}
	paths <- groups
	## Add the "[other]" group...
	groups <- names(paths)
	if (!any(groups == "[other]")) {
		## Add the [other] group
		paths <- c(paths, `[other]` = "_/[other]")
		groups <- c(groups, "[other]")
	}
	title <- as.character(title)[1]
    
    ## Create header of the document
	html <- '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>'
    html <- c(html, sprintf("<title>%s</title>", title))
    html <- c(html, sprintf('<meta http-equiv="Content-Type" content="text/html; charset=%s">',
		.mimeEncoding("UTF-8")))  # .mimeEncoding() = zooimage:::.mimeEncoding()
	html <- c(html, sprintf(
'	<script src="../planktonSorter/jquery-1.11.0.min.js"></script>
	<link rel="stylesheet" type="text/css" href="../planktonSorter/planktonSorter.css">	
	<script src="../planktonSorter/planktonSorter.js"></script>
	<script language="javascript" type="text/javascript">
		function back () {
			var ww = window.open("../step%s/planktonSorter.html", "_self");
		}
	</script>
</head>
', as.character(step - 1))
	)
	
	## Create the toolbar with the hidden submission form in it
	## The Back button is currently always disabled on this page
	disabled <- "disabled" # if (step <= 1) "disabled" else ""
	html <- c(html, sprintf('<body>
<div id="toolbar" style="width:20000px;">
	<form id="submitForm" action="http://127.0.0.1:%s/custom/planktonSorterValidate" method="post">
		<input id="results" type="hidden" name="%s" value="">
		<input type="button" onclick="back()" value="<< Back" %s>
		<input type="button" onclick="saveDragDropNodes()" value="Validate">
	</form>
</div>', as.character(round(port)[1]), id, disabled))
    ## Create headers for all categories
    html <- c(html, '<div id="header" style="width:20000px;">',
        sprintf('	<span id="header_box" title="%s">%s</span>', paths, groups),
        '</div>'
    )
    
    ## Create the "Unclassified" area
    html <- c(html, '
<div id="dragDropContainer">
	<div id="topBar">
		<p></p>
	</div>
	
	<div id="listOfItems">
		<div>
			<p>Unclassified</p>
			<ul id="_">
			</ul>
		</div>
	</div>
'       
    )
    ## Add one div per group and populate it with the corresponding vignettes
    addGroup <- function (grp, vigns) {		
		vignNames <- sub("\\.[a-zA-Z]+$", "", vigns)
		c(sprintf('		
		<div>
			<p>%s</p>
			<ul id="%s">', grp, grp),
			sprintf('				<li id="%s"><img src="%s" alt="%s" class="preview"></li>',
				vignNames, vigns, vignNames),
'			</ul>
		</div>'
        )
    }
	
    ## Size of main container is 210 + 91*nGroups
	size <- 210 + 91 * length(groups) # PHG: with 90, the [other] group sometimes is wrapped!
    html <- c(html,
		sprintf('	<div id="mainContainer" style="width:%spx;">	
		<!-- ONE <UL> for each "group" -->', as.character(size)))
    ## Add the groups, plus an [other] group too!
	for (group in groups)
		html <- c(html, addGroup(group, vigns[names(vigns) == group]))
	
	## Finalize the page
	html <- c(html,
'   </div>
</div>
<ul id="dragContent"></ul>
<div id="dragDropIndicator"><img src="../planktonSorter/insert.gif"></div>
</body>
</html>'
    )
    
	if (is.null(file)) {
		html
	} else {
		cat(html, sep = "\n", file = file, append = FALSE)
		invisible(html)
	}
}
planktonSorterReport <- function (title = "Plankton sorter/Step1 - Results",
id = title, step = 1, port = NULL, file = NULL) {
    if (is.null(port)) {
		## Make sure the R Httpd server is started	
		tools <- getNamespace("tools")
		if (as.numeric(R.Version()$`svn rev`) >= 67550) {
			port <- tools::startDynamicHelp(NA)
		} else {
			port <- tools$httpdPort
		}
		if (port == 0) port <- tools::startDynamicHelp(TRUE)
		if (port == 0) stop("Impossible to start the R httpd server")
	}
	title <- as.character(title)[1]
    
    ## Create header of the document
	html <- '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>'
    html <- c(html, sprintf("<title>%s</title>", title))
    html <- c(html, sprintf('<meta http-equiv="Content-Type" content="text/html; charset=%s">',
		.mimeEncoding("UTF-8")))   # .mimeEncoding() = zooimage:::.mimeEncoding()
	html <- c(html, sprintf('	<meta http-equiv="cache-control" content="no-cache">
	<link rel="stylesheet" type="text/css" href="../planktonSorter/planktonSorter.css">
	<script language="javascript" type="text/javascript">
		function back () {
			var ww = window.open("../step%s/planktonSorter.html", "_self");
		}
		
		function done () {
			var results = document.getElementById(\'results\');
			var saveString = "done>>> " + results.name;
			// Post these results to the R process
			results.value = saveString;
			document.getElementById(\'submitForm\').submit();
		}
		
		function next () {
			var results = document.getElementById(\'results\');
			var saveString = "iterate>>> " + results.name;
			// Post these results to the R process
			results.value = saveString;
			document.getElementById(\'submitForm\').submit();
		}
		
	</script>
</head>
', as.character(step))
	)
	
	## Create the toolbar with the hidden submission form in it
	html <- c(html, sprintf('<body>
<div id="toolbar" style="width:20000px;">
	<form id="submitForm" action="http://127.0.0.1:%s/custom/planktonSorterValidate" method="post">
		<input id="results" type="hidden" name="%s" value="">
		<input type="button" onclick="back()" value="<< Back" disabled>
		<input type="button" onclick="next()" value="Next >>">
		<span>   </span>
		<input type="button" onclick="done()" value="Done">
	</form>
</div>', as.character(round(port)[1]), id))
    ## Create headers for all categories
    html <- c(html, sprintf('	<br><h3>%s</h3>
<p><img src="ReportError.png?v=%s" alt="Suspects and error"></p>', title,
		as.character(round(runif(1, max = 100000))))
    )
    
	## Finalize the page
	html <- c(html,
'
</body>
</html>'
    )
    
	if (is.null(file)) {
		html
	} else {
		cat(html, sep = "\n", file = file, append = FALSE)
		invisible(html)
	}
}
planktonSorterValidate <- function (path, query, body, ...) {
    if (!length(body)) return()
	## Special cases "iterate>>> " or "done>>> "
	if (substring(body, 1, 11) == "iterate>>> ") {
		res <- substring(body, 12)
		res <- unlist(strsplit(res, "/", fixed = TRUE))
		reportfile <- get(res[1], envir = TempEnv())$iterate()
		
	} else if (substring(body, 1, 8) == "done>>> ") {
		res <- substring(body, 9)
		res <- unlist(strsplit(res, "/", fixed = TRUE))
		res <- get(res[1], envir = TempEnv())$done()
		
#		html <- '<html>
#<head>
#<title>Final results</title>
#</head>
#<body>
#<h3>Final results</h3>
#'
#		html <- c(html, res,
#'
#</body>
#</html>'
#		)
#		return(list(payload = paste(html, collapse = "\n"), `content-type` = "text/html"))
		
	} else {
		## General case: got validation results...
		res <- unlist(strsplit(body, ";", fixed = TRUE))
		res <- unlist(strsplit(res, "/", fixed = TRUE))
		res <- matrix(as.character(res), ncol = 2, byrow = TRUE)
		## Get the object name from the first row
		object <- res[1, 1]
		#step <- as.numeric(res[1, 2])
		#Class <- as.vector(res[-1, 1])
		#Vigns <- as.vector(res[-1, 2])
		reportfile <- get(object, envir = TempEnv())$validate(res)
	}
	
	## Update the errorCorrection object accordingly, and save this also in the Zidb file
	#url <- paste0("file://", reportfile)
	#html <- get(object, envir = TempEnv())$validate(res)
	
    html <- '<html>
<head>
<title>Waiting for R process...</title>
<meta name="keywords" content="automatic redirection">
<script language="javascript" type="text/javascript">
	 var ww = window.open(window.location, "_self");
	 window.close();
</script>
</head>
<body>
Waiting for R process...
</body>
</html>'
	
    list(payload = html, `content-type` = "text/html")
}
#html <- sprintf(
#'<html>
#<head>
#<title>Generating report...</title>
#<meta http-equiv="refresh" content="0; URL=%s">
#<meta name="keywords" content="automatic redirection">
#</head>
#<body>
#Generating report...
#<br>
#If it does not load, try to
#<a href="%s">get it</a> 
#manually.
#</body>
#</html>', url, url)
activeLearning <- function (train, add.mode = "SV+NSV", threshold = NA)
{
  ## Active learning (adaptation of the training set with contextual items)
  if (!inherits(train, "ZITrain"))
    stop("'train' does not appear to be a valid training set, or problem when reading the training set")
  
  ## Call contextSelection (for selection of contextual samples)
  CtxSmp <- contextSelection()
  if (length(CtxSmp) < 1) {
    warning("No contextual samples selected! Initial training set will be used...")
  } else {
    ## Call addItemsToTrain (for augmentation of the training set)
    train <- addItemsToTrain(train = train, CtxSmp = CtxSmp, add.mode = add.mode, 
                             threshold = threshold, dropItemsToTrain = dropItemsToTrain)
  }
### PhG: this classifier object does not seem to be used afterward => commented out!
###  #.assignGlobal(ZIT, train)
###  classifier <- ZIClass(Class ~ ., data = train[!(names(train) %in% "AddedItems")], 
###                        method = "mlRforest", calc.vars = calcVars, ntree = 200, cv.k = 10)
###  attr(classifier, "path") <- attr(train, "path")
  
  ## Return the augmented training set
  train
}
correctError <- function(zidb, classifier, data = zidbDatRead(zidb), mode = "validation",
fraction = 0.05, sample.min = 100, sample.max = 200, grp.min = 2,
random.sample = 0.1, algorithm = "rf", diff.max = 0.2, prop.bio = NULL,
reset = TRUE, result = NULL) {
	## Default value for result
	if (is.null(result))
		result <- paste(sub("\\.[zZ][iI][dD][bB]$", "",
			basename(zidb)), "valid", sep = "_")
		
	## Look if the zidb file exists
	zidb <- as.character(zidb)[1]
	if (!file.exists(zidb))
		stop("zidb file '", zidb, "' not found")
	## Make sure data is correct
	if (missing(data)) {
		## Check that the dat file is read from zidb
		if (!inherits(data, "ZIDat"))
			stop("Corrupted zidb or data file")
	} else {
		## Make sure data refers to the same particules as in zidb
		data2 <- zidbDatRead(zidb)
		if (!inherits(data, "ZIDat"))
			stop("Corrupted zidb or data file")
		if (any(sort(makeId(data)) != sort(makeId(data2))))
			stop("'data' and 'zidb' does not refer to the same vignettes")
	}
	## If data contains a class, make sure its levels match those of the chosen classifier
	groups <- sort(basename(attr(classifier, "path")))
	if (!is.null(data$Class)) {
		if (any(sort(levels(data$Class)) != groups))
			stop("There is a 'Class' variable in 'data', but its levels do not match those of the 'classifier'")
	}
	## For the rest, the arguments will be checked in the errorCorrection() function
	
	## We need to give a unique name for this object. It is the zidb basename
	## plus a digest of the groups
	sample <- sub("\\.[zZ][iI][dD][bB]$", "", basename(zidb))
	dgroups <- digest(groups)
	Name <- paste(sample, dgroups, sep = "__")
	
	## Check the directory and reset it, if asked for
	testdir <- file.path(tempdir(), sample)
	if (file.exists(testdir)) {
		if (isTRUE(reset)) {
			unlink(testdir, recursive = TRUE)
		} else {
			stop("Sample seems currently under validation process; use reset = TRUE instead")
		}
	}
	
	## Create this object in TempEnv()
	ec <- errorCorrection(data, classifier, zidb = zidb, mode = mode,
		fraction = fraction, sample.min = sample.min, sample.max = sample.max,
		grp.min = grp.min, random.sample = random.sample, algorithm = algorithm,
		diff.max = diff.max, prop.bio = prop.bio, testdir = testdir, id = Name,
		result = result, envir = parent.frame())
	if (mode != "stat") assignTemp(Name, ec)
	
	## Start its first iteration...
	ec$iterate()
	
	## Return the object
	ec
}
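## Example (a hedged sketch, assuming 'cl' is a ZIClass classifier and that
## 'sample.zidb' exists; opens the plankton sorter page in the browser)
#ec <- correctError("sample.zidb", cl, mode = "validation")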
 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/planktonSorter.R
	# Copyright (c) 2004-2018, Ph. Grosjean <[email protected]>
#
# This file is part of ZooImage
#
# ZooImage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# ZooImage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
# Get information about a sample, given its name
sampleInfo <- function(filename,  type = c("sample", "fraction", "image",
"scs", "date", "id", "frac", "imgnbr"), ext = "_dat[135][.]zim$") {
  base <- basename(as.character(filename))
  if (ext != "") base <- sub(ext, "", base)
  # Filename without extension is supposed to follow the convention:
  # scs.date.id+f[img] with scs.date.id forming a unique sample identifier
  # Note: not all verifications are conducted. So, it sometimes returns a
  # result even if the name does not conform to this specification!
  # TODO: check that the name follows the convention and determine what is
  #       optional (like the date, for instance)
  switch(match.arg(type),
    sample = sub("\\+[a-zA-Z][0-9.]+$", "", base),
    fraction = sub("[0-9.]+$", "", base),
    image = base,
    scs = sub("[+.].+$", "", base),
    date = {
      res <- try(
        as.Date(sub("^.*([0-9]{4}-[0-1][0-9]-[0-3][0-9]).*$", "\\1", base)),
        silent = TRUE)
      if (inherits(res, "try-error")) {
        warning("Wrong sample filename: impossible to extract the sample date")
        as.Date(NA)
      } else res
    },
    id = sub("^.*\\..*\\.(.*)\\+.*$", "\\1", base),
    frac = sub("^.*\\+([a-zA-Z]).*$", "\\1",base),
    imgnbr = as.numeric(sub("^.*\\+[a-zA-Z]([0-9.]*)$", "\\1", base)),
    {
      warning("'type' must be 'sample', 'fraction', 'image', 'scs', 'date', ",
        "'id', 'frac' or 'imgnbr'")
      character(0)
    }
  )
}
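# Example, with a hypothetical file name following the convention above:
#sampleInfo("MTLG.2014-05-19.A1+B2_dat1.zim", type = "sample") # "MTLG.2014-05-19.A1"
#sampleInfo("MTLG.2014-05-19.A1+B2_dat1.zim", type = "frac")   # "B"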
# Convert underscores into spaces
underscoreToSpace <- function(string)
  gsub("_", " ", string)
# Trim leading and trailing white spaces and tabs
trimString <- function(string)
  sub("\\s+$", "", sub("^\\s+", "", string))
# All sample with at least one entry in a given object
listSamples <- function(ZIobj) {
  if (!inherits(ZIobj, c("ZIDat", "ZIDesc","ZITrain","ZITest"))) {
    warning("'ZIobj' must be a 'ZIDat', 'ZIDesc', 'ZITrain' or 'ZITest' object")
    return(character(0))
  }
  # List all samples represented in a given object
  if (inherits(ZIobj, "ZIDat")) {
    res <- sort(unique(sampleInfo(as.character(ZIobj$Label),
    type = "sample", ext = "")))
  } else if (inherits(ZIobj, "ZIDesc")) {
    res <- sort(unique(as.character(ZIobj$Label)))
  } else if (inherits(ZIobj, c("ZITrain", "ZITest"))) {
    res <- as.character(ZIobj$Id)
    res <- sub("_[0-9]*$", "", res)
    res <- sort(unique(sampleInfo(res, type = "sample", ext = "")))
  }
  res
}
# Unique identifiers (Ids) are a combination of Label and Item
makeId <- function(ZIDat)
  paste(ZIDat$Label, ZIDat$Item, sep = "_")
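# Example: makeId(data.frame(Label = "AR.B25.2014-05-19.300A4X", Item = 1:2))
# yields "AR.B25.2014-05-19.300A4X_1" "AR.B25.2014-05-19.300A4X_2"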
# Add classes into a ZIDat object, from ZITrain or ZITest objects
addClass <- function(ZIDat, ZIobj) {
  # Is there a 'Class' variable in ZIobj?
  Cl <- ZIobj$Class
  if (!length(Cl))
    stop("No 'Class' column found in the ZIobj object")
  # Select only those items that are in ZIDat, in the correct order...
  Id <- ZIobj$Id
  if (!length(Id)) Id <- makeId(ZIobj)
  if (!length(Id)) stop("unable to get particle Ids from 'ZIobj'")
  names(Cl) <- Id
  ZIDat$Class <- Cl[makeId(ZIDat)]
  ZIDat
}
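# Example (a hedged sketch, assuming 'dat' is a ZIDat and 'train' a ZITrain
# whose Ids match):
#dat <- addClass(dat, train) # adds/fills the Class column of dat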
# Default list of variables to drop
# Version 3.0-1: added a list of useless FIT variables to be dropped
dropVars <- function() {
  res <- try(get("ZI.dropVarsDef"), silent = TRUE)
  if (inherits(res, "try-error"))
    res <- getOption("ZI.dropVarsDef",
      c("Id", "Label", "Item", "X", "Y", "XM", "YM", "BX", "BY", "Width",
      "Height", "Angle", "XStart", "YStart", "Dil", "Predicted",
      "Predicted2", "FIT_Cal_Const", "FIT_Avg_Red", "FIT_Avg_Green",
      "FIT_Avg_Blue", "FIT_PPC", "FIT_Ch1_Peak", "FIT_Ch1_TOF",
      "FIT_Ch2_Peak", "FIT_Ch2_TOF", "FIT_Ch3_Peak", "FIT_Ch3_TOF",
      "FIT_SaveX", "FIT_SaveY", "FIT_PixelW", "FIT_PixelH",
      "FIT_CaptureX", "FIT_CaptureY", # Keep this one?"FIT_Edge_Gradient",
      "FIT_Source_Image", "FIT_Calibration_Image", "FIT_High_U32",
      "FIT_Low_U32", "FIT_Total", "FIT_Red_Green_Ratio",
      "FIT_Blue_Green_Ratio", "FIT_Red_Blue_Ratio",
      "FIT_Ch2_Ch1_Ratio", "FIT_Ch4_Peak", "FIT_Ch4_TOF", "FIT_Timestamp1",
      "FIT_Timestamp2", "FIT_Camera", "FIT_FringSize",
      "FIT_Ch1_Area", "FIT_Ch2_Area", "FIT_Ch3_Area",
      "FIT_TimeStamp1", "FIT_Source_Image.1",
      "X.Item.1", "FeretAngle", "Count",
      "Skew", "Kurt", "Solidity", # Last 3: NAs with multiple ROIs
      "MinFeret", "AR", "Round", # Problems with these variables at IFREMER!?
      # Added in zooimage v.5:
      "FIT_Filename", "FIT_Feret_Min_Angle", "FIT_Feret_Max_Angle",
      # This is somehow redundant with other variables
      "FIT_Raw_Area", "FIT_Raw_Perim", "FIT_Raw_Convex_Perim",
      "FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
      "FIT_Diameter_ABD", # This one is indeed ECD
      # Changes in variables names
      "FIT_Ppc", "FIT_Fringe_Size", "FIT_Circle_Fit",
      # Found in format 17 of a color FlowCAM (from KAUST) and not used yet
      "FIT_Symmetry", "FIT_Circularity_Hu", "FIT_Intensity_Calimage",
      "FIT_Raw_Convex_Hull_Area", "FIT_Raw_Filled_Area",
      "FIT_CircleFit", "FIT_Edge_Gradient"
      # TODO: should we drop also Id.1, Class, Validated and Suspect???
    ))
  as.character(res)
}
# Calculate derived variables... default function
calcVars <- function(x, drop.vars = NULL, drop.vars.def = dropVars()) {
  # This is the calculation of derived variables
  # Note that you can make your own version of this function for more
  # calculated variables!
  # Calculate derived variables... FlowCAM's Visual Spreadsheet
  calcVarsVIS <- function(x, drop.vars = NULL, drop.vars.def = dropVars()) {
    # Use only FIT_xxx vars and derived attributes (26 attributes in total):
    # ECD, FIT_Area_ABD, FIT_Length, FIT_Width, FIT_Diameter_ESD,
    # FIT_Perimeter, FIT_Convex_Perimeter, FIT_Intensity, FIT_Sigma_Intensity,
    # FIT_Compactness, FIT_Elongation, FIT_Sum_Intensity, FIT_Roughness,
    # FIT_Volume_ABD, FIT_Volume_ESD, FIT_Aspect_Ratio, FIT_Transparency,
    # CV, MeanFDia, Transp2, FeretRoundness & Perim_Ratio
    # A small hack to correct some 0 (which can be problematic in further calcs)
    noZero <- function(x) {
      x[x == 0] <- 1e-09
      x
    }
    # Euclidean distance between two points
    distance <- function(x, y)
      sqrt(x^2 + y^2)
    # All FIT_Raw_xxx vars have their counterpart resized in um:
    # FIT_Raw_Area -> FIT_Diameter_ABD
    # FIT_Raw_Feret_Max -> FIT_Length
    # FIT_Raw_Feret_Min -> FIT_Width
    # FIT_Raw_Feret_Mean -> FIT_Diameter_ESD
    # FIT_Raw_Perim -> FIT_Perimeter
    # FIT_Raw_Convex_Perim -> FIT_Convex_Perimeter
    # (=> all FIT_Raw_xxx should be eliminated in dropVars()!)
    # (re)calculate ECD from FIT_Area_ABD (was once calc from FIT_Raw_Area)
    x$ECD <- noZero(ecd(x$FIT_Area_ABD))
    x$FIT_Area_ABD <- noZero(x$FIT_Area_ABD)
    x$FIT_Length <- noZero(x$FIT_Length)
    x$FIT_Width <- noZero(x$FIT_Width)
    x$FIT_Diameter_ESD <- noZero(x$FIT_Diameter_ESD)
    x$FIT_Perimeter <- noZero(x$FIT_Perimeter)
    x$FIT_Convex_Perimeter <- noZero(x$FIT_Convex_Perimeter)
    x$FIT_Intensity <- noZero(x$FIT_Intensity)
    x$FIT_Sigma_Intensity <- noZero(x$FIT_Sigma_Intensity)
    x$FIT_Sum_Intensity <- noZero(x$FIT_Sum_Intensity)
    x$FIT_Compactness <- noZero(x$FIT_Compactness)
    x$FIT_Elongation <- noZero(x$FIT_Elongation)
    x$FIT_Roughness <- noZero(x$FIT_Roughness)
    x$FIT_Aspect_Ratio <- noZero(x$FIT_Aspect_Ratio)
    x$FIT_Volume_ABD <- noZero(x$FIT_Volume_ABD)
    x$FIT_Volume_ESD <- noZero(x$FIT_Volume_ESD)
    x$FIT_Transparency <- noZero(x$FIT_Transparency)
    x$FIT_Edge_Gradient <- noZero(x$FIT_Edge_Gradient)
    # Additional calculated variables
    # This is FIT_Aspect_Ratio! x$ARFeret <- x$FIT_Width/x$FIT_Length
    # For later on:
    x$EdgeRange <- abs(x$FIT_Intensity - x$FIT_Edge_Gradient)
    x$CV <- x$FIT_Sigma_Intensity/x$FIT_Intensity * 100
    x$MeanFDia <- (x$FIT_Length + x$FIT_Width) / 2
    x$Transp2 <- 1 - (x$FIT_Diameter_ABD/x$MeanFDia)
    x$Transp2[x$Transp2 < 0] <- 0
    # Roundness (ImageJ definition): 4 * Area / (pi * Major^2)
    x$FeretRoundness <- 4 * x$FIT_Area_ABD / (pi * x$FIT_Length^2)
    # Circularity (ImageJ definition): 4 * pi * Area / Perimeter^2
    x$Circ. <- 4 * pi * x$FIT_Area_ABD / x$FIT_Perimeter^2
    # For later on:
    x$EdgeCV <- x$FIT_Sigma_Intensity/x$FIT_Edge_Gradient * 100
    x$EdgeSDNorm <- x$FIT_Intensity/x$EdgeRange
    x$Perim_Ratio <- x$FIT_Convex_Perimeter / x$FIT_Perimeter
    # Eliminate variables that are not predictors... and use Id as rownames
    Id <- x$Id
    if (length(Id)) rownames(x) <- Id
    # Variables to drop
    # For those samples treated with FIT_VIS in ImageJ, we need to get rid of
    # the ImageJ variables
    x$Area <- NULL
    x$Mean <- NULL
    x$StdDev <- NULL
    x$Mode <- NULL
    x$Min <- NULL
    x$Max <- NULL
    x$Perim. <- NULL
    x$Major <- NULL
    x$Minor <- NULL
    x$Circ. <- NULL
    x$Feret <- NULL
    x$IntDen <- NULL
    x$Median <- NULL
    dropAll <- unique(as.character(c(drop.vars, drop.vars.def)))
    for (dropVar in dropAll) x[[dropVar]] <- NULL
    # Return the recalculated data frame
    x
  }
  # For data from the FlowCAM, we use a specific function
  if (any(names(x) == "FIT_Length"))
    return(calcVarsVIS(x, drop.vars = drop.vars, drop.vars.def = drop.vars.def))
  # A small hack to correct some 0 (which can be problematic in further calcs)
  noZero <- function(x) {
    x[x == 0] <- 1e-09
    x
  }
  # Euclidean distance between two points
  distance <- function(x, y)
    sqrt(x^2 + y^2)
  x$Minor <- noZero(x$Minor)
  x$Major <- noZero(x$Major)
  x$AspectRatio <- x$Minor / x$Major
  x$CentBoxD <- distance(x$BX + x$Width/2 - x$X, x$BY + x$Height/2 - x$Y)
  x$GrayCentBoxD <- distance(x$BX + x$Width/2 - x$XM, x$BY + x$Height/2 - x$YM)
  x$CentroidsD <- distance(x$X - x$XM, x$Y - x$YM)
  x$Range <- x$Max - x$Min
  x$MeanPos <- (x$Max - x$Mean) / x$Range
  x$SDNorm <- x$StdDev / x$Range
  x$CV <- x$StdDev / x$Mean * 100
  x$Area <- noZero(x$Area)
  #x$logArea <- log(x$Area)
  x$Perim. <- noZero(x$Perim.)
  #x$logPerim. <- log(x$Perim.)
  #x$logMajor <- log(x$Major)
  #x$logMinor <- log(x$Minor)
  #x$logECD <- log(noZero(x$ECD))
  x$Feret <- noZero(x$Feret)
  #x$logFeret <- log(x$Feret)
  x$MeanDia <- (x$Major + x$Minor) / 2
  x$MeanFDia <- (x$Feret + x$Minor) / 2
  #x$logMeanDia <- log(x$MeanDia)
  #x$logMeanFDia <- log(x$MeanFDia)
  x$Transp1 <- 1 - (x$ECD / x$MeanDia)
  x$Transp1[x$Transp1 < 0] <- 0
  x$Transp2 <- 1 - (x$ECD / x$MeanFDia)
  x$Transp2[x$Transp2 < 0] <- 0
  PA <- x$Perim.^2/16 - x$Area
  x$Elongation <- ifelse(PA <= 0, 1, x$Area / (x$Perim./4 - PA^.5)^2)
  x$Compactness <- x$Perim.^2/4/pi/x$Area  # approx. 1/Circ.
  # Roundness as in ImageJ: 4 * Area / (pi * Major^2)
  x$Roundness <- 4 * x$Area / (pi * x$Major^2)
  # Eliminate variables that are not predictors... and use Id as rownames
  Id <- x$Id
  if (length(Id)) rownames(x) <- Id
  # Variables to drop
  dropAll <- unique(as.character(c(drop.vars, drop.vars.def)))
  for (dropVar in dropAll) x[[dropVar]] <- NULL
  # Return the recalculated data frame
  x
}
# Calculate equivalent circular diameter (similar to equivalent spherical
# diameter, but for 2D images)
ecd <- function(area, cells = 1)
  2 * sqrt(area / cells / pi)
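# For instance, ecd(pi) is exactly 2: a circle of area pi has radius 1, hence
# diameter 2; with cells = 2 (a doublet), the area is halved first, so
# ecd(2 * pi, cells = 2) is also 2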
# Parse an ini file (.zim, .zie, etc. are .ini files!)
# TODO: manage the case where there is no '=' in the data!
parseIni <- function(data, label = "1") {
  # Parse an ini file (tag=value => 'tag', 'value')
  # and make a list with different sections
  # Is str a section?
  is.section <- function(str)
    length(grep("^\\[.+\\]$", trimString(str))) > 0
  # Get the name of a section
  get.section.name <- function(str)
    sub("^\\[", "", sub("\\]$", "", trimString(str)))
  # Transform a vector of characters into a data frame,
  # possibly with type conversion
  vector.convert <- function(vec)
    as.data.frame(lapply(as.list(vec), type.convert))
  if (!length(data) || !inherits(data, "character"))
    return(character(0))
  # Trim leading and trailing white spaces
  data <- trimString(data)
  # Convert underscore to space
  data <- underscoreToSpace(data)
  # Eliminate empty lines
  data <- data[data != ""]
  data <- paste(data, " ", sep = "")
  if (!length(data)) return(character(0))
  # Replace the first '=' sign with another separator that is unlikely to
  # appear in the data
  data <- sub("=", "&&&&&", data)
  # Split the strings according to this separator
  data <- strsplit(data, "&&&&&")
  # Get a matrix
  data <- t(as.data.frame(data))
  rownames(data) <- NULL
  # Make sure we have a section for the first entries (otherwise, use [.])
  if (!is.section(data[1, 1]))
    data <- rbind(c("[.]", "[.]"), data)
  Names <- as.vector(trimString(data[, 1]))
  Dat <- as.vector(trimString(data[, 2]))
  # Determine which is a section header
  Sec <- grep("\\[.+\\]$", Names)
  SecNames <- get.section.name(Names[Sec])
  # Make a vector of sections
  if (length(Sec) == 1) {
    SecNames <- rep(SecNames, length(Names))
  } else {
    SecNames <- rep(SecNames, c(Sec[2:length(Sec)], length(Names) + 1) - Sec)
  }
  # Replace section headers from all vectors
  Names[Sec] <- "Label"
  Dat[Sec] <- label
  names(Dat) <- Names
  # Transform SecNames in a factor
  SecNames <- as.factor(SecNames)
  # Split Dat on sections
  DatSec <- split(Dat, SecNames)
  # For each section, transform the vector in a data frame and possibly
  # convert its content
  DatSec <- lapply(DatSec, vector.convert)
  # Eliminate "Label" if it is ""
  if (label == "")
    DatSec <- lapply(DatSec, function(x) x[-1])
  DatSec
}
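# Example (a sketch, assuming the internal helpers trimString() and
# underscoreToSpace() used above are available):
# ini <- c("[Subsample]", "SubPart=0.15", "CellPart=0.73",
#   "[Process]", "Version=5")
# parseIni(ini, label = "A1")
# # -> a list with one data frame per section ('Process' and 'Subsample'),
# # each a single row whose first column is Label ("A1")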
# Grayscale calibration in O.D. scale
# TODO: rework all this using ImageJ in zooimagej (should be much faster)
calibrate <- function(ODfile) {
  # TODO: include also a spatial calibration procedure
  # (with a black circle around the center of the image)
  # and check also other characteristics, especially the sharpness
  cal <- c(NA, NA)
  names(cal) <- c("WhitePoint", "BlackPoint")
  msg <- character(0)
  if (!file.exists(ODfile)) {
    msg <- paste("O.D. file '", ODfile, "' not found!", sep = "")
    attr(cal, "msg") <- msg
    return(cal)
  }
  # Is it a test file?
  #if (.isTestFile(ODfile)) {
  #  # We behave like if the file was correct and return fake calibration data!
  #  cal <- c(1000, 50000)
  #  names(cal) <- c("WhitePoint", "BlackPoint")
  #  attr(cal, "msg") <- character(0)
  #  return(cal)
  #}
  #filedir <- dirname(ODfile)
  #if (filedir != ".") {
  #  # Temporary change directory to the one where the file is located
  #  inidir <- setwd(filedir)
  #  on.exit(setwd(inidir))
  #  ODfile <- basename(ODfile)
  #}
  # The command to use depends on the format of the image (determined on the
  # extension)
  #ext <- tolower(rev(strsplit(ODfile, "\\.")[[1]])[1])
  #pgmfile <- ODfile
  #if (ext == "tif") {
  #  ## First, convert into a .pgm file
  #  pgmfile <- paste(ODfile, "pgm", sep = ".")
####    netpbm_tifftopnm( ODfile, pgmfile )
  #  delfile <- TRUE
  #  ext <- "pgm"
  #} else delfile <- FALSE
  #if (ext != "pgm")
  #  return(paste("Unrecognized image format for '", ODfile, "'", sep = ""))
####  OD <- netpbm_pgmhist(pgmfile, delete = delfile)
  ## Make sure we work with 16bit images
  #if (max(OD$Gray) < 256) {
  #  msg <- c(msg, "O.D. seems to be a 8bit image (16bit required)")
  #} else {
  #  ## Eliminate values with low number of points
  #  OD <- OD[OD$Count > 100, ]
  # PhG: new code... fully implemented in R
  grays <- readTIFF(ODfile, as.is = TRUE)
  grays <- sort.int(as.integer(grays), method = "quick")
  grays <- as.data.frame(unclass(rle(grays)))
  OD <- grays[grays$lengths > 200, ]
  names(OD) <- c("Count", "Gray")
  # Look at range: should be widespread enough, but without saturation
  rngOD <- range(OD$Gray)
  if (rngOD[2] > 65500)
    msg <- c(msg,
      "Images are overexposed, or whitepoint is already calibrated")
  if (rngOD[2] < 55000)
    msg <- c(msg, "Images are underexposed")
  # Saturation on the left side of the histogram is not much of a problem!
  if (rngOD[2] - rngOD[1] < 40000)
    msg <- c(msg, "Images lack contrast")
  # We should end up with five peaks separated by four gaps
  graylev <- OD$Gray
  gap <- (diff(graylev) > 500)
  # If there are not *exactly* four gaps, there is a problem with the image!
  if (sum(gap) != 4) {
    msg <- c(msg, "Impossible to calibrate O.D.: wrong image")
  } else {
    # Get the five peaks, analyze them, and get the modes for Black, NDx8,
    # NDx4, NDx2 and White
    peaks <- as.factor(cumsum(c(0, gap)) + 1)
    peaksgray <- split(graylev, peaks)
    names(peaksgray) <- c("Black", "NDx8", "NDx4", "NDx2", "White")
    # These are supposed to be all narrow peaks... check this
    peakspan <- sapply(peaksgray, range)
    peaksrange <- peakspan[2, ] - peakspan[1, ]
    # 1.2-2: width of black peak is much larger for Epson 4990
    # => be more tolerant for that peak
    tol <- c(20000, rep(5000, 4))
    if (any(peaksrange > tol)) {
      wrongpeaks <- paste(names(peaksrange)[peaksrange > tol], collapse = ", ")
      msg <- c(msg, paste("Wrong O.D. image: lack of homogeneity for",
        wrongpeaks))
    }
    # Look for the gray levels at the top of the peaks
    peaksheight <- split(OD$Count, peaks)
    names(peaksheight) <- c("Black", "NDx8", "NDx4", "NDx2", "White")
    findmax <- function(x)
      which.max(lowess(x, f = 0.05, iter = 1)$y)
    peaksval <- sapply(peaksheight, findmax)
    # Get the number of pixels in the white peak
    nbrwhite <- peaksheight$White[peaksval["White"]]
    # Replace the location by the actual gray level
    for (i in 1:5)
      peaksval[i] <- peaksgray[[i]][peaksval[i]]
    # If the number of pixels for pure white is larger than the white
    # peak found, replace it by pure white (65535)
    nbrpurewhite <- OD$Count[nrow(OD)]
    if (nbrpurewhite > nbrwhite)
      peaksval["White"] <- 65535
    # Now, we need to calibrate the black and white points
    WhitePoint <- 65535 - peaksval["White"]
    # Perform a correction for the white point
    peaksval <- peaksval + WhitePoint
    # Transform those gray levels into O.D.
    peaksOD <- log(peaksval) * 65535 / log(65535)
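    # For instance, pure white stays at 65535, while a gray level of 6554
    # (about 10% of full scale) maps to circa 51900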
    # Create a data frame with gray levels and corresponding OD for
    # White, NDx2, NDx4 and NDx8
    calib <- data.frame(Gray = peaksOD[5:2], OD = c(0, 0.3, 0.6, 0.9))
    # Fit a line on these data
    calib.lm <- lm(OD ~ Gray, data = calib)
    # Check that the calibration line is fine (i.e., the ANOVA should
    # reject H0 at alpha = 1%)
    if (anova(calib.lm)[["Pr(>F)"]][1] > 0.01)
      msg <- c(msg, "Wrong OD calibration: not a straight line relation at alpha level = 0.01")
    # Check also that R squared is at least 0.98
    rsq <- summary(calib.lm)$r.squared
    if (rsq < 0.98)
      msg <- c(msg, paste("Bad OD calibration (R squared = ",
        formatC(rsq, digits = 3), ")", sep = ""))
    # Check linearity of the relationship by fitting a second order
    # polynomial and looking at the t-test for the squared term
    calib2.lm <- lm(OD ~ I(Gray^2) + Gray, data = calib)
    if (summary(calib2.lm)$coefficients["I(Gray^2)", "Pr(>|t|)"] < 0.01)
      msg <- c(msg, "Nonlinear OD calibration at alpha level = 0.01")
    # Calculate the value of the black point to get 0.004 OD per gray
    # level after conversion (see the manual)
    ccoef <- coef(calib.lm)
    BlackPoint <- (1.024 - ccoef[1]) / ccoef[2]
    # Get the calibration data
    cal[1] <- round(WhitePoint)
    cal[2] <- round(BlackPoint)
  }
  attr(cal, "msg") <- msg
  cal
}
# example:
# setwd("g:/zooplankton/madagascar2macro")
# calibrate("test.tif")
# Decimal separator to use in import/export ZooImage files
getDec <- function() {
  Dec <- getOption("OutDec", ".")
  # It must be either "." or ","!
  if (!Dec %in% c(".", ","))
    Dec <- "."
  Dec
}
# Add a comment (from a zimfile) into a zip archive
zipNoteAdd <- function(zipfile, zimfile) {
  zipfile <- as.character(zipfile)
  if (length(zipfile) != 1) {
    warning("exactly one 'zipfile' must be provided")
    return(FALSE)
  }
  if (!file.exists(zipfile)) {
    warning("'zipfile' not found: '", basename(zipfile), "'")
    return(FALSE)
  }
  zimfile <- as.character(zimfile)
  if (length(zimfile) != 1) {
    warning("exactly one 'zimfile' must be provided")
    return(FALSE)
  }
  if (!file.exists(zimfile)) {
    warning("'zimfile' not found: '", basename(zimfile), "'")
    return(FALSE)
  }
  if (isWin()) {
    cmd <- sprintf('%s /c type "%s" | "%s" -zq "%s" ', Sys.getenv("COMSPEC"),
      zimfile, Sys.getenv("R_ZIPCMD", "zip"), zipfile)
    res <- try(system(cmd, show.output.on.console = FALSE, invisible = TRUE,
      intern = FALSE), silent = TRUE)
  } else {
    cmd <- sprintf('zip -zq "%s" < "%s" ', zipfile, zimfile)
    res <- try(system(cmd, ignore.stdout = TRUE, ignore.stderr = TRUE,
      intern = FALSE), silent = TRUE)
  }
  if (inherits(res, "try-error")) {
    warning(as.character(res)) # Turn error into warning
    return(FALSE)
  }
  if (res != 0) {
    warning("error while adding .zim data to '", basename(zipfile), "'")
    FALSE
  } else TRUE
}
# Extract the comment from the zipfile
zipNoteGet <- function(zipfile, zimfile = NULL) {
  zipfile <- as.character(zipfile)
  if (length(zipfile) != 1) {
    warning("exactly one 'zipfile' must be provided")
    return(NULL)
  }
  if (!file.exists(zipfile)) {
    warning("'zipfile' not found: '", basename(zipfile), "'")
    return(NULL)
  }
  if (length(zimfile)) {
    zimfile <- as.character(zimfile)
    if (length(zimfile) != 1) {
      warning("exactly one 'zimfile' must be provided")
      return(NULL)
    }
  }
  # Make sure old data do not remain in zimfile
  unlink(zimfile)
  # We use unzip... and assume it is located at the same place as zip!
  if (isWin()) {
    zippgm <- Sys.getenv("R_ZIPCMD", "zip")
    unzippgm <- sub("zip$", "unzip", zippgm)
    if (unzippgm == zippgm || inherits(try(system("unzip", intern = TRUE),
      silent = TRUE), "try-error")) {
      warning("'unzip' program is required, but not found")
      return(NULL)
    }
    cmd <- sprintf('"%s" -zq "%s"', unzippgm, zipfile)
    res <- try(system(cmd, invisible = TRUE, intern = TRUE), silent = TRUE)
  } else {# Linux or MacOS
    cmd <- sprintf('unzip -zq "%s"', zipfile)
    res <- try(system(cmd, intern = TRUE), silent = TRUE)
  }
  if (inherits(res, "try-error")) {
    warning(as.character(res))
    return(NULL)
  }
  if (length(res) < 2) {
    warning("no comment data found in '", basename(zipfile), "'")
    return(character(0))
  }
  # Write the output to the file if needed and return the result
  if (length(zimfile)) {
    cat(res, file = zimfile, sep = "\n")
    invisible(res)
  } else res
}
.make_scales <- function(pixels.per.unit, units = "mm", base.dir = tempdir()) {
  # Depending on the range of pixels.per.unit, we cook different bar scales
  # This range should be wide enough... or you should use different units!
  if (pixels.per.unit <= 12) {# For instance, 300dpi
    coef <- 8
    vals <- c("2.4", "4", "8")
  } else if (pixels.per.unit <= 25) {# For instance, 600dpi
    coef <- 4
    vals <- c("1.2", "2", "4")
  } else if (pixels.per.unit <= 50) {# For instance, 1200dpi
    coef <- 2
    vals <- c("0.6", "1", "2")
  } else if (pixels.per.unit <= 100) {# For instance, 2400dpi
    coef <- 1
    vals <- c("0.3", "0.5", "1")
  } else if (pixels.per.unit <= 200) {# For instance, 4800dpi
    coef <- 0.5
    vals <- c(".15", ".25", "0.5")
  } else {# For instance, 9600dpi
    coef <- 0.25
    vals <- c(".07", ".12", ".25")
  }
  labels <- paste0(vals, units)
  images <- file.path(base.dir, c("scale30.png", "scale50.png", "scale100.png"))
  left <- floor((100 - pixels.per.unit * coef) / 2) / 100
  right <- 1 - (ceiling((100 - pixels.per.unit * coef) / 2) / 100)
  # The three scale bars only differ by their width, label, and text size, so
  # draw them with a single helper ('left' and 'right' are found by scoping)
  drawScale <- function(file, width, label, cex = 1) {
    png(file, width = width, height = 16, antialias = "none")
    par(mai = c(0, 0.025, 0, 0.025), oma = c(0, 0, 0, 0), lend = 2)
    plot(c(left, right), c(0.8, 0.8), type = "l",
      xaxt = "n", yaxt = "n", xaxs = "i", yaxs = "i", lwd = 4, col = "black",
      xlim = c(0, 1), ylim = c(0, 1), xlab = "", ylab = "", bty = "n")
    text(0.5, 0.35, labels = label, adj = c(0.5, 0.5), cex = cex)
    dev.off()
  }
  drawScale(images[3], width = 100, label = labels[3]) # 100 pixels-wide scale
  drawScale(images[2], width = 50, label = labels[2])  # 50 pixels-wide scale
  drawScale(images[1], width = 30, label = labels[1], cex = 0.75) # 30 pixels
  list(coef = coef, labels = labels, images = images)
}
# Test...
#.make_scales(96) #2400dpi
#.make_scales(72)
#.make_scales(48) #1200dpi
#.make_scales(24) #600dpi
#.make_scales(12) #300dpi
#.make_scales(192)#4800dpi
#.make_scales(384)#9600dpi
makeZIVignettes <- function(orig.dir = getwd(), target.dir = dirname(orig.dir),
clean.work = FALSE) {
  # orig.dir is supposed to be the '_work' subdirectory of the directory
  # where the image analysis was done
  odir <- setwd(orig.dir)
  on.exit(setwd(odir))
  # all_ok indicates if all images were correctly processed (only set to FALSE
  # in case of error)
  all_ok <- TRUE
  # List of _dat1|3|5.zim files
  zims <- dir(pattern = "_dat[135]\\.zim$")
  if (!length(zims))
    stop("No '_dat[135].zim' files in 'orig.dir'")
  # List of _col1|3|5.tif files
  imgs <- dir(pattern = "_col[135]\\.tif$")
  # Check that both lists agree, and there are such files
  if (!length(imgs))
    stop("No '_col[135].tif' files in 'orig.dir'")
  if (length(zims) != length(imgs) ||
      any(sub("_dat[135]\\.zim$", "", zims) !=
          sub("_col[135]\\.tif$", "", imgs)))
    stop("You must have pairs of '_dat[135].zim' and ",
      "'_col[135].tif' files in 'orig.dir'")
  # For each _dat[135].zim file, create the directory with vignettes
  # and _dat[135].zim files
  # (renamed _dat1.zim after transforming them into ZI1-compatibles files)
  # as one got it directly from ZI1-5 processes
  l <- length(zims)
  # Scale bars are recalculated according to the size of one pixel.
  # Start with a sentinel value to make sure it is calculated at the first image
  lastpixsize <- -1
  for (i in 1:l) {
    zim <- zims[i]
    img <- imgs[i]
    # Compute the directory name
    smp <- sub("\\+[A-Z][0-9]*_dat[135]\\.zim$", "", zim)
    smpdir <- file.path(target.dir, smp)
    message("Processing image ", i, "/", l, ", for sample ", smp, "... ",
      sep = "")
    flush.console()
    # If the directory exists, check it is really a dir, not a file!
    if (file.exists(smpdir)) {
      if (!file.info(smpdir)$isdir)
        stop("An entry named after sample ", smp,
          " exists, but it is not a directory!")
      #cat("skipping (file already exists)\n")
    }
    dir.create(smpdir, showWarnings = FALSE)
    # Read the zim file and do some corrections in it
    zimdat <- readLines(zim)
    if (length(zimdat) < 10 || substring(zimdat[1], 1, 2) != "ZI")
      stop("The following .zim file seems corrupted: ", zim)
    # Correct ZI1, ZI3 or ZI5 into ZI1 (we'll make it compatible with v.1!)
    zimdat[1] <- "ZI1"
    # Determine where the table of data is starting in the file
    dpos <- (1:length(zimdat))[zimdat == "[Data]"]
    if (length(dpos) != 1)
      stop("Data section not found or multiple Data sections in ", zim)
    # Code, Min, Max, SubPart, SubMethod contain all values for all images
    getKeyValue <- function(dat, key, multiple = FALSE) {
      l <- length(dat)
      regexp <- paste0("^", key, "=")
      position <- (1:l)[grepl(regexp, dat)]
      if (!length(position))
        return(list(pos = integer(0), value = character(0)))
      value <- trimws(sub(regexp, "", dat[position]))
      if (isTRUE(multiple)) {
        # Split items on commas
        value <- trimws(strsplit(value, ",")[[1]])
      }
      list(pos = position, value = value)
    }
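    # For instance, getKeyValue(c("[Sample]", "Code=A, B"), "Code",
    # multiple = TRUE) returns list(pos = 2L, value = c("A", "B"))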
    # Just keep the one that suits this particular image
    code <- getKeyValue(zimdat, "Code", multiple = TRUE)
    if (length(code$pos) != 1)
      stop("Error in zim file '", zim, "': no or several 'Code=' entries")
    # Add number to code
    code$label <- code$value
    lcodes <- length(code$value)
    ucodes <- unique(code$value)
    for (ucode in ucodes) {
      upos <- (1:lcodes)[code$value == ucode]
      code$label[upos] <- paste0(ucode, 1:length(upos))
    }
    # Get the code for the current image
    icode <- sub("^.+\\+([A-Z][0-9]*)\\_dat[135]\\.zim$", "\\1", zim)
    # If icode has no numbers, add 1 at the end
    if (grepl("[A-Z]$", icode)) icode <- paste0(icode, "1")
    if (!icode %in% code$label) {
      # Try also without the number
      icode <- substring(icode, 1, 1)
    }
    if (!icode %in% code$label) {
      # Finally try with "1" at the end
      icode <- paste0(icode, "1")
    }
    if (!icode %in% code$label)
      stop("Code ", icode, " not found in the .zim file for ", zim)
    # Determine the position of image in the codes
    ipos <- (1:lcodes)[code$label == icode]
    # Keep only corresponding code
    zimdat[code$pos] <- paste0("Code=", code$value[ipos])
    # Do the same for Min, Max, SubPart and SubMethod
    # Min
    Min <- getKeyValue(zimdat, "Min", multiple = TRUE)
    if (length(Min$pos) != 1)
      stop("Error in zim file '", zim, "': no or several 'Min=' entries")
    if (length(Min$value) != lcodes)
      stop("Non matching number of items for Code= and Min= entries in ", zim)
    zimdat[Min$pos] <- paste0("Min=", Min$value[ipos])
    # Max
    Max <- getKeyValue(zimdat, "Max", multiple = TRUE)
    if (length(Max$pos) != 1)
      stop("Error in zim file '", zim, "': no or several 'Max=' entries")
    if (length(Max$value) != lcodes)
      stop("Non matching number of items for Code= and Max= entries in ", zim)
    zimdat[Max$pos] <- paste0("Max=", Max$value[ipos])
    # SubPart
    SubPart <- getKeyValue(zimdat, "SubPart", multiple = TRUE)
    if (length(SubPart$pos) != 1)
      stop("Error in zim file '", zim, "': no or several 'SubPart=' entries")
    if (length(SubPart$value) != lcodes)
      stop("Non matching number of items for Code= and SubPart= entries in ",
        zim)
    zimdat[SubPart$pos] <- paste0("SubPart=", SubPart$value[ipos])
    # SubMethod
    SubMethod <- getKeyValue(zimdat, "SubMethod", multiple = TRUE)
    if (length(SubMethod$pos) != 1)
      stop("Error in zim file '", zim, "': no or several 'SubMethod=' entries")
    if (length(SubMethod$value) != lcodes)
      stop("Non matching number of items for Code= and SubMethod= entries in ",
        zim)
    zimdat[SubMethod$pos] <- paste0("SubMethod=", SubMethod$value[ipos])
    # Special treatment for 'Time' (get it and take it out of there!)
    smptime <- getKeyValue(zimdat, "Time")
    if (length(smptime$pos)) zimdat <- zimdat[-smptime$pos]
    smptime <- smptime$value
    # In case smptime is just hh:mm, add :00 to get hh:mm:ss
    if (grepl("^[0-9]{1,2}:[0-9]{2}$", smptime))
      smptime <- paste0(smptime, ":00")
    if (grepl("^[0-9]:", smptime))
      smptime <- paste0("0", smptime)
    # Just in case CellPart is missing, add it before Replicates
    # with default value 0.73
    if (!any(grepl("^CellPart=", zimdat))) {
      reppos <- (1:length(zimdat))[grepl("^Replicates=", zimdat)]
      if (length(reppos))
        zimdat[reppos] <- paste0("CellPart=0.73\n", zimdat[reppos])
    }
    # Write the modified zim file in the destination directory
    # TODO: we should start to accept versions 3 and 5 all over ZI now!
    writeLines(zimdat, file.path(smpdir, sub("_dat[135]\\.zim$", "_dat1.zim",
      basename(zim))))
    # Read the color (.tif) image
    pic <- readTIFF(img)
    idat <- read.delim(zim, skip = dpos)
    idat$name <- paste(idat$Label, idat$X.Item, sep = "_")
    # Size of one pixel
    pixunit <- getKeyValue(zimdat, "PixelUnit")$value[1]
    if (!length(pixunit) || is.na(pixunit) || pixunit == "")
      pixunit <- "mm" # Default unit, if not specified
    pixsize <- as.numeric(getKeyValue(zimdat, "PixelSize")$value[1])
    if (is.na(pixsize))
      stop("Impossible to find the size of a pixel in the image ",
        img, " from ", zim)
    if (pixsize != lastpixsize) {# Recalculate coef and scale bars
      scales <- .make_scales(1 / pixsize, pixunit)
      lastpixsize <- pixsize
      # Read the three generated scale bar files (e.g., 0.3, 0.5 and 1mm
      # at 2400dpi)
      #scale0.3 <- readPNG(file.path(getTemp('ZIetc'),"Scale2400_0.3mm.png"))
      scale0.3 <- readPNG(scales$images[1])
      if (!is.matrix(scale0.3)) scale0.3 <- scale0.3[, , 1]
      #scale0.5 <- readPNG(file.path(getTemp('ZIetc'),"Scale2400_0.5mm.png"))
      scale0.5 <- readPNG(scales$images[2])
      if (!is.matrix(scale0.5)) scale0.5 <- scale0.5[, , 1]
      #scale1 <- readPNG(file.path(getTemp('ZIetc'),"Scale2400_1mm.png"))
      scale1 <- readPNG(scales$images[3])
      if (!is.matrix(scale1)) scale1 <- scale1[, , 1]
    }
    # Transform coordinates into pixel sizes
    idat$BX <- round(idat$BX/pixsize)
    idat$BY <- round(idat$BY/pixsize)
    idat$Width <- round(idat$Width/pixsize)
    idat$Height <- round(idat$Height/pixsize)
    # Create vignettes
    pl <- dim(pic)[2]
    ph <- dim(pic)[1]
    for (j in 1:nrow(idat)) {
      Width <- idat$Width[j]
      Height <- idat$Height[j]
      BX <- round(idat$BX[j] - Width / 4)
      BY <- round(idat$BY[j] - Height / 4)
      BX2 <- round(BX + (Width * 1.5))
      BY2 <- round(BY + (Height * 1.5))
      # Constrain bounding box inside the picture
      if (BX < 1) BX <- 1
      if (BY < 1) BY <- 1
      if (BX2 > pl) BX2 <- pl
      if (BY2 > ph) BY2 <- ph
      # Crop the picture into a new image
      if (length(dim(pic)) == 2) {# Grayscale picture
        vig <- pic[BY:BY2, BX:BX2]
      } else {# Color picture
        vig <- pic[BY:BY2, BX:BX2, ]
      }
      # Add the scale at the top-left
      if (Width * 1.5 < 50) { # Use the 0.3mm scale
        xmax <- min(30, dim(vig)[2])
        ymax <- min(16, dim(vig)[1])
        scale <- scale0.3[1:ymax, 1:xmax]
      } else if (Width * 1.5 < 100) { # Use the 0.5mm scale
        xmax <- min(50, dim(vig)[2])
        ymax <- min(16, dim(vig)[1])
        scale <- scale0.5[1:ymax, 1:xmax]
      } else {# Use the 1mm scale
        xmax <- min(100, dim(vig)[2])
        ymax <- min(16, dim(vig)[1])
        scale <- scale1[1:ymax, 1:xmax]
      }
      if (length(dim(vig)) == 2) {# Grayscale picture
        vig[1:ymax, 1:xmax][scale < 1] <- scale[scale < 1]
      } else {# Color picture
        vig[1:ymax, 1:xmax, 2][scale < 1] <- scale[scale < 1]
        sel <- scale < 1 & vig[1:ymax, 1:xmax, 3] > 0.2
        vig[1:ymax, 1:xmax, 3][sel] <- scale[sel]/2
      }
      # Write this into a png file
      writePNG(vig, file.path(smpdir, paste(idat$name[j], "png", sep = ".")))
    }
    # Delete _work files if required
    if (isTRUE(clean.work)) {
      unlink(zim)
      unlink(img)
    }
    # Done
    #cat("OK\n")
    #flush.console()
    # Before switching to another picture, or at the end, create the .zidb file
    if (i == l) {
      if (!zidbMake(smpdir, smptime = smptime, replace = TRUE,
        delete.source = TRUE))
        all_ok <- FALSE
    } else {
      nextsmp <- sub("\\+[A-Z][0-9]*_dat[135]\\.zim$", "", zims[i + 1])
      if (nextsmp != smp) {
        if (!zidbMake(smpdir, smptime = smptime, replace = TRUE,
          delete.source = TRUE))
          all_ok <- FALSE
      }
    }
  }
  invisible(all_ok)
}
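# Example (illustrative paths): turn the '_work' output of an image analysis
# into per-sample vignette directories plus .zidb files, one level up
# makeZIVignettes(orig.dir = "mycruise/_work", clean.work = FALSE)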
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/utilities.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Check that the file is a zic file
zicCheck <- function (zicfile)
{	
	zicfile <- as.character(zicfile)
	if (!length(zicfile)) {
		warning("No zicfile provided")
		return(FALSE)
	}
	if (length(zicfile) > 1) {
		warning("testing only first zicfile")
		zicfile <- zicfile[1]
	}
	
	## This should be a .zic file directly
	if (!checkFileExists(zicfile)) return(FALSE)
	
	## First line of the file must be "ZI1", "ZI2", or "ZI3"
	if (!checkFirstLine(zicfile)) return(FALSE) 
	
	## Second line must be [path]
	Line2 <- scan(zicfile, character(), skip = 1, nmax = 2, quiet = TRUE)
	if (tolower(Line2[1]) != "[path]") {
		warning("not a ZIC file, or corrupted at line #2!")
		return(FALSE)
	}
	if (length(Line2) < 2) {
		warning("empty ZIC file is not correct")
		return(FALSE)
	} else return(TRUE)
}
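## A minimal valid .zic file thus looks like this (a sketch):
##   ZI3
##   [path]
##   path/of/first/item
##   ...
## zicCheck("training.zic")   # TRUE when the two header lines are well-formed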
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zic.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Check consistency of a zooimage directory before creating .zid or .zidb file
zidVerify <- function (zidir, type = c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"),
check.vignettes = TRUE)
{
	## Check the format of the file
	## This should be a directory containing XXX+YY_dat1.zim files
	## + .jpg or .png files (vignettes)
	if (any(!type %in% c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"))) {
		warning("only 'ZI1', 'ZI2', 'ZI3', 'ZI4', or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
	## Check the list of _dat1.zim
	dat1files <- zimDatList(zidir)
	if (!length(dat1files)) {
		## Special case for the FlowCAM where the _dat1.zim file is not created
		## automatically, but all data are there to do so
		lstfile <- paste(zidir, "lst", sep = ".")
		zimfile <- paste(zidir, "zim", sep = ".")
		if (file.exists(lstfile)) {
			if (!file.exists(zimfile)) {
				warning("FlowCAM data with no '_dat1.zim' file and no '.zim' file to create it")
				return(invisible(FALSE))
			}
			## Try to create the _dat1.zim file now
			res <- zimDatMakeFlowCAM(zimfile)
			if (!res) {
				warning("cannot create the '_dat1.zim' file from FlowCAM data")
				return(invisible(FALSE))
			}
			dat1files <- zimDatList(zidir)
			if (!length(dat1files)) {
				warning("impossible to create '_dat1.zim' file!")
				return(invisible(FALSE))
			}
		} else {
			warning("no '_dat1.zim' file!")
			return(invisible(FALSE))
		}
	}
    ## Check the content of all these "_dat1.zim" files
	## and retrieve the number of items measured
	dat1files <- sort(dat1files)
	## Default to -1 for corrupted dat1 files
	nitems <- sapply(dat1files, function(x) {
		zimVerify(file.path(zidir, x))
	})
	ok <- all(nitems >= 0)
	if (!ok) {
		warning("corrupted '_dat1.zim' files: ", paste(dat1files[nitems < -1],
			collapse = ", "))
		return(invisible(FALSE))
	}
	## Check the vignettes
	if (isTRUE(as.logical(check.vignettes))) {
        ## Check that we have corresponding vignettes (XXX+YY_ZZZ.jpg/png files)
    	samples <- sub("_dat1[.]zim$", "", dat1files)
    	## Check the content of the directory for .jpg or .png files
    	for (i in 1:length(samples)) {
			## List the jpegs
    		regex <- gsub("[+]", "[+]", samples[i])
    		regex <- gsub("[.]", "[.]", regex)
    		regex2 <-  paste("^", regex, "_[0-9]+[.]jpg$", sep = "")
			vigstype <- "jpg"
    		vigs <- dir(zidir, pattern = regex2)
			if (!length(vigs)) { # Try also for .png vignettes
				regex2 <-  paste("^", regex, "_[0-9]+[.]png$", sep = "")
				vigstype <- "png"
				vigs <- dir(zidir, pattern = regex2)
			}
    		## Get their numbers, sort them, and make sure none is missing
    		n <- nitems[i]
			## If impossible to know how many items, just count vignettes
    		if (n < 1) n <- length(vigs)
			## Construct a vector with names of vignettes as they should be
    		chkvigs <- paste(samples[i], "_", 1:n, ".", vigstype, sep = "")
    		if (length(vigs) == 0 && length(chkvigs) > 0) {
				warning("no vignettes for ", samples[i])
				ok <- FALSE
            } else if (length(chkvigs) != length(vigs) ||
				!all(sort(chkvigs) == sort(vigs))) {
				warning("mismatch vignettes for ", samples[i])
				ok <- FALSE
			}
        }
    }
	invisible(ok)
}
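## Example (illustrative path): check one sample directory before compression
## zidVerify("Sample01")  # Invisibly TRUE when _dat1.zim files and vignettes match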
zidVerifyAll <- function (path = ".", samples = NULL,
type = c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"), check.vignettes = TRUE)
{
	## Verify all of these directories
	if (any(!type %in% c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"))) {
		warning("only 'ZI1', 'ZI2', 'ZI3', 'ZI4', or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Process the list of samples
	if (!length(samples)) {	# Compute them from path
		d <- dir(path, pattern = "^[^_]")	# All items not starting with '_'
		samples <- unique(d[file.info(d)$isdir])	# Keep only directories
	}
	## If there is no dir, exit now
	if (!length(samples)) {
		warning("There is no directories to verify in ", getwd())
		return(invisible(FALSE))
	}
	## Start the process
	smax <- length(samples)
	message("Verification of .zid content...")
	flush.console()
	ok <- batch(samples, zidVerify, type = type,
		check.vignettes = check.vignettes, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(samples),
			" samples pass verification (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
## Compress one sample as a single .zid zipped file
zidCompress <- function (zidir, type = c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"), check = TRUE,
check.vignettes = TRUE, replace = FALSE, delete.source = replace)
{
	## Check the format
	if (any(!type %in% c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"))) {
		warning("only 'ZI1', 'ZI2', 'ZI3', 'ZI4', or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
	## We need to switch to the root of sample dir first for correct path
	## in the zip file
	rootdir <- dirname(zidir)
	initdir <- setwd(rootdir)
	on.exit(setwd(initdir))
	zidir <- basename(zidir) # Use only the latest dir (the "sample dir")
	## The .zid file is located in the "root" dir, same name as the
	## "sample dir", with .zid extension
	zidfile <- paste(zidir, "zid", sep = ".")
	if (!isTRUE(as.logical(replace)) && file.exists(zidfile)) {
		## It is not advised to delete source without rebuilding the .zid file
		## but it was expressly asked!
		### TODO: verify we have the same files in the .zid and initial dir
		## before deleting files!
		if (delete.source && file.exists(zidir))
			unlink(zidir, recursive = TRUE)
		return(invisible(TRUE))	# Nothing else to do
	}
	## Make sure everything is fine for this directory
	if (isTRUE(as.logical(check)))
		if (!zidVerify(zidir, type = type, check.vignettes = check.vignettes))
			return(invisible(FALSE))
	## Make sure the .RData file is created (or refreshed)
	if (!zidDatMake(zidir, type = type, replace = replace))
		return(invisible(FALSE))
	## Do compress the directory in the .zip file
	## Copy or move all corresponding files to a .zid zip-compressed file
	res <- zip(zidfile, zidir, flags = "-rq9X")
	## Do we delete sources?
	if (isTRUE(as.logical(delete.source)))
		unlink(zidir, recursive = TRUE)
	## zip() returns the exit status of the zip program: 0 means success
	invisible(res == 0)
}
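## Example (illustrative path): compress one verified sample directory
## zidCompress("Sample01")   # Creates 'Sample01.zid' next to the directory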
## Compress all data in the corresponding directory
zidCompressAll <- function (path = ".", samples = NULL,
type = c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"), check = TRUE, check.vignettes = TRUE,
replace = FALSE, delete.source = replace)
{
	if (any(!type %in% c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"))) {
		warning("only 'ZI1', 'ZI2', 'ZI3', 'ZI4', or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of samples to process
	if (!length(samples)) {	# Compute them from path
		d <- dir(path, pattern = "^[^_]")	# All items not starting with '_'
		samples <- unique(d[file.info(d)$isdir])	# Keep only directories
	}
	## If there is no dir, exit now
	if (!length(samples)) {
		warning("there is no directories to process in ", getwd())
		return(invisible(FALSE))
	}
	## Start the process
	if (isTRUE(as.logical(check)))
		if (!zidVerifyAll(path = path, samples = samples,
			check.vignettes = check.vignettes))
			return(invisible(FALSE))
	## Compress these files
	message("Compression of .zid data...")
	flush.console()
	ok <- batch(samples, zidCompress, type = type, check = FALSE,
			check.vignettes = check.vignettes, replace = replace,
			delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(samples),
			" items were compressed in .zid files (see .last.batch)")
		invisible(FALSE)
	} else {
		## Possibly clean the whole directory (move .zim files to _raw
		## and delete the _work subdir) if everything is fine
		zidClean(path = path, samples = samples)
		message("-- Done! --")
		invisible(TRUE)
	}
}
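## Typical batch usage (a sketch): compress every sample directory found
## under the current directory (zidVerifyAll() is run first when check = TRUE)
## zidCompressAll(".", replace = FALSE)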
## Clean Zid (eliminate the _work subdirectory and move initial data to _raw)
zidClean <- function (path = ".", samples = NULL)
{
	## Do we have samples to process
    if (!length(samples)) return(invisible(FALSE))
    ## First, switch to that directory
    if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	## Identify paths
	message("Cleaning directory...")
	zimfiles   <- zimList( "." )
	zimsamples <- sub("^(.*)[+].+", "\\1", zimfiles)
    ## Keep only those .zim files related to samples
    zimfiles <- zimfiles[zimsamples %in% samples]
	## Process
    if (length(zimfiles)) {
        rawdir <- file.path(".", "_raw")
		## If the _raw subdirectory does not exist, create it
        if (!file.exists(rawdir)) dir.create(rawdir)
		copyto <- file.path(".", "_raw", zimfiles)
		## Move these .zim files
        for (i in 1:length(zimfiles))
			file.rename(zimfiles[i], copyto[i])
    }
    ## Delete completely the _work subdirectory
    unlink(file.path(".", "_work"), recursive = TRUE)
	return(invisible(TRUE))
}
## Uncompress a .zid file to get all its content.
## Use 'delete.source = TRUE' with caution!
zidUncompress <- function (zidfile, path = dirname(zidfile),
delete.source = FALSE)
{
	## Check if the file provided is a .zid file, and if it exists
	if (!checkFileExists(zidfile, extension = "zid"))
		return(invisible(FALSE))
	message("Unzipping '", zidfile, "' ...")
	## Uncompress it
	if (!length(tryCatch(unzip(zidfile, overwrite = FALSE, exdir = path),
			error = function (e) warning(e),
			warning = function (w) return()))) {
		message("    ... not done!")
		return(invisible(FALSE))
	}
	## Do we delete sources?
	if (isTRUE(as.logical(delete.source))) unlink(zidfile)
	## Invisibly indicate success
	invisible(TRUE)
}
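## Example (illustrative path): restore the 'Sample01' directory from its archive
## zidUncompress("Sample01.zid")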
## Uncompress all .zid files in the 'path.extract' directory
zidUncompressAll <- function (path = ".", zidfiles = zidList(path,
full.names = TRUE), path.extract = path, skip.existing.dirs = TRUE,
delete.source = FALSE)
{
	## Initial checks
	if (!length(zidfiles)) {
        warning("no ZID files!")
		return(invisible(FALSE))
	}
	## Start the process
	ok <- TRUE
	## Check that dirs / files with corresponding names exist in path.extract
	checkdirs  <- file.path(path.extract, noExtension(zidfiles))
	fileExists <- file.exists(checkdirs) & !file.info(checkdirs)$isdir
	dirExists  <- file.exists(checkdirs) & file.info(checkdirs)$isdir
	## If any file that is not a dir exists there, stop the process
	if (any(fileExists)) {
        warning("one or several files have same name as uncompressed dirs!")
		return(invisible(FALSE))
	}
	## Should we eliminate files whose corresponding dirs exist?
	if (skip.existing.dirs && any(dirExists)) {
		cat(sum(dirExists), "file(s) already uncompressed were skipped!\n")
        warning(paste("Skipping already uncompressed file(s):",
			paste(zidfiles[dirExists], collapse = ",")))
	}
	zidfiles <- zidfiles[!dirExists]
	## Decompress the files remaining in the list
	smax <- length(zidfiles)
	if (!length(zidfiles)) {
		message("-- Done! - (nothing to decompress)")
		return(invisible(TRUE))
	}
	## Uncompress these files
	message("Decompression of ZID archives...")
	flush.console()
	ok <- batch(zidfiles, zidUncompress, path = path.extract,
		delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zidfiles),
			" ZID files were uncompressed (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
## Make a .RData file that collates together data from all the "_dat1.zim",
## "_dat3.zim" and "_dat5.zim" files of a given sample
zidDatMake <- function (zidir, type = "ZI5", replace = FALSE)
{
    if (any(!type %in% c("ZI1", "ZI2", "ZI3", "ZI4", "ZI5"))) {
		warning("only 'ZI1', 'ZI2', 'ZI3', 'ZI4' or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
    ## Here, we still keep the _dat1.RData format for backward compatibility!
    RDataFile <- file.path(zidir, paste0(basename(zidir), "_dat1.RData"))
    ## File already exists
    if (file.exists(RDataFile) && !replace)
        return(invisible(TRUE))
	ok <- TRUE
    dat1files <- zimDatList(zidir)
    ## Create _dat5.zim file if it is missing (for FlowCAM data)
    if (!length(dat1files)) {
        SmpDir <- dirname(zidir)
        zimDatMakeFlowCAM(file.path(SmpDir,
			paste(basename(zidir), "zim", sep = ".")))
        dat1files <- zimDatList(zidir)
        if (!length(dat1files)) {
            warning("no '_dat1.zim', '_dat3.zim', or '_dat5.zim' file!")
			return(invisible(FALSE))
		}
    }
    dat1files <- sort(dat1files)
    # Here, we want to use separate settings image-by-image, and not only
    # fraction-by-fraction!
    #fractions <- sampleInfo(dat1files, "fraction")
    fractions <- sampleInfo(dat1files, "image")
    ## Avoid collecting duplicate information about fractions
    fracdup <- duplicated(fractions)
    results <- lapply(seq.int(1, length(dat1files)), function (i) {
        dat1path <- file.path(zidir, dat1files[i])
        if (!isZim(dat1path)) return(invisible(FALSE))
        ## Read the header
        Lines <- scan(dat1path, character(), sep = "\t", skip = 1,
            blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE,
            comment.char = "#")
        if (length(Lines) < 1) {
            warning( dat1files[i], " is empty, or is corrupted")
            return(invisible(FALSE))
        }
        ## Trim leading and trailing spaces in Lines
        Lines <- trimString(Lines)
        ## Convert underscore to space
        Lines <- underscoreToSpace(Lines)
        ## Determine start of the measurements table (it is '[Data]' header)
        endhead <- tail(which(Lines == "[Data]"), 1)
        if (length(endhead) && endhead > 1)
            Lines <- Lines[seq.int(1, endhead - 1)]
        ## Parse all lines, that is, split on the first occurrence of "="
        ## into 'tag' and 'value', and separate into sections
        if (!is.null(Lines))
            meta <- parseIni(Lines, sub("_dat[135][.]zim$", "", fractions[i]))
        if (length(endhead)) {
            mes <- read.table(dat1path, header = TRUE, sep = "\t",
                dec = ".", as.is = FALSE, skip = endhead + 1,
                comment.char = "#", na.strings = "null")
            ## We have several problems here:
            ## 1) There is sometimes a column full of NAs at the end.
            ##    This is because ImageJ adds an extra tab at the end of the line.
            ## [RF] FIXME: this should not be the case anymore because we have
            ## more control on what ImageJ is doing
			## [PhG] We keep this here anyway for old datasets!
            if (all(is.na(mes[, ncol(mes)])))
                mes <- mes[, -ncol(mes)]
            ## 2) The first column is the 'Item', but its name '!Item' is
            ##    transformed into 'X.Item'
            ## 3) The '%Area' is transformed into 'X.Area'
            Names <- names(mes)
            if (Names[1] == "X.Item") Names[1] <- "Item"
            if ("X.Area" %in% Names) Names[Names == "X.Area"] <- "PArea"
            ## Invert 'Item' and 'Label'
            mes <- mes[, c(2, 1, 3:ncol(mes))]
            Names <- Names[c(2, 1, 3:length(Names))]
            names(mes) <- make.names(Names, unique = TRUE)
            Sub <- meta$Subsample
            # A workaround, just in case CellPart or Replicates are missing,
            # then take respectively 0.73 and 1
            if (is.null(Sub$CellPart)) Sub$CellPart <- 0.73
            if (is.null(Sub$Replicates)) Sub$Replicates <- 1
			Sub$Dil <- 1/(Sub$SubPart * Sub$CellPart * Sub$Replicates *
                Sub$VolIni)
            mes$Dil <- rep(Sub$Dil[Sub$Label == fractions[i]], nrow(mes))
        } else {
            mes <- NULL
        }
        list(meta = meta, mes = mes)
    })
    notnull.filter <- Negate(is.null)
    results <- Filter(notnull.filter, results)
    list.allmeta <- Filter(notnull.filter, lapply(results, "[[", "meta"))
    list.allmes <- Filter(notnull.filter, lapply(results, "[[", "mes"))
	listMerge <- function (x, y) {
		xitems <- names(x)
		yitems <- names(y)
		xandy <- xitems[xitems %in% yitems]
		xonly <- xitems[!(xitems %in% xandy)]
		yonly <- yitems[!(yitems %in% xandy)]
		## First merge common items
		if (length(xandy) > 0) {
			res <- lapply(xandy, function (item) {
				merge(x[[item]], y[[item]], all = TRUE)
			})
			names(res) <- xandy
		} else {
			res <- list()
		}
		if (length(xonly) > 0) res[xonly] <- x[xonly]
		if (length(yonly) > 0) res[yonly] <- y[yonly]
		res
	}
    list.allmeta <- list.allmeta[!fracdup] # Keep only non-duplicated metadata
    lmeta <- length(list.allmeta)
    allmeta <- list.allmeta[[1]]
    if (lmeta > 1) {
      for (i in 2:lmeta)
        allmeta <- listMerge(allmeta, list.allmeta[[i]])
    }
    listCombine <- function (lst) {
      force(lst)
      mergefun <- function (x, y) {
        if (all(sort(names(x)) == sort(names(y)))) {
          rbind(x, y)
        } else {
          merge(x, y, all = TRUE)
        }
      }
      Reduce(mergefun, lst)
    }
    allmes <- listCombine(list.allmes)
    rownames(allmes) <- 1:nrow(allmes)
    Names <- names(allmes)
	## Calculate an ECD from Area (or FIT_Area_ABD) if there is not one yet
    if (!"ECD" %in% Names) {
		if ("FIT_Area_ABD" %in% Names) { # This is FlowCAM data!
			ECD <- ecd(allmes$FIT_Area_ABD)
			allmes <- data.frame(allmes[, 1:2], ECD = ECD,
				allmes[, 3:ncol(allmes)])
		} else if ("Area" %in% Names) { # All other cases
			ECD <- ecd(allmes$Area)
			allmes <- data.frame(allmes[, 1:2], ECD = ECD,
				allmes[, 3:ncol(allmes)])
		}
	}
    attr(allmes, "metadata") <- allmeta
    class(allmes) <- c("ZI5Dat", "ZIDat", "data.frame")
    ZI.sample <- allmes
    save(ZI.sample, file = RDataFile, ascii = FALSE, version = 2,
		compress = TRUE)
    if (ok) ok <- file.exists(RDataFile)
	if (!ok) warning("problem creating the RData file")
    return(invisible(ok))
}
## Read the .Rdata in a .zid file or corresponding directory
zidDatRead <- function (zidfile)
{
	## Identify the file and stop if it does not exists
	sample <- noExtension(zidfile)
	RdataFile <- paste0(sample, "_dat1.RData")
	deletefile <- FALSE
	if (!checkFileExists(zidfile, message = "%s not found!")) return(NULL)
	## Treat different kind of files
	if (!hasExtension(zidfile, "zid")) {
		# Is it a directory?
		if (file.info(zidfile)$isdir) {
			# Is there a .RData file in this directory?
			rdata <- file.path(zidfile, RdataFile)
			if (!file.exists(rdata)) {
				# Try to create it
				zidDatMake(zidfile)
				if (!checkFileExists(rdata,
					message = "error creating the RData file"))
					return(NULL)
			}
		} else {
			warning("unrecognized file: ", zidfile)
			return(NULL)
		}
	} else {  # This is a .zid file
		rdata <- file.path(sample, RdataFile)
		zidExtract <- function (file, zidfile) {
			tmpd <- tempdir()
			unzip(zidfile, file, exdir = tmpd, overwrite = TRUE,
				junkpaths = TRUE)
			res <- file.path(tmpd, basename(file))
			if (file.exists(res)) res else NULL
		}
		rdata <- zidExtract(rdata, zidfile)
		if (!length(rdata)) {
			warning("error reading RData file from ", basename(zidfile))
			return(NULL)
		}
		deletefile <- TRUE
	}
	## Load that file
	ZI.sample <- NULL
	load(rdata)
	## Fix ECD in case of FIT_VIS data
	if ("FIT_Area_ABD" %in% names(ZI.sample))
		ZI.sample$ECD <- ecd(ZI.sample$FIT_Area_ABD)
	## Delete the file
	if (deletefile) {
		unlink(rdata)
		# If the directory is empty, delete it also
		datadir <- file.path(tempdir(), sample)
		if (file.exists(datadir) && !length(dir(datadir)))
			unlink(datadir)
	}
	## Set the class
	if (!inherits(ZI.sample, "ZIDat") && inherits(ZI.sample, "data.frame"))
		class(ZI.sample) <- c("ZI5Dat", "ZIDat", "data.frame")
	return(ZI.sample)
}
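## Example (illustrative paths): zidDatMake() collates the _dat[135].zim files
## of a sample into a single _dat1.RData file, which zidDatRead() then loads
## zidDatMake("Sample01")
## dat <- zidDatRead("Sample01.zid")   # A 'ZIDat' data frame of measurements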
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zid.R 
 | 
					
	## Copyright (c) 2015, Ph. Grosjean <[email protected]> & K. Denis
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Make a ZooImage database file for one sample
zidbMake <- function (zidir,
zidbfile = paste0(sub("[/\\]+$", "", zidir), ".zidb"),
zisfile = file.path(dirname(zidir), "Description.zis"), type = "ZI5",
smptime = "", check = FALSE, check.vignettes = FALSE, replace = FALSE,
delete.source = replace)
{
	## Check the format
	if (!type %in% c("ZI1", "ZI3", "ZI5")) {
		warning("only 'ZI1', 'ZI3', or 'ZI5' are currently supported for 'type'")
		return(invisible(FALSE))
	}
	if (!isTRUE(as.logical(replace)) && file.exists(zidbfile)) {
		## Nothing to do... the file already exists
		if (isTRUE(as.logical(delete.source)) &&
			file.exists(zidir) && file.info(zidir)$isdir)
			unlink(zidir, recursive = TRUE)
		return(invisible(TRUE))	# Nothing else to do
	}
	## Make sure everything is fine for this directory
	if (isTRUE(as.logical(check)))
		if (!zidVerify(zidir, type = type, check.vignettes = check.vignettes)) {
			warning("problem when verifying the ZID data")
		  return(invisible(FALSE))
		}
	## Make sure the .RData file is created (or refreshed)
	if (!zidDatMake(zidir, type = type, replace = replace)) {
		warning("problem when creating the RData file")
	  return(invisible(FALSE))
	}
    ## List all vignettes
    Vigs <- dir(zidir, pattern = "\\.jpg$", full.names = TRUE)
	## Maybe the vignettes are in .png format...
	if (!length(Vigs)) {
		Vigs <- dir(zidir, pattern = "\\.png$", full.names = TRUE)
		VigType <- "png"
	} else VigType <- "jpg"
	if (!length(Vigs)) {
		warning("No vignettes found (JPEG or PNG files)")
		return(invisible(FALSE))
	}
    ## List all .zim files
    Zims <- dir(zidir, pattern = "\\.zim$", full.names = TRUE)
	if (!length(Zims)) {
		warning("No ZIM files found!")
		return(invisible(FALSE))
	}
	## Make sure data from the .zis file are correct
	if (!checkFileExists(zisfile, "zis", force.file = TRUE)) {
		warning("No ZIS file (Description.zis usually) file found")
	  return(invisible(FALSE))
	}
	zisData <- zisRead(zisfile)
	# Replace <<<SMP>>>, <<<DATE>>> and <<<TIME>>> by the correct values
	# in case we got only one entry
	if (length(zisData$Label) == 1) {
    if (zisData$Label == "<<<SMP>>>")
      zisData$Label <- basename(zidir)
    if (zisData$Date == "<<<DATE>>>")
      zisData$Date <- sampleInfo(basename(zidir),  type = "date")
      #zisData$Date <- as.Date(sub("^.+\\.([0-9][0-9][0-9][0-9]-[0-1][0-9]-[0-3][0-9])\\..+$", "\\1", basename(zidir)))
    if (zisData$Time == "<<<TIME>>>")
      zisData$Time <- smptime
	}
	isSample <- (zisData$Label == basename(zidir))
	if (!length(isSample) || sum(isSample) < 1) {
		warning("Incorrect .zis file, or the file does not contain data for this sample")
		return(invisible(FALSE))
	}
    ## Extract data for this sample
	zisData <- zisData[isSample, ]
	## TODO: maybe check that a minimum set of variables is there...
    ## Create the .zidb file: put all vignettes there, plus the .RData file
	message("Creating the ZIDB file...")
    filehashOption(defaultType = "DB1")
    unlink(zidbfile)
    dbCreate(zidbfile)
    db <- dbInit(zidbfile)
    ## Indicate which zooimage version and which image type we use
    dbInsert(db, ".ZI", 5)
    dbInsert(db, ".ImageType", VigType)
    ## Read each vignette in turn and add it to the database
	message("Adding vignettes to ZIDB file...")
    VigExt <- paste0("\\.", VigType, "$")
	for (i in 1:length(Vigs)) {
    	Vig <- Vigs[i]
    	VigName <- sub(VigExt, "", basename(Vig))
    	VigSize <- file.info(Vig)$size
		if (is.na(VigSize)) {
			warning("file '", Vig, "' not found, or of null length")
			return(invisible(FALSE))
		}
    	dbInsert(db, VigName, readBin(Vig, "raw", VigSize + 100))
    }
    ## Add .zim files to db
	message("Adding data from ZIM files to ZIDB file...")
    for (i in 1:length(Zims)) {
    	Zim <- Zims[i]
    	ZimName <- sub("\\.zim$", "", basename(Zim))
    	ZimSize <- file.info(Zim)$size
		if (is.na(ZimSize)) {
			warning("file '", Zim, "' not found or of null length")
			return(invisible(FALSE))
		}
    	dbInsert(db, ZimName, readBin(Zim, "raw", ZimSize + 100))
    }
    ## Add zis info to db
	message("Adding sample data to ZIDB file...")
	dbInsert(db, ".SampleData", zisData)
    ## Add the data frame with all data and metadata to the file
	message("Adding R data to ZIDB file...")
    zidat <- file.path(zidir, paste0(basename(zidir), "_dat1.RData"))
    ## Check also other variants
	if (!file.exists(zidat))
		zidat <- file.path(zidir, paste0(basename(zidir), "_dat3.RData"))
	if (!file.exists(zidat))
		zidat <- file.path(zidir, paste0(basename(zidir), "_dat5.RData"))
	obj <- load(zidat)
	if (length(obj) != 1) {
		warning("Error loading ", zidat)
		return(invisible(FALSE))
	}
    dat <- get(obj)
	## Fix ECD in case of FIT_VIS data
	if ("FIT_Area_ABD" %in% names(dat)) dat$ECD <- ecd(dat$FIT_Area_ABD)
	dbInsert(db, ".Data", dat)
	## Do we delete sources?
    if (isTRUE(as.logical(delete.source)))
        unlink(zidir, recursive = TRUE)
	message("-- Done! --")
	## Indicate success...
	invisible(TRUE)
}
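## Example (illustrative paths): build the .zidb database for one sample,
## using the Description.zis metadata next to the sample directory
## zidbMake("Sample01", zisfile = "Description.zis", smptime = "10:30:00")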
## Make all .zidb files for data in the corresponding directory
zidbMakeAll <- function (path = ".", samples,
zisfiles = file.path(dirname(samples), "Description.zis"), type = "ZI5",
check = FALSE, check.vignettes = FALSE, replace = FALSE, delete.source = replace)
{
	if (!type %in% c("ZI1", "ZI3", "ZI5"))
		stop("only 'ZI1', 'ZI3', or 'ZI5' are currently supported for 'type'")
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of samples to process
	if (missing(samples) || !length(samples)) {	# Compute them from path
		## All dirs not starting with '_'
		dirs <- dir(path, pattern = "^[^_]", full.names = TRUE)
		samples <- unique(dirs[file.info(dirs)$isdir]) # Keep only directories
	}
	## If there is no dir, exit now
	if (!length(samples)) {
		warning("there are no directories to process in ", getwd())
		return(invisible(FALSE))
	}
	## Check zisfiles and make sure the vector has same length as samples
	## possibly recycling the file(s)
	zisfiles <- as.character(zisfiles)
	if (!length(zisfiles)) {
		warning("You must provide at least one ZIS file with samples characteristics")
		return(invisible(FALSE))
	}
	if (!checkFileExists(zisfiles, "zis", force.file = TRUE))
		return(invisible(FALSE))
	zisfiles <- rep(zisfiles, length.out = length(samples))
	## Possibly verify the files
	if (isTRUE(as.logical(check)))
		if (!zidVerifyAll(path = path, samples = samples,
			check.vignettes = check.vignettes))
			return(invisible(FALSE))
	## Create the .zidb files
	message("Creation of ZIDB files...")
	flush.console()
	zidbMakeOne <- function (item, samples, zisfiles, type, check.vignettes,
		replace, delete.source)
		zidbMake(samples[item], zisfile = zisfiles[item], type = type,
			check = FALSE, check.vignettes = check.vignettes, replace = replace,
			delete.source = delete.source)
	items <- 1:length(samples)
	ok <- batch(items, zidbMakeOne, samples = samples, zisfiles = zisfiles,
			type = type, check.vignettes = check.vignettes, replace = replace,
			delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(samples),
			" items were correctly processed (see .last.batch)")
		invisible(FALSE)
	} else {
		## Possibly clean the whole directory (move .zim files to _raw
		## and delete the _work subdir) if everything is fine
		zidClean(path = path, samples = samples)
		message("-- Done! --")
		invisible(TRUE)
	}
}
## Convert .zid file to .zidb file
zidToZidb <- function (zidfile, zisfile = file.path(dirname(zidfile),
"Description.zis"), replace = FALSE, delete.source = replace)
{
    if (!file.exists(paste0(zidfile, "b")) || isTRUE(as.logical(replace))) {
		ZidDir <- sub("\\.zid$", "", zidfile)
		IniDir <- dirname(zidfile)
		## Unzip the file...
		message("Unzipping ZID file '", basename(zidfile), "' ...")
		if (!length(tryCatch(unzip(zidfile, overwrite = replace,
			junkpaths = FALSE, exdir = IniDir), error = function (e) warning(e),
			warning = function (w) return()))) {
			message("    ... not done!")
			return(invisible(FALSE))
		}
		## Make sure ZidDir is created...
		if (!checkDirExists(ZidDir,
			message = 'expected unzipped dir "%s" not found'))
			return(invisible(FALSE))
		## Create the .zidb file
		res <- zidbMake(zidir = ZidDir, type = "ZI3", check = TRUE,
			check.vignettes = TRUE, replace = replace,
			delete.source = delete.source)
	} else res <- TRUE
	# Do we have to delete the zidfile?
	if (res && isTRUE(as.logical(delete.source))) unlink(zidfile)
	message("-- Done! --")
	invisible(res)
}
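## example (hypothetical file name): convert a single .zid file into a .zidb
## file, using the Description.zis file located in the same directory
## zidToZidb("Sample01.zid", replace = FALSE, delete.source = FALSE)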
## Convert all .zid files to .zidb files
zidToZidbAll <- function (path = ".", zidfiles, zisfiles =
file.path(dirname(zidfiles), "Description.zis"), replace = FALSE,
delete.source = replace)
{
    ## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of zidfiles to process
	if (missing(zidfiles) || !length(zidfiles))	# Compute them from path
		zidfiles <- dir(path, pattern = extensionPattern("zid"),
			full.names = TRUE) # All .zid files
	## If there are no zidfiles in this dir, exit now
	if (!length(zidfiles)) {
		warning("There are no ZID files to process in ", getwd())
		return(invisible(FALSE))
	}
	## Make sure there is no path associated
	#if (!all(zidfiles == basename(zidfiles))) {
	#	warning("You cannot provide paths for ZID files, just file names")
	#	return(invisible(FALSE))
	#}
	## Check zisfiles and make sure the vector has same length as zidfiles
	## possibly recycling the file(s)
	zisfiles <- as.character(zisfiles)
	if (!length(zisfiles)) {
		warning("You must provide at least one ZIS file with samples characteristics")
		return(invisible(FALSE))
	}
	if (!checkFileExists(zisfiles, "zis", force.file = TRUE))
		return(invisible(FALSE))
	zisfiles <- rep(zisfiles, length.out = length(zidfiles))
	## Create the .zidb files from the .zid files
	message("Conversion of ZID to ZIDB files...")
	flush.console()
	zidConvertOne <- function (item, zidfiles, zisfiles, replace, delete.source)
		zidToZidb(zidfiles[item], zisfile = zisfiles[item], replace = replace,
			delete.source = delete.source)
	items <- 1:length(zidfiles)
	ok <- batch(items, zidConvertOne, zidfiles = zidfiles, zisfiles = zisfiles,
		replace = replace, delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zidfiles),
			" items were correctly converted (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
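## example (hypothetical path): convert all .zid files of a directory at once
## zidToZidbAll(path = "g:/zooplankton/Madagascar2Macro")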
## Convert a .zidb file to a .zid file
zidbToZid <- function (zidbfile, zisfile = file.path(dirname(zidbfile),
"Description.zis"), replace = FALSE, delete.source = replace)
{
	## Name of the .zid file (the .zidb file with a .zid extension instead)
	zidfile <- paste(sub("\\.zidb$", "", zidbfile), "zid", sep = ".")
	if (!isTRUE(as.logical(replace)) && file.exists(zidfile)) {
		## It is not advised to delete source without rebuilding the .zid file
		## but it was expressly asked!
		### TODO: verify we have the same data in the .zid and .zidb files
		### before deleting the .zidb file!
		if (delete.source && file.exists(zidbfile))
			unlink(zidbfile)
		return(invisible(TRUE))	# Nothing else to do
	}
	if (!file.exists(zidfile) || isTRUE(as.logical(replace))) {
		ZidDir <- sub("\\.zidb$", "", zidbfile)
		## Create the directory to extract data
		dir.create(ZidDir)
		## Link database to objects in memory
		Zidb <- zidbLink(zidbfile)
		## All files in Zidb
		AllFiles <- ls(Zidb) # List vars not starting with . => zims + vignettes
		# .zim files
		isZimFile <- grep("_dat.$", AllFiles)
		ZimNames <- AllFiles[isZimFile]
		message("Extracting data from ZIM files...")
		for (ZimName in ZimNames)
		    writeBin(Zidb[[ZimName]],
				con = file.path(ZidDir, paste0(ZimName, ".zim")))
		## Vignettes
		VignNames <- AllFiles[-isZimFile]
		message("Extracting vignettes...")
		extension <- Zidb$.ImageType
		for(i in 1:length(VignNames)){
		    writeBin(Zidb[[VignNames[i]]],
				con = file.path(ZidDir, paste(VignNames[i], extension,
				sep = ".")))
		}
		# Rdata
		ZI.sample <- Zidb$.Data
		message("Extracting Rdata file...")
		save(ZI.sample, file = file.path(ZidDir, paste(sub(".zidb", "",
			basename(zidbfile)), "_dat1.RData", sep = "")))
		# .zis data
		message("Extraction of ZIS data not supported yet...")
		## TODO...
		# Create zid file
		message("Compressing ZID file...")
		res <- zidCompress(zidir = ZidDir, type = "ZI3", check = FALSE,
			check.vignettes = FALSE, replace = replace, delete.source = TRUE)
	} else res <- TRUE
	# Do we have to delete the zidbfile?
	if (res && isTRUE(as.logical(delete.source))) unlink(zidbfile)
	message("-- Done! --")
	invisible(res)
}
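## example (hypothetical file name): extract a .zidb database back into a .zid
## file, e.g., for use with older ZooImage (ZI1-ZI3) tools
## zidbToZid("Sample01.zidb", replace = FALSE, delete.source = FALSE)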
# Convert .zidb files to .zid files
zidbToZidAll <- function (path = ".", zidbfiles, zisfiles =
file.path(dirname(zidbfiles), "Description.zis"), replace = FALSE,
delete.source = replace)
{
    ## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of zidbfiles to process
	if (missing(zidbfiles) || !length(zidbfiles))	# Compute them from path
		zidbfiles <- dir(path, pattern = extensionPattern("zidb"),
			full.names = TRUE) # All .zidb files
	## If there are no zidbfiles in this dir, exit now
	if (!length(zidbfiles)) {
		warning("There are no ZIDB files to process in ", getwd())
		return(invisible(FALSE))
	}
	## Make sure there is no path associated
	#if (!all(zidbfiles == basename(zidbfiles))) {
	#	warning("You cannot provide paths for .zidb files, just file names")
	#	return(invisible(FALSE))
	#}
	## Check zisfiles and make sure the vector has same length as zidbfiles
	## possibly recycling the file(s)
	zisfiles <- as.character(zisfiles)
	if (!length(zisfiles)) {
		warning("You must provide at least one ZIS file with samples characteristics")
		return(invisible(FALSE))
	}
	if (!checkFileExists(zisfiles, "zis", force.file = TRUE))
		return(invisible(FALSE))
	zisfiles <- rep(zisfiles, length.out = length(zidbfiles))
	## Create the .zid files from the .zidb files
	message("Conversion of ZIDB to ZID files...")
	flush.console()
	zidConvertOne <- function (item, zidbfiles, zisfiles, replace, delete.source)
		zidbToZid(zidbfiles[item], zisfile = zisfiles[item], replace = replace,
			delete.source = delete.source)
	items <- 1:length(zidbfiles)
	ok <- batch(items, zidConvertOne,zidbfiles = zidbfiles, zisfiles = zisfiles,
		replace = replace, delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zidbfiles),
			" items were correctly converted (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
## Link the database to R objects
zidbLink <- function (zidbfile)
	db2env(dbInit(zidbfile))
## Read only Rdata file from a .zidb database
zidbDatRead <- function (zidbfile) {
	res <- zidbLink(zidbfile)$.Data
	## Fix ECD in case of FIT_VIS data
	if ("FIT_Area_ABD" %in% names(res)) res$ECD <- ecd(res$FIT_Area_ABD)
	res
}
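## example (hypothetical file name): link a .zidb database and read its data
## Zidb <- zidbLink("Sample01.zidb")
## ls(Zidb) # Vignettes and _datX entries available in the database
## dat <- zidbDatRead("Sample01.zidb") # The measurements, as a data frame
## head(dat)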
## Read only the sample data
zidbSampleRead <- function (zidbfile)
	zidbLink(zidbfile)$.SampleData
## Get a quick numerical summary for a zidb file
zidbSummary <- function(zidbfile, n = 3) {
  cat("== Sample metadata ==\n")
  print(zidbSampleRead(zidbfile))
  dat <- zidbDatRead(zidbfile)
  cat("\n")
  print(attr(dat, "metadata"))
  if (n > 0) {
    cat("\n== Sample data (first ", n, " lines on ", nrow(dat), ") ==\n",
      sep = "")
    print(head(dat, n = n))
  }
  invisible(dat)
}
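## example (hypothetical file name): metadata plus the first 3 lines of data
## zidbSummary("Sample01.zidb", n = 3)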
## Functions to plot a collage
zidbPlotNew <- function (main = "ZooImage collage", ...)
{
	par(mfrow = c(1, 1), mar = c(0.1, 0.1, 2.1, 0.1))
	plot(0:1, 0:1, type = "n", xaxt = "n", yaxt = "n", xlab = "", ylab = "",
		xaxs = "i", yaxs = "i", xlim = 0:1, ylim = 0:1, bty = "o",
		main = main, ...)
}
## Function to get a vignette from the database, rescale it and draw it in its
## corresponding vignette item
zidbDrawVignette <- function(rawimg, type, item, nx = 5, ny = 5,
vmar = 0.01)
{
	if (missing(type)) type <- "guess"
  ## Centers for each vignette, on a graph area of [0, 1] on x and y
	nv <- nx * ny
	## Coordinates for centers of each vignette area
	xc <- (1:nx) / nx - 1/(nx * 2)
	yc <- (ny:1) / ny - 1/(ny * 2) # Reversed: we start at the top, which is
	## the highest y coordinate. Vignettes fill from left to right and from
	## top to bottom
	vcoord <- expand.grid(list(x = xc, y = yc))
	## Half width and half height of a vignette area
	vhw <- ((xc[2] - xc[1]) - vmar) / 2
	vhh <- ((yc[1] - yc[2]) - vmar) / 2
	## Coordinates of top-left and bottom-right for vignettes areas
	vtl <- vcoord
	vtl$x <- vtl$x - vhw
	vtl$y <- vtl$y + vhh
	vbr <- vcoord
	vbr$x <- vbr$x + vhw
	vbr$y <- vbr$y - vhh
	## rawimg is a raw object containing JPEG or PNG data
	## item is the number of vignette area where to draw the vignette
	item <- as.integer(item[1])
	if (item < 1 || item > length(vtl$x)) stop("Wrong vignette item number")
	## Conversion from a raw object to a displayable image is done using
	## readPNG() or readJPEG() from the png/jpeg packages... For fast
	## processing, use native format, but 16bit not accepted for PNG and there
	## is a problem in case of transparency channel (if any) in PNG images on
	## windows devices
	if (type == "guess") {
	  vigimg <- try(readPNG(rawimg, native = TRUE), silent = TRUE)
	  if (inherits(vigimg, "try-error"))
	    vigimg <- readJPEG(rawimg, native = TRUE)
	} else if (type == "png") {
		vigimg <- readPNG(rawimg, native = TRUE)
	} else vigimg <- readJPEG(rawimg, native = TRUE)
	vigdim <- dim(vigimg) # Dimensions of the image in pixels
	## Determine top-left and bottom-right points of vignette bounding rectangle
	## for optimum display...
	## top-left point is always from the grid
	xleft <- vtl$x[item]
	ytop <- vtl$y[item]
	## Size of internal collage area (which is [0,1] both in x and y) in pixels
	totpx <- dev.size(units = "px")
	plt <- par("plt")
	totpx[1] <- totpx[1] * (plt[2] - plt[1]) # Width of collage area in pixels
	totpx[2] <- totpx[2] * (plt[4] - plt[3]) # Height of collage area in pixels
	## Size of vignette areas in pixels
	vwpx <- vhw * 2 * totpx[1]
	vhpx <- vhh * 2 * totpx[2]
	## If the vignette is smaller than the area, it fits without rescaling!
	if (vigdim[2] <= vwpx && vigdim[1] <= vhpx) {
		xright <- xleft + 2 * vhw / vwpx * vigdim[2]
		ybottom <- ytop - 2 * vhh / vhpx * vigdim[1]
	} else { # We need to rescale down the vignette to fit it in the area
		## Which dimension will fit the whole area?
		vigratio <- vigdim[2] / vigdim[1]
		arearatio <- vwpx / vhpx
		if (vigratio < arearatio) { # Fit height
			ybottom <- ytop - (2 * vhh)
			xright <- xleft + (2 * vhh * vigratio / arearatio)
		} else { # Fit width
			xright <- xleft + (2 * vhw)
			ybottom <- ytop - (2 * vhw / vigratio * arearatio)
		}
	}
	## Interpolation only works outside of windows!
	interpolate <- (names(dev.cur()) != "windows")
	## Note that if there is a transparency layer, a special treatment
	## is required for windows devices, see ?readPNG
	## Now, display that vignette in the collage
	rasterImage(vigimg, xleft, ybottom, xright, ytop, interpolate = interpolate)
}
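## example (hypothetical file name; a sketch combining zidbPlotNew() and
## zidbDrawVignette()): draw the first vignette of a database in the top-left
## cell of a 5x5 collage
## Zidb <- zidbLink("Sample01.zidb")
## vigs <- ls(Zidb)[-grep("_dat1", ls(Zidb))] # Vignette names only
## zidbPlotNew("My collage")
## zidbDrawVignette(Zidb[[vigs[1]]], item = 1, nx = 5, ny = 5)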
## Plot vignettes page by page (25, 5x5 each page)
zidbPlotPage <- function(zidbfile, page = 1, title = NULL, type = "guess",
method = NULL, class = NULL) {
  db <- zidbLink(zidbfile)
  # Default title
  if (is.null(title))
    title <- zidbSampleRead(zidbfile)$Label
  # Get the list of all vignettes for this sample
  items <- ls(db)
  ## Exclude the _dat1 entries (guard against grep() returning integer(0),
  ## in which case negative indexing would drop everything)
  datIdx <- grep("_dat1", items)
  vigs <- if (length(datIdx)) items[-datIdx] else items
  # Do we use data from a _valid.RData file, and sort for given class(es)?
  valid_file <- NULL
  if (!is.null(method)) {
    # Look for a _valid.RData file there
    valid_file <- file.path(dirname(zidbfile), "_analyses", method,
      paste0(sub("\\.zidb$", "", basename(zidbfile)), "_valid.RData"))
  }
  if (!is.null(valid_file)) {
    if (!file.exists(valid_file)) {
      stop("file '", valid_file, "' not found")
    } else {
      obj <- load(valid_file)
      valid <- get(obj)
      # Only keep objects in class
      if (!is.null(class)) {
        valid <- valid[valid$Class %in% class, ]
        if (!nrow(valid)) {
          stop("No items found for 'class' in 'valid_file'")
        } else {
          # Only keep these items
          keep <- valid$Id
          if (!all(keep %in% vigs)) {
            stop("Mismatch between the .zidb file and the _valid.RData file")
          } else {
            vigs <- as.character(keep)
          }
        }
      }
    }
  }
  l <- length(vigs)
  if (l < 1)
    stop("No items in class, or class not found in this sample")
  pages <- ceiling(l / 25)
  if (!is.numeric(page) || page < 1 || page > pages)
    stop("'page' must be a number between 1 and ", pages)
  if (page == pages) {
    n <- l %% 25
    if (n == 0) n <- 25
  } else n <- 25
  offset <- (page - 1) * 25
  # Plot the graph
  zidbPlotNew(paste0(title, " - page ", page))
  for (i in 1:n)
    zidbDrawVignette(db[[vigs[offset + i]]], item = i, nx = 5, ny = 5,
      type = type)
}
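## example (hypothetical file name): display the first page (25 vignettes)
## zidbPlotPage("Sample01.zidb", page = 1)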
 
## ==== end of /scratch/gouwar.j/cran-all/cranData/zooimage/R/zidb.R ====
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Make .zim files and import images, using a .zie import file for specifications
zieMake <- function (path = ".", Filemap = "Import_Table.zie", check = TRUE,
replace = FALSE, move.to.raw = TRUE, zip.images = "[.]tif$")
{
	## Example of use:
	## Import Digicam RAW files (currently, only Canon .CR2 files)
	## and transform them into .pgm file with correct names in _work subdirectory
	## move processed .cr2 files into _raw; create associated .zim files
	## This requires the 'dc_raw' and 'ppmtopgm' programs plus a couple of others!
	## TODO: change this to eliminate external programs dependencies!
	## We need 'identify' and 'convert' from ImageMagick (16-bit version)!
	## Make sure they are available
	if (isTRUE(as.logical(check))) {
		#checkCapable("identify")
		#checkCapable("convert")
		#checkCapable("dc_raw")
		#checkCapable("ppmtopgm")
		#checkCapable("zip")
	}
	## First, switch to the root directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- getwd()	# Indicate we are now in the right path
	### TODO If last subdir of path is "_raw", then, work with parent dir
	## and do not move files in _raw subdir
	## Read the Filemap
	cat("Reading Filemap...\n")
	if (!checkFileExists(Filemap, extension = "zie", force.file = TRUE))
		return(invisible(FALSE))
	## Check first line for ZI1-5
	if (!checkFirstLine(Filemap)) return(invisible(FALSE))
	## Read the file and check it is not empty
	## Note: we don't use comment.char = '#' because we want to read and rewrite
	## those comments!
	Lines <- scan(Filemap, character(), sep = "\t", skip = 0,
		blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE, comment.char = "")
	if (length(Lines) < 2) {
		warning('filemap empty or corrupted!')
		return(invisible(FALSE))
	}
	ZImark <- Lines[1]
	Lines <- Lines[-1]
	## Get the position of a section
	getSectionPos <- function (section = "Map",
		message = "no unique section '[%s]' found") {
		rx <- sprintf("[[]%s[]]", section)
		out <- grep(rx, Lines)
		if (length(out) != 1) {
			warning(sprintf(message, section))
			NULL
		} else out
	}
	getSection <- function (section = "Map", to = c("next","end"),
		message = "The [Map] section is empty!") {
		to <- match.arg(to)
		start <- getSectionPos(section)[1]
		if (!length(start)) return(NULL)
		end <- switch(to,
			"next" = {
				ends <- getSectionPos(".*")
				if (!length(ends)) return(NULL)
				ends[ends > start][1] - 1
			},
			"end" = length(Lines)
		)
		out <- Lines[seq.int(from = start + 1, to = end)]
		if (!length(out)) {
			warning(message)
			NULL
		} else out
	}
	## Get everything before '[Map]' as template data for the .zim file
	posMap <- getSectionPos("Map",
		"The file is corrupted: no or duplicated [Map] section found!")
	if (!length(posMap)) {
	  message("No [Map] section found in the .zie file: either incorrect file,",
	    " or data provided in a separate .txt file (select it instead)")
	  return(invisible(FALSE))
	}
	## Setup the zim data
	zimData <- Lines[1:(posMap - 1)]
	attr(zimData, "Sample") <- NULL	# Currently, there is no sample!
	attr(zimData, "MakeZim") <- FALSE
	## Extract various properties
	## Property extractor
	property <- function (property = "FilenamePattern", default = "") {
		rx <- sprintf("^%s[[:space:]]*=[[:space:]]*(.*)", property)
		if (any(gl <- grepl(rx, Lines))) {
			sub(rx, "\\1", Lines[gl][1])
		} else default
	}
	FilePat <- property("FilenamePattern")
	FracPat <- property("FractionPattern")
	SubPat <- property("SubsamplePattern")
	Convert <- property("Convert")
	Return <- property("Return")
	FileExt <- property("FileExt")
	FileC <- property("FileC")
	FileExt2 <- property("FileExt2", FileExt)
	MoveToWork <- tolower(property("MoveToWork")) %in% c("true", "yes", "1")
	Exif <- property("[<]exif[>]") != ""
	attr(zimData, "Exif") <- "" # Nothing yet here
	## Get the [Map] section
	Lines <- getSection("Map", to = "end", "The [Map] section is empty!")
	if (!length(Lines)) return(invisible(FALSE))
	message("Reading Filemap... OK!")
	## Make sure _raw, and _work subdirectories exists and have write access
	if (!forceDirCreate("_raw")) return(invisible(FALSE))
	if (Convert != "" || MoveToWork)
		if (!forceDirCreate("_work")) return(invisible(FALSE))
	## This function constructs the image file name, possibly using a FilenamePattern
	MakeImageName <- function(x, pattern = FilePat) {
		if (pattern == "") return(x)
		## Do we need to format a number?
		Format <- sub("^.*[<]([1-9]?)[>].*$", "\\1", pattern)
		if (Format != "")
			x <- formatC(as.integer(x), width = as.integer(Format), flag = "0")
		## Make the replacement according to the pattern
		File <- gsub("[<][1-9]?[>]", x, pattern) # Do we have to use FilePattern?
		return(File)
	}
	## Make sure that all image files are there, and there is no duplicated use
	## of the same image
	### TODO: indicate progression with exact line number in the zie file!
	### TODO: allow restarting from a given point!
	message("Checking all lines in the ZIE file for raw images...")
	allImages <- character(0)
	nLines <- length(Lines)
	for (i in 1:nLines) {
		### TODO: allow restarting from a given point and eliminate previous
		###       lines for which there are no images (considered as already
		###       processed!)
		progress(i, nLines)
		if (!grepl("^[-][>]", Lines[i])) {	# This is not a state change command
			File <- MakeImageName(trimString(sub("[=].*$", "", Lines[i])))
			checkFileExists(File)
			if (File %in% allImages) {
				warning(sprintf("Duplicated use of the same file : '%s' !",
					File))
				return(invisible(FALSE))
			}
			allImages <- c(allImages, File)
		}
	}
	progress(101) # Clear progression indicator
	message("...OK!")
	## Now that we know all image files are there, process the [Map] section
	## line-by-line
	message("Processing all lines in the ZIE file (import images and make ZIM files)...")
	ok <- TRUE
	## BuildZim : This function builds the zim file and check it
	BuildZim <- function (zimData, FracPat, SubPat, ZImark) {
		## Get the sample name first: it is needed to name the .zim file
		Smp <- attr(zimData, "Sample")
		if (is.null(Smp) || Smp == "") return(FALSE)
		## Calculate the name of the zim file
		zimFileName <- paste(Smp, "zim", sep = ".")
		zimFile <- file.path(getwd(), zimFileName)
		## If the zim file already exists, skip this
		if (!replace && file.exists(zimFile)) {
			warning(".zim file already exists for '", Smp, "'")
			return(TRUE)
		}
		## Make necessary replacements in Fraction and Subsample
		zimD <- zimData
		## Clear a whole section, starting from its header to the next header
		ClearSection <- function (Data, fromLine) {
			n <- length(Data)
			if (fromLine > n) return(Data)
			## Locate the next header (line starting with "[")
			NextHeader <- grep("^[[]", Data[(fromLine + 1):n])
			if (length(NextHeader) == 0) {
				toLine <- n
			} else {
				toLine <- NextHeader[1] + fromLine - 1
			}
			## Strip out this section
			return(Data[-(fromLine:toLine)])
		}
		## Deal with FracPat
		if (FracPat != "") {
			## This is the header to consider
			if (length(grep(FracPat, Smp)) == 0) {
				stop( paste("Sample '", Smp,
					"' is incompatible\nwith FractionPattern '", FracPat, "'",
					sep = ""))
			}
			Frac <- paste("[[]Fraction_", sub(FracPat, "\\1", Smp), "\\]",
				sep = "")
			posFrac <- grep(Frac, zimD)
			if (length(posFrac) < 1) {
				warning("[Fraction] section not found (", Frac, ")!")
				return(FALSE)
			}
			if (length(posFrac) > 1) {
				warning("multiple", Frac, "sections for sample '", Smp, "'")
				return(FALSE)
			}
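## example (hypothetical file name):
## isZim("Sample01_dat1.zim") # TRUE if the first line bears the ZI1-ZI3 marker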
			zimD[posFrac] <- "[Fraction]"
			## Strip out all other [Fraction_XXX] sections
			otherFrac <- grep("[[]Fraction_", zimD)
			if (length(otherFrac) > 0)
				for (i in 1:length(otherFrac))
					zimD <- ClearSection(zimD, otherFrac[i])
		}
		if (SubPat != "") {
			## This is the header to consider
			if (!length(grep(SubPat, Smp))) {
				warning("Sample '", Smp,
					"' is incompatible\nwith SubsamplePattern '", SubPat, "'")
				return(FALSE)
			}
			Sub <- paste("[[]Subsample_", sub(SubPat, "\\1", Smp), "\\]",
				sep = "")
			posSub <- grep(Sub, zimD)
			if (!length(posSub)) {
				warning("[Subsample] section not found (", Sub, ")!")
				return(FALSE)
			}
			if (length(posSub) > 1) {
				warning("multiple", Sub, "sections found for this sample!")
				return(FALSE)
			}
			zimD[posSub] <- "[Subsample]"
			## Strip out all other [Subsample_XXX] sections
			otherSub <- grep("[[]Subsample_", zimD)
			if (length(otherSub) > 0)
				for (i in 1:length(otherSub))
					zimD <- ClearSection(zimD, otherSub[i])
		}
		## Possibly insert Exif data
		if (Exif && !is.null(attr(zimData, "Exif"))) {
		    pos <- grep("^[<]exif[>]", zimD)
			# pos is recalculated here, because it may have changed!
		    if (length(pos) > 0)
				zimD <- c(zimD[1:(pos - 1)], attr(zimData, "Exif"),
					zimD[(pos+1):length(zimD)])
		}
		## Write the zim file
		#cat("\n")
		#message("Writing .zim file for sample '", Smp, "'")
		#cat("\n")
		cat(paste(c(ZImark, zimD), collapse = "\n"), file = zimFile)
		return(TRUE)
	}
	## UpdateZim: This function looks if the line asks for updating zimData
	## and, if so, returns the updated zimData; otherwise it returns FALSE
	UpdateZim <- function (dat, zimData) {
		### TODO: Strip out comments (not done here, because we want to process
		### strings with '#' correctly!
		if (length(grep("^[-][>]", dat)) == 0) return(FALSE)
		## This line starts with "->" => we update zimData
		Key <- sub("^[-][>]([^ =]+).*$", "\\1", dat)
		## Special treatment if Key == "Sample"
		if (Key == "Sample") {
			attr(zimData, "Sample") <- trimString(sub("^[^=]+=", "", dat))
			## Indicate that we process another sample
			attr(zimData, "MakeZim") <- TRUE # Tell to make the zim file
			attr(zimData, "Exif") <- ""
		} else { # This is a usual key
			## Replace every line corresponding to this key in zimData
			MatchLines <- grep(paste("^", Key, sep = ""), zimData)
			if (length(MatchLines) > 0)
				zimData[MatchLines] <- sub("^[-][>]", "", dat)
		}
		return(zimData)
	}
	## SetCalib : Add or change an entry in [Calibration] section
	SetCalib <- function (Data, Key, Entry) {
		Line <- paste(Key, Entry, sep = "=")
		## Is this key already defined?
		posKey <- grep(paste("^\\s*", Key, "\\s*=", sep = ""), Data)
		## If defined => change it now
		if (length(posKey) > 0) {
			Data[posKey] <- Line
			return(Data)
		}
		## Is the [Calibration] section already defined?
		posCalib <- grep("[[]Calibration\\]", Data)
		if (length(posCalib) > 0) {
			## Add this new entry in the [Calibration] section
			Data <- c(Data[1:posCalib[1]], Line,
				Data[(posCalib[1] + 1):length(Data)])
		} else {
			## Create the [Calibration] section at the end and add this entry
			## inside it
			if (Data[length(Data)] != "")
				Data <- c(Data, "")
			## Make sure that the section is separated with a blank
			Data <- c(Data, "[Calibration]", Line)
		}
		return(Data)
	}
	## Main Loop
	BlankField <- NULL  # The name of the blank-field image to use
	for (i in 1:nLines) {
		progress(i, nLines)
		res <- UpdateZim(Lines[i], zimData)
		if (!length(res)) {
			warning("problem while updating .zim files")
			return(invisible(FALSE))
		}
		## This is not a state change command
		if (length(res) == 1 && res == FALSE) {
			File <- MakeImageName(trimString(sub("[=].*$", "", Lines[i])))
			## Determine the name of the converted file
			if (Convert != "") {
				if (FileC == "") { # Construct the name of the converted file
					FileConv <- paste(noExtension(File), FileExt, sep = ".")
				} else {
					## Make sure that previous file is deleted
					unlink(FileC)
					FileConv <- FileC
				}
			} else {
				FileConv <- File
			}
			## Determine the final name to give to this converted file,
			## and check if it is a calibration file
			FileConvExt <- tolower(sub("^.*[.]", "", FileConv))
			## Calculate the final name we want for the converted file
			NewFile <- trimString(sub("^.*[=]", "", Lines[i]))
			## 1) If this is 'key' or 'key=' (NewFile == ""), then,
			##    the file is not renamed!
			if (NewFile == "") {
				FileConvName <- paste(noExtension(File), FileExt2, sep = ".")
				## 2) If the new name starts with "_Calib", then, never use the
				##    Sample part and add a CalibXX entry in .zim file
			} else if (length(grep("^_Calib", NewFile)) > 0) {
                ## If this is a blank-field image, use it for further process
				if (length(grep("^_CalibBF", NewFile)) > 0) {
					FileConvName <- paste(NewFile, FileExt, sep = ".")
					## Remove previous blank-field from root directory
					## (not needed any more!)
					if (!is.null(BlankField)) {
						## Delete blank-field images (.pgm and .img)
						## in the root directory
						unlink(BlankField)
						unlink(paste(noExtension(BlankField), "img", sep = "."))
					}
					BlankField <- FileConvName
				} else {
				    FileConvName <- paste(NewFile, FileExt2, sep = ".")
				}
				## Add or change the calibration information
				## (_CalibSP01 => CalibSP=_CalibSP01.ext)
				Key <- sub("^_(Calib[A-Z]+).*$", "\\1", NewFile)
				zimData <- SetCalib(zimData, Key, FileConvName)
				## 3) Name is Sample + name + ext
			} else {
				Smp <- attr(zimData, "Sample")
				if (is.null(Smp)) Smp <- ""
				FileConvName <- paste(Smp, NewFile, ".", FileExt2, sep = "")
			}
			## Possibly read Exif data and place it in the zim file
			## (or check correspondance)
			if (Exif) {
				ExifData <- attr(zimData, "Exif")
				ExifData2 <- .readExifRaw(File, check = FALSE)
				if (!is.null(ExifData) && length(ExifData) > 0 &&
					any(ExifData != "")) { # Do a comparison of Exif data
				    compa <- .compareExif(ExifData, ExifData2)
				    if (length(compa) > 0)
						warning("Exif seems to be different from the rest in '",
							File, "'")
				} else { # Just set Exif data
				    attr(zimData, "Exif") <- ExifData2
				}
			}
			## Possibly write a zim file?
			MakeZim <- attr(zimData, "MakeZim")
			if (!is.null(MakeZim) && MakeZim) {
				if (BuildZim(zimData, FracPat, SubPat, ZImark)) {
					attr(zimData, "MakeZim") <- FALSE
				} else {
					return(invisible(FALSE))
				}
			}
			## Possibly convert this file
			if (Convert != "") {
				if (zip.images != "" &&
					length(grep(zip.images, FileConvName)) != 0 &&
					length(grep("^_Calib", FileConvName)) == 0) {
					finalname <- paste(noExtension(FileConvName), "zip",
						sep = ".")
				} else finalname <- FileConvName
				#message("Converting image '", File, "' into '", finalname, "'")
				if (replace || !file.exists(finalname)) {
					## Create variables Rawbase and Rawnoext
					Rawbase <- File
					Rawnoext <- noExtension(File)
					## Run the command
					res <- eval(parse(text = Convert))
					if (Return != "" && length(grep(Return, res)) == 0) {
						## Check that result matches
						ok <- FALSE
						warning("result after conversion does not match for '",
							File, "'")
					}
					## Look if the converted file is created
					if (!file.exists(finalname)) {
						ok <- FALSE
						warning("problem: converted file not found '", finalname, "',",
						  " from '", File, "'")
					}
				}
			} else {
				if (Return != "")
					message("Processing image '", File, "'")
			}
			## If this is a blank-field, then test it
            if (length(grep("^_CalibBF", NewFile)) > 0) {
				msg <- .checkBF(FileConv)
				if (!is.null(msg) && length(msg) > 0 && any(msg != "")) {
					warning(paste(c(
						"Warning! Problem(s) detected with blank-field image:",
						msg), collapse = "\n\t"))  # Report the problem
				}
				## Eliminate dusts and smooth the image with a median filter
				## on a 10 times reduced version of the image
				## We need 'identify' and 'convert' from ImageMagick (16-bit)
####				Size <- imagemagick_identify(FileConv)
				Size <- 0  ### TODO: calculate this differently!
				Size2 <- round(Size / 10) # size of the resized image
####				imagemagick_convert(FileConv, Size, Size2)
			} else { # make blank-field correction
			    if (!is.null(BlankField)) {
					tryCatch({
						.BFcorrection(FileConv, BlankField, deleteBF = FALSE)
						}, error = function (e) {
							warning(as.character(e))
						})
					## Delete the uncorrected file
					unlink(FileConv)
					## Now, FileConv is the same file, but with a .tif extension
					FileConv <- paste(noExtension(FileConv), "tif", sep = ".")
					if (!file.exists(FileConv)) {
						ok <- FALSE
						warning("problem: blank-field corrected file not found: '",
							File, "'")
					}
			    }
			}
			## If this is an optical density calibration, proceed with it
			if (length(grep("^_CalibOD", NewFile)) > 0) {
				Cal <- calibrate(FileConv)
				Msg <- attr(Cal, "msg")
				## Report the problem
				if (!is.null(Msg) && length(Msg) > 0 && any(Msg != "")) {
					warning(paste(c("problem(s) detected with O.D. calibration image:",
						attr(Cal, "msg")), collapse = "\n\t"))
				}
				## Put calibration data in the .zim file
				zimData <- SetCalib(zimData, "WhitePoint", round(Cal[1]))
                zimData <- SetCalib(zimData, "BlackPoint", round(Cal[2]))
			}
			### TODO: do the same for the spatial calibration image...
			if (Convert == "") {
				## If a second extention is provided, we need to rename and
				## place the original into _raw subdir
				if (FileExt2 != "" && FileExt2 != FileExt) {
					## Copy the original image (indeed, same image, but with
					## original name) into _raw
                	RawFile <- file.path(getwd(), "_raw", File)
                	file.copy(File, RawFile)
                	## And rename the original copy
                	## Possibly move it to _work subdirectory
                	if (MoveToWork)
						FileConvName <- file.path(dirname(FileConvName),
							"_work", basename(FileConvName))
                	file.rename(File, FileConvName)
                }
            } else { # This image was converted
				## Save the original file in _raw subdir
				RawFile <- file.path(getwd(), "_raw", File)
				file.rename(File, RawFile)
				## Rename the converted file and place it in _work
				WorkFileConv <- file.path(getwd(), "_work", FileConvName)
				## Move it, except if it is a blank-field file, then, copy it!
				if (length(grep("^_CalibBF", FileConvName)) > 0) {
          file.copy(FileConvName, WorkFileConv)
				} else {
					file.rename(FileConvName, WorkFileConv)
				}
				if (!file.exists(WorkFileConv)) {
					warning("problem moving the converted file into '_work' subdirectory for '",
						FileConvName, "'")
					return(invisible(FALSE))
				} else {
					## Do we zip the resulting images, using the zim file
					## as zip comment?
					if (length(grep("^_Calib", FileConvName)) == 0) {
						## Only images, not calib files!
						if (zip.images != "" &&
							length(grep(zip.images, FileConvName)) != 0) {
							curdir <- getwd()
							setwd(file.path(curdir, "_work"))
							zimfile <- paste(attr(zimData, "Sample"), "zim",
								sep = ".")
							# file.copy(file.path(curdir, zimfile), zimfile)
							zipfile <- paste(noExtension(FileConvName), "zip",
								sep = ".")
							zip(zipfile, FileConvName, flags = "-rq9X")
							unlink(FileConvName, recursive = TRUE)
							## Add .zim data as comment to the .zip file
							## Note: except for a warning,
							## we don't care about not adding .zim data
							if (!zipNoteAdd(zipfile,
								file.path(curdir, zimfile))) {}
							## Verify that the .zip file is created
							if (!file.exists(zipfile)) {
								warning(sprintf(
									"problem creating the file : '%s' !",
									zipfile))
								return(invisible(FALSE))
							}
							setwd(curdir)
						} else {
							### TODO: what do we have to do here????
						}
		    		}
				}
			}
		} else zimData <- res # Update zimData with the value returned by UpdateZim()
	}
	progress(101) # Clear progression indicator
	## Possibly remove latest blank-field from root directory (not needed any more!)
	if (!is.null(BlankField)) {
		## Delete blank-field images (.pgm and .img) in the root directory
		unlink(BlankField)
		unlink(paste(noExtension(BlankField), "img", sep = "."))
	}
	if (ok) {
	  message("...OK!")
		if (move.to.raw)
			file.rename(Filemap, file.path(getwd(), "_raw", Filemap))
		## There is a bug: a 'fileconv.tif' file is created,
		## delete it for the moment
		unlink("fileconv.tif")
	} else {
	  message("...There were error: not all .zim files are correctly created")
	}
	invisible(ok)
}
## example:
## setwd("g:/zooplankton/Madagascar2Macro")	# My example directory
## zieMake(path = ".", Filemap = "Import_Madagascar2Macro.zie")
zieCompile <- function (path = ".", Tablefile = "Table.txt",
Template = "ImportTemplate.zie", Filemap = paste("Import_", noExtension(Tablefile),
".zie", sep = ""), Nrange = c(1, 1000), replace = TRUE, make.it = FALSE,
zip.images = "[.]tif$")
{
	message("Creating .zie file...")
	## Full path for Filemap
	FilemapPath <- file.path(path, Filemap)
	## First, switch to the root directory
	if (!checkDirExists(path)) return(NULL)
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- getwd() # Indicate we are now in the right path
    ## Check if needed files exist
    if (!checkFileExists(Tablefile)) return(NULL)
	if (!checkFileExists(Template)) return(NULL)
	## Check if the zie file already exists
    if (!isTRUE(as.logical(replace)) && file.exists(Filemap)) {
		warning("'", Filemap,
			"' already exists and is not replaced (replace = FALSE)!")
		return(FilemapPath)
	}
	## Read the data from the table
    Data <- read.table(Tablefile, header = TRUE, sep = "\t", dec = getDec(),
		as.is = TRUE)
	## Possibly get Nmin and Nmax from the template file
	Nmin <- Nrange[1] # Min number of images for each sample
	Nmax <- Nrange[2] # Max number of images for each sample
	## We start from the template
    file.copy(Template, Filemap, overwrite = TRUE)
    Cat <- function (...) cat(..., sep = "", file = Filemap, append = TRUE)
	Cat("\n")
    Cat("[Map]\n")
	CBF <- -1; CBFNum <- 0
	COD <- -1; CODNum <- 0
	CSp <- -1; CSpNum <- 0
	for (i in 1:nrow(Data)) {
        ## Get calibration data
        CalibBF <- Data$CalibBF[i]
        if (!is.null(CalibBF) && !is.na(CalibBF) && CalibBF != "" &&
			CalibBF != CBF) {
			CBFNum <- CBFNum + 1
			text <- paste(CalibBF, sprintf("_CalibBF%3.3d", CBFNum), sep = "=")
			Cat(text, "\n")
			CBF <- CalibBF
        }
        CalibOD <- Data$CalibOD[i]
        if (!is.null(CalibOD) && !is.na(CalibOD) && CalibOD != "" &&
			CalibOD != COD) {
			CODNum <- CODNum + 1
			text <- paste(CalibOD, sprintf("_CalibOD%3.3d", CODNum), sep = "=")
			Cat(text, "\n")
			COD <- CalibOD
        }
        CalibSp <- Data$CalibSP[i]
        if (!is.null(CalibSp) && !is.na(CalibSp) && CalibSp != "" &&
			CalibSp != CSp) {
			CSpNum <- CSpNum + 1
			text <- paste(CalibSp, sprintf("_CalibSP%3.3d", CSpNum), sep = "=")
			Cat(text, "\n")
			CSp <- CalibSp
        }
        ## Calculate list of all images
        num <- Data$Image[i]
        num <- gsub(";", ",", num, fixed = TRUE)
        num <- gsub("-", ":", num, fixed = TRUE)
        num <- paste("c(", num, ")", sep = "")
        num <- eval(parse(text = num))
        ## Check if the number is correct
		### TODO: add this in the template file!
		if (length(num) < Nmin || length(num) > Nmax) {
			warning("Wrong number of images in 'Image' field for ",
				Data$Sample[i], "!")
			return(NULL)
		}
		## Update several fields according to definitions in the samples table
		###TODO: add the other fields + define this option
		Fields <- c("Sample", "SubPart", "PixelSize", "VolIni", "VolPrec")
        Cols <- names(Data)
        for (j in 1:length(Fields)) {
            if (Fields[j] %in% Cols) {
                value <- Data[i, Fields[j]]
                if (!is.null(value) && !is.na(value) && value != "") {
                    text <- paste("->", Fields[j], "=", value, sep = "")
                    Cat(text, "\n")
                }
            }
        }
        ## Insert corresponding images
        for (j in 1:length(num)) {
            text <- paste(num[j], "=.", j, sep = "")
            Cat(text, "\n")
        }
    }
	## Do we make it also?
	if (isTRUE(make.it)) {
		res <- zieMake(path = path, Filemap = Filemap, check = TRUE,
		  zip.images = zip.images)
		if (res) { # Everything is fine...
			## Move the table and copy the template to the '_raw' subdir too
			file.rename(Tablefile, file.path(path, "_raw", basename(Tablefile)))
			## Move also possibly the .xls equivalent
			Tablexls <- sub("\\.[tT][xX][tT]$", ".xls", Tablefile)
			if (Tablexls != Tablefile && file.exists(Tablexls))
			    file.rename(Tablexls, file.path(path, "_raw",
					basename(Tablexls)))
			file.rename(Template, file.path(path, "_raw", basename(Template)))
		}
	}
	## Everything is fine, return the path of the created filemap file
	FilemapPath
}
## example:
## setwd("g:/zooplankton/Madagascar2Macro") # Directory with the example dataset
## zieCompile(Tablefile = "Madagascar2Macro.txt", Nrange = c(2,2))
## Create .zim files and the FitVisParameters.csv file for FlowCAM images
zieCompileFlowCAM <- function (path = ".", Tablefile,
Template = "ImportTemplate.zie", check.names = TRUE)
{
	## Import data from the FlowCAM
	if (!is.character(path) || !file.exists(path) || !file.info(path)$isdir) {
		warning("You must select a path containing text file for FlowCAM images")
		return(invisible(FALSE))
	}
	Tablefile <- file.path(path, basename(Tablefile))
	if (!checkFileExists(Tablefile, "txt", force.file = TRUE)) {
		warning("Tablefile not found: '", basename(Tablefile), "'")
		return(invisible(FALSE))
	}
	## Read this file
	ImportFile <- read.table(Tablefile, header = TRUE, sep = "\t", dec = ".")
	## Check colnames
	if (any(c("Image", "SubPart", "CellPart", "Replicates", "DepthMax", "VolIni",
	  "PixelSize", "WhitePoint", "BlackPoint") %in% colnames(ImportFile))) {
	  message("Note: ZooImage import file (not FlowCAM)")
	  return(invisible(zieCompile(path = path, Tablefile = Tablefile,
	    Template = Template, make.it = TRUE, zip.images = "")))
	}
	if (isTRUE(as.logical(check.names))) {
		ColNames <- c("Station", "Date", "FlowCell", "Mode", "Magnification",
			"Exp_Name", "Sample", "Dilution", "Sieve", "Volume", "Pump_Speed",
			"Duration", "Temperature", "Salinity", "Gain_Fluo_Ch1",
			"Threshold_Fluo_Ch1", "Gain_Fluo_Ch2", "Threshold_Fluo_Ch2",
			"Threshold_Scatter", "Min", "Max", "Size")
		if (!all(ColNames %in% colnames(ImportFile))) {
			warning("Your import file contains missing columns among Station,",
				" Date, FlowCell, Mode, Magnification, Exp_Name, Sample,",
				" Dilution, Sieve, Volume, Pump_Speed, Duration, Temperature,",
				" Salinity, Gain_Fluo_Ch1, Threshold_Fluo_Ch1, Gain_Fluo_Ch2,",
				" Threshold_Fluo_Ch2, Threshold_Scatter, Min, Max, or Size")
			return(invisible(FALSE))
		}
	}
	message("Creating .zim files and FitVisParameters.csv...")
	## Check if the ImportTemplate.zie is present in the directory
	Zie <- file.path(dirname(path), basename(Template))
	if (!file.exists(Zie)) {
		warning("Your directory must contain an 'ImportTemplate.zie' file")
		return(invisible(FALSE))
	}
	## Check if all samples are in the directory and export missing files
	notThere <- character(0)
	for (i in 1:length(ImportFile$Exp_Name))
		if (!file.exists(file.path(dirname(path), ImportFile$Exp_Name[i]))) {
			notThere <- c(notThere, as.character(ImportFile$Exp_Name[i]))
			warning(ImportFile$Exp_Name[i], " is not in the process directory")
		}
	## Select only samples present in the process directory
	if (length(notThere)) {
		ImportFile <- ImportFile[!ImportFile$Exp_Name %in% notThere, ]
		warning("import only samples in the process dir or import text file")
	}
	## Read the .ctx files of the samples
	Ctx <- file.path(dirname(path), ImportFile$Exp_Name,
		paste(ImportFile$Exp_Name, "ctx", sep = "."))
	lCtx <- length(Ctx)
	if (!lCtx) CtxFile <- NULL else CtxFile <- .ctxReadAll(ctxfiles = Ctx)
	## Create fields to generate a table as txt format for the importation
	Experiment <- ImportFile$Exp_Name
	Sample <- ImportFile$Sample
	Image <- CtxFile$Sample_Name
	PixelSize <- CtxFile$pixelsize
	minsize <- CtxFile$minsize
	maxsize <- CtxFile$maxsize
	VolumeDigitized <- CtxFile$VolumeDigitized
	Dilution_VIS <- CtxFile$Dilution_VIS
	SubPart <- ImportFile$Dilution / 100
	VolIni <- ImportFile$Volume
	CellPart <- VolumeDigitized / VolIni
	## Table with value to change
	ImportTxt <- data.frame(Experiment, Sample, Image, PixelSize, SubPart,
		minsize, maxsize, VolumeDigitized, Dilution_VIS, VolIni, CellPart)
	## Read the "ImportTemplate.zie" file
	ZieFileOrig <- scan(Zie, character(), sep = "\t", skip = 0,
		blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE, comment.char = "")
	## Loop to create a .zim file
	for (i in 1:nrow(CtxFile)) {
		ZieFile <- ZieFileOrig
		## Complete fields using ImportTxt
		ImageLine <- grep("^Sample", ZieFile)
		Sample <- as.numeric(sub("[ ]*$", "", sub("^Sample[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(Sample)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$Sample[i], sep = "")
		ImageLine <- grep("^Experiment", ZieFile)
		Experiment <- as.numeric(sub("[ ]*$", "",
			sub("^Experiment[ ]*[=][ ]*", "", ZieFile[ImageLine[1]])))
		if (is.na(Experiment)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$Experiment[i], sep = "")
		ImageLine <- grep("^SubPart", ZieFile)
		SubPart <- as.numeric(sub("[ ]*$", "", sub("^SubPart[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(SubPart)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$SubPart[i], sep = "")
		ImageLine <- grep("^CellPart", ZieFile)
		CellPart <- as.numeric(sub("[ ]*$", "", sub("^CellPart[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(CellPart)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$CellPart[i], sep = "")
		ImageLine <- grep("^Dilution_VIS", ZieFile)
		Dilution_VIS <- as.numeric(sub("[ ]*$", "",
			sub("^Dilution_VIS[ ]*[=][ ]*", "", ZieFile[ImageLine[1]])))
		if (is.na(Dilution_VIS)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$Dilution_VIS[i], sep = "")
		ImageLine <- grep("^VolIni", ZieFile)
		VolIni <- as.numeric(sub("[ ]*$", "", sub("^VolIni[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(VolIni)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$VolIni[i], sep = "")
		ImageLine <- grep("^VolumeDigitized", ZieFile)
		VolumeDigitized <- as.numeric(sub("[ ]*$", "",
			sub("^VolumeDigitized[ ]*[=][ ]*", "", ZieFile[ImageLine[1]])))
		if (is.na(VolumeDigitized)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$VolumeDigitized[i], sep = "")
		ImageLine <- grep("^minsize", ZieFile)
		minsize <- as.numeric(sub("[ ]*$", "", sub("^minsize[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(minsize)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$minsize[i], sep = "")
		ImageLine <- grep("^maxsize", ZieFile)
		maxsize <- as.numeric(sub("[ ]*$", "", sub("^maxsize[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(maxsize)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$maxsize[i], sep = "")
		ImageLine <- grep("^PixSize", ZieFile)
		PixSize <- as.numeric(sub("[ ]*$", "", sub("^PixSize[ ]*[=][ ]*", "",
			ZieFile[ImageLine[1]])))
		if (is.na(PixSize)) ZieFile[ImageLine[1]] <-
			paste(ZieFile[ImageLine[1]], ImportTxt$PixelSize[i], sep = "")
		## Read all context file
		ContextFile <- scan(Ctx[i], character(), sep = "\t", skip = 0,
			blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE,
			comment.char = "")
		## Read note
		Note <- file.path(dirname(path), ImportFile$Exp_Name[i],
			paste(ImportFile$Exp_Name[i], "_notes.txt", sep = ""))
		NoteFile <- scan(Note, character(), sep = "\t", skip = 0,
			blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE,
			comment.char = "")
		## Write the resulting table in the sample directory
		Tab <- c(ZieFile, "", ContextFile, "", "[Notes]", NoteFile)
		Export <- file.path(dirname(path), CtxFile$Sample_Name[i],
			paste(CtxFile$Sample_Name[i], "zim", sep = "."))
		write(Tab, file = Export)
	}
	## Create a batch file for FlowCAM image analysis using FitVis
	## Select a directory containing a series of FlowCAM runs
	ContextList <- .ctxReadAll(path = path, fill = FALSE, largest = FALSE,
		vignettes = TRUE, scalebar = TRUE, enhance = FALSE, outline = FALSE,
		masks = FALSE, verbose = TRUE)
	## Write the table of importation in that directory
	write.table(ContextList, sep = ",", dec = ".", row.names = FALSE,
		file = file.path(path, "FitVisParameters.csv"), quote = TRUE,
		col.names = TRUE)
	message("Import data table has been created in FitVisParameters.csv")
	invisible(TRUE)
}
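## example (hypothetical paths, in the style of the other examples): create
## the .zim files and FitVisParameters.csv for a directory of FlowCAM runs
## zieCompileFlowCAM(path = "g:/flowcam/process",
##     Tablefile = "ImportTable.txt", Template = "ImportTemplate.zie")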
## The function that eases creation of a ZIE object
### TODO: add a 'message' entry = message to display at the end of the importation
ZIE <- function (title, filter, description, pattern, command, author,
version, date, license, url, depends = "R (>= 2.4.0), zooimage (>= 1.0-0)",
type = c("import", "export"))
{
	if (!is.character(title) || !is.character(filter) ||
		!is.character(description) || !is.character(pattern) ||
		!is.character(command) || !is.character(author) ||
		!is.character(version) || !is.character(date) ||
		!is.character(license) || !is.character(url) ||
		!is.character(depends))
		stop("All arguments must be character strings!")
	obj <- list(title = title[1], filter = filter[1],
		description = paste(description, collapse = "\n"), pattern = pattern[1],
		command = paste(command, collapse = "\n"), author = author[1],
		version = version[1], date = date[1], license = license[1],
		url = url[1], depends = depends[1])
	type <- match.arg(type, several.ok = FALSE)
	class(obj) <- switch(type,
		import = c("ZIEimport", "ZIE"),
		export = c("ZIEexport", "ZIE"))
	return(obj)
}
print.ZIE <- function (x, ...)
{
	SubClass <- class(x)[1]
	cat("A", getTemp("ZIname"),
		"Import/Export definition object of subclass:", SubClass, "\n")
	cat("\n", x$description, "\n\n")
	cat("Title:  ", x$title, "\n")
	cat("Filter: ", x$filter, "\n")
	cat("Pattern:", x$pattern, "\n")
	cat("Command:", x$command, "\n")
	cat("Author: ", x$author, "\n")
	cat("Version:", x$version, "\n")
	cat("Date:    ", x$date, "\n")
	cat("License:", x$license, "\n")
	cat("Depends:", x$depends, "\n")
	cat("URL:    ", x$url, "\n")
	return(invisible(x))
}
## Import plain .tif files, with manual creation of associated .zim files
.ZIEimportTif <- ZIE(
	title       = "Tiff image files (*.tif)",
	filter      = "*.tif",
	description = c("Manual creation of ZooImage Metadata files (.zim)",
				    "given a list of directly usable TIFF images",
				    "that is, no conversion required and image names",
				    "already follow the ZooImage convention"),
	pattern     = "\\.[tT][iI][fF]$",
	command     = "zimMake(dir = Dir, pattern = Pattern, images = Files, show.log = TRUE)",
	author      = "Philippe Grosjean ([email protected])",
	version     = "1.1-0",
	date        = "2007-02-20",
	license     = "GPL 2 or above",
	url         = "",
	depends     = "R (>= 2.4.0), zooimage (>= 1.1-0)",
	type        = "import")
## Import plain .jpg files, with manual creation of associated .zim files
.ZIEimportJpg <- ZIE(
	title       = "Jpeg image files (*.jpg)",
	filter      = "*.jpg",
	description = c("Manual creation of ZooImage Metadata files (.zim)",
				    "given a list of directly usable JPEG images",
				    "that is, no conversion required and image names",
				    "already follow the ZooImage convention"),
	pattern     = "\\.[jJ][pP][gG]$",
	command     = "zimMake(dir = Dir, pattern = Pattern, images = Files, show.log = TRUE)",
	author      = "Philippe Grosjean ([email protected])",
	version     = "1.1-0",
	date        = "2007-02-20",
	license     = "GPL 2 or above",
	url         = "",
	depends     = "R (>= 2.4.0), zooimage (>= 1.1-0)",
	type        = "import")
## Complex import of images (conversion, renaming, etc.) with automatic creation
## of associated .zim files using a .zie file
.ZIEimportZie <- ZIE(
	title       = "ZooImage Import Extension (Import_*.zie)",
	filter      = "Import_*.zie",
	description = c("Run a ZIE import specification file to convert",
				    "and/or rename images and automatically create",
				    "associated ZIM files (ZooImage Metadata)"),
	pattern     = "\\.[zZ][iI][eE]$",
	command     = "zieMake(path = Dir, Filemap = Files[1], check = TRUE))",
	author      = "Philippe Grosjean ([email protected])",
	version     = "1.1-0",
	date        = "2007-02-20",
	license     = "GPL 2 or above",
	url         = "",
	depends     = "R (>= 2.4.0), zooimage (>= 1.1-0)",
	type        = "import")
## Compile a .zie file from TemplateImport.zie and a table.txt and then compute it
.ZIEimportTable <- ZIE(
	title       = "Table and ImportTemplate.zie (*.txt)",
	filter      = "*.txt",
	description = c("Create a ZIE file by interpretting a table,",
				    "using a template file in the same directory",
				    "and named 'ImportTemplate.zie'. The resulting",
				    "ZIE file is then run to make images + metadata"),
	pattern     = "\\.[tT][xX][tT]$",
	command     = "zieCompile(path = Dir, Tablefile = Files[1], make.it = TRUE))",
	author      = "Philippe Grosjean ([email protected])",
	version     = "1.1-0",
	date        = "2007-02-20",
	license     = "GPL 2 or above",
	url         = "",
	depends     = "R (>= 2.4.0), zooimage (>= 1.1-0)",
	type        = "import")
## Read most important EXIF data from a Digicam RAW file
.readExifRaw <- function (rawfile, full = FALSE, check = TRUE)
{
	## Make sure dc_raw is available and rawfile exists
	if (!checkFileExists(rawfile)) return(NULL)
	## Temporary change directory to the one where the file is located
	filedir <- dirname(rawfile)
	if (filedir != ".") {
		inidir <- getwd()
		setwd(filedir)
		on.exit(setwd(inidir))
		rawfile <- basename(rawfile)
	}
	temp <- "exifdata.txt"
####	misc_dcraw(rawfile, '-i -v ', temp)
	if (!checkFileExists(temp, message = "Error while reading exif data for '%s'"))
		return(NULL)
	res <- scan(temp, character(), sep = "\n", quiet = TRUE)
	if (length(res) < 6)
		return("Error getting EXIF data from '", rawfile, "'")
	## We replace ": " with "="
	res <- sub(": ", "=", res)
	## We replace all spaces by '_' (except for Filename and Timestamp,
	## first two lines!)
	res[-2] <- gsub(" ", "_", res[-2])
	if (full) {
		## Rewrite date time into yyyy-mm-dd hh:mm:ss
		datetime <- sub("^Timestamp=", "", res[2])
		lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
		newdate <- as.character(as.Date(datetime,
			format = "%a %b %d %H:%M:%S %Y"))
		Sys.setlocale("LC_TIME", lct)
		newtime <- sub("^.* (.*) [0-9]{4}$", "\\1", datetime)
		res[2] <- paste("Timestamp=", newdate, " ", newtime, sep = "")
	} else { # Keep only most important Exif data
	    res <- res[3:7]
	}
	unlink(temp)
	return(res)
}
## example:
## setwd("g:/zooplankton/Madagascar2Macro") # Directory with the example dataset
## (Res <- .readExifRaw("Image_0742.CR2"))
## Make a comparison of two exif datasets on sensible entries
.compareExif <- function (Exif1, Exif2)
{
	dif <- character(0)
	## Need same 'Camera', 'ISO_speed', 'Shutter', 'Aperture', 'Focal_Length'
	### TODO: make it work for larger Exif dataset. Currently requires that the
	###       fields are restricted to strict equal data
	if (length(Exif1) != length(Exif2)) {
	    dif <- "Not same size for both Exif data!"
	} else {
	    difpos <- sort(Exif1) != sort(Exif2)
	    if (any(difpos)) dif <- "Exif data are not identical!"
	}
	return(dif)
}
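## example (hypothetical files, reusing .readExifRaw() shown above):
## Exif1 <- .readExifRaw("Image_0742.CR2")
## Exif2 <- .readExifRaw("Image_0743.CR2")
## .compareExif(Exif1, Exif2) # character(0) when no difference is detected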
.isTestFile <- function (File)
{
	## Determine if a given file is a test file (a file with first line being
	## 'ZItest' and with size < 1000)
	if (file.info(File)$size > 1000) return(FALSE)
	checkFirstLine(File, "ZItest")
}
## Check a blank-field image, either in .pgm or in .tif format
.checkBF <- function (BFfile)
{
	if (!checkFileExists(BFfile, message = "Blank-field file '%s' not found!"))
		return(NULL)
	## Is it a test file?
	if (.isTestFile(BFfile))
		return(character(0)) # We behave like if the file was correct!
	msg <- character(0)
	filedir <- dirname(BFfile)
	if (filedir != ".") {
		## Temporary change directory to the one where the file is located
		inidir <- getwd()
		setwd(filedir)
		on.exit(setwd(inidir))
		BFfile <- basename(BFfile)
	}
	## The command to use depends on the format of the image (determined on the
	## extension)
	ext <- tolower(rev(strsplit(BFfile, "\\.")[[1]])[1])
	pgmfile <- BFfile
	if (ext == "tif") {
		## First, convert into a .pgm file
		pgmfile <- paste(BFfile, "pgm", sep = ".")
####		netpbm_tifftopnm( BFfile, pgmfile )
		delfile <- TRUE
		ext <- "pgm"
	} else delfile <- FALSE
	if (ext != "pgm")
		return(paste("Unrecognized image format for '", BFfile, "'", sep = ""))
####	BF <- netpbm_pgmhist(pgmfile, delete = delfile)
	### TODO: reimplement the gray levels histogram in R ('BF' must be a data
	### frame with 'Gray' and 'Count' columns); the checks below need it!
	## Make sure we work with 16bit images
	if (max(BF$Gray) < 256) {
		msg <- c(msg, "Blank-field seems to be a 8bit image (16bit required)")
	} else {
		## Look at the darkest gray value having at least 10 points
		BF <- BF[BF$Count >= 10, ]
		darkpart <- min(BF$Gray)
		## Eliminate values with low number of points
		BF <- BF[BF$Count >= 100, ]
		## Check range for these values
		rngBF <- range(BF$Gray)
		if (rngBF[2] > 65500)
			msg <- c(msg, "Blank-field is overexposed")
		if (rngBF[2] < 60000)
			msg <- c(msg, "Blank-field is underexposed or contains too dark areas")
		if ((rngBF[2] - rngBF[1]) > 15000)
			msg <- c(msg, "Blank-field is too heterogeneous")
		if ((rngBF[1] - darkpart) > 15000)
			msg <- c(msg, "Blank-field contains dark zones (dust?)")
	}
	return(msg)
}
## example:
## setwd("g:/zooplankton/Madagascar2Macro") # Directory with the example dataset
## .checkBF("test.pgm")
## .checkBF("test.tif")
## Make a blank-field correction on File, given a BFfile (blank-field)
## Both files must be 16bit gray PGM images
## The resulting file has same name as File, but with a .tif extension instead
## of .pgm
## The function returns TRUE in case of success... or an explicit error message
.BFcorrection <- function (File, BFfile, deleteBF = TRUE, check = TRUE)
{
	## Temporary BIFF file names are computed below; initialize them empty so
	## that the cleanup also works if we exit before they are set
	imgFile <- imgBFfile <- character(0)
	on.exit({
		unlink(imgFile)
		if (deleteBF) unlink(imgBFfile)
	})
	if (!checkFileExists(File, "pgm")) return(NULL)
	if (!checkFileExists(BFfile, "pgm", message = "Blank-field file '%s' not found"))
		return(NULL)
	## Check that the various scripts are available
	#checkCapable("pnm2biff")
	#checkCapable("statistics")
	#checkCapable("divide")
	#checkCapable("biff2tiff")
	## Switch to the directory of File
	filedir <- dirname(File)
	if (filedir != ".") {
		## Temporary change directory to the one where the file is located
		inidir <- getwd()
		setwd(filedir)
		on.exit(setwd(inidir), add = TRUE)
		File <- basename(File)
	}
	## Determine the name of the various files
	fileNoExt <- noExtension(File)
	imgFile <- paste(fileNoExt, "img", sep = ".")
	imgcorrFile <- paste(fileNoExt, "corr.img", sep = "")
	tifFile <- paste(fileNoExt, "tif", sep = ".")
	imgBFfile <- paste(noExtension(BFfile), "img", sep = ".")
    ## Is File a test file?
	if (.isTestFile(File)) {
		## We behave like if the file was corrected, but just copy the content
		## of File into tifFile
		file.copy(File, tifFile)
		## Simulate creation of the .img blank-field
		if (!deleteBF) file.copy(BFfile, imgBFfile)
		return(TRUE)
	}
	## Convert PGM files into BIFF
####	xite_pnm2biff(File, imgFile)
####	xite_pnm2biff(BFfile, imgBFfile)
	## Get the mean gray level of the blank-field
####	meangray <- xite_statistics(imgBFfile)
	## Eliminate the blank field
####	res <- xite_divide(meangray, imgFile, imgBFfile, imgcorrFile)
	## Make the tiff file
####	xite_biff2tiff(imgcorrFile, tifFile)
	return(TRUE) # Everything is fine!
}
## example:
## setwd("g:/zooplankton/madagascar2macro")
## .BFcorrection("_CalibOD03.pgm", "_CalibBF03.pgm")
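## Conceptually, the blank-field correction divides the image by the
## blank-field and rescales by the blank-field's mean gray level. A minimal
## sketch with plain matrices (hypothetical data, not the XITE pipeline used
## above):
## img <- matrix(c(30000, 40000, 50000, 60000), 2, 2)  # raw image
## bf  <- matrix(c(60000, 62000, 61000, 63000), 2, 2)  # blank-field
## corrected <- img / bf * mean(bf)                    # flat-field correction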
## Convert a RAW file (digital camera) into a pgm file
### TODO: can we not do this in the ImageJ plugin directly?
.rawConvert <- function (RawFile, OutputFile = "fileconv.pgm",
DcRawArgs = "-v -c -4 -q 3 -t 0 -k 0", fake = FALSE, replace = FALSE,
check = TRUE)
{
	## Check if the output file already exists
	if (file.exists(OutputFile)) {
		## If we want to replace existing file, delete it, otherwise, we are done
		if (replace) unlink(OutputFile) else return(TRUE)
	}
	## Check if RawFile exists
	if (!checkFileExists(RawFile)) return(FALSE)
	## Do a fake convert
	if (fake) { # Create a test file with just ZItest in it
		cat("ZItest\n", file = OutputFile)
		return(TRUE)
	}
	## Do the conversion using dc_raw
	## Check that the system is capable of doing the conversion
	if (check) {
		#checkCapable("dc_raw")
		#checkCapable("ppmtopgm")
	}
	if (isWin()) {
		## Convert the RAW file into PPM file (48bit color)
		## We have to do it in two steps because Windows lacks proper piping
####		misc_dcraw(RawFile, DcRawArgs , "RAWTEMP.PPM")
		## Convert from 48bit color to 16bit grayscale
####		netpbm_ppmtopgm("RAWTEMP.PPM", OutputFile)
	} else {
		## One step conversion (no tempfile)
		cmd <- sprintf('dcraw %s %s | ppmtopgm > "%s"' ,
			DcRawArgs, RawFile, OutputFile)
		res <- try(system(cmd), silent = TRUE)
		if (!checkFileExists(OutputFile, message = "Error while converting"))
			return(FALSE)
	}
	## Everything was fine
	return(TRUE)
}
## example:
## setwd("d:/ZI examples/MacroG16-example")
## .rawConvert("Image_3822.CR2", fake = TRUE)
## .rawConvert("Image_3822.CR2")
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zie.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Functions for manipulating .zim files (ZooImage Metadata/measurements)
## These .zim files contain metadata required to analyze plankton images
## and to record the way they were processed. Measurements on each identified
## object can also be appended in a table at the end of this file (in this case,
## the usual extension is '_dat1.zim' to indicate that data processed with
## ZooImage version 1 are present in the file).
## Check if a file is a "(_dat1).zim" file (must start with "ZI1", "ZI2"
## or "ZI3" and have a '.zim' extension)
isZim <- function (zimfile)
{
	## Check if the file does not exist or is a directory
	if (!checkFileExists(zimfile, force.file = TRUE, extension = "zim")) {
		FALSE
	} else {
		## Check the first line
		checkFirstLine(zimfile)
	}
}
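## example (hypothetical file name following the SCS.xxxx-xx-xx.SS+Ann scheme):
## isZim("SCS.2004-10-20.01+A.zim")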
## Make required .zim files for one or more images
zimMake <- function (dir = ".", pattern = extensionPattern("tif"),
images = list.files(dir, pattern))
{
	## Check that there are images to process
	if (!length(images)) {
		warning("no images to process!")
		return(invisible(FALSE))
	}
	## Name of images is something like SCS.xxxx-xx-xx.SS+Ann.tif
	## We make the same .zim file for all ...+Ann images, so reduce the list
	zims <- sort(unique(sampleInfo(images, type = "fraction",
		ext = pattern)))
	zims <- file.path(dir, paste0(zims, ".zim"))
		
	## The process to run in batch
	makeZim <- function (zim) {
		if (!file.exists(zim)) {
			message("Processing ZIM file ", basename(zim), "...")
			zimCreate(zim, template = getTemp(".template"), wait = TRUE)
			## Get this zim as new template
			assignTemp(".template", zim)
		} else message("Checking ZIM file ", basename(zim), "...")
		## Verify that the .zim file is correct
		zimVerify(zim) >= 0
	}
	on.exit(rmTemp(".template"))
	message("Creating ZIM files...")
	flush.console()
	ok <- batch(zims, makeZim, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zims),
			" ZIM files created (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
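## example (assuming a directory of .tif images; hypothetical path reusing the
## example dataset above):
## zimMake("g:/zooplankton/Madagascar2Macro")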
## Verify a "(_dat1).zim" file (all required fields + return the number of
## items in it). On success, return the number of measured items as numeric
## (0 if there are no data); otherwise, return -1
zimVerify <- function (zimfile, is.dat1 = hasExtension(zimfile, "_dat1.zim"),
check.table = FALSE)
{
	## Required fields
	## Here are predefined required fields before measurements
	reqfields <- c("[Image]", "Author", "Hardware", "Software",
        "ImageType", "[Fraction]", "Code", "Min", "Max", "[Subsample]",
        "SubPart", "SubMethod", "CellPart", "Replicates", "VolIni",
        "VolPrec")
	## Then required fields when measurements are done
	reqfields2 <- c("[Process]")
	## Finally, required column headers
    reqcols <- c("!Item", "Label", "BX", "BY", "Width", "Height")
	## Determine if there are custom verification rules defined and if they
	## are active
    newRules <- getOption("ZI.zim")
    if (length(newRules) && newRules$active == TRUE) {
        ## Do we delegate the whole process to a custom verification function?
		verifyAll <- newRules$verify.all
        if (!is.null(verifyAll) && inherits(verifyAll, "function"))
            return(verifyAll(zimfile = zimfile, is.dat1 = is.dat1,
				check.table = check.table))
		## Should we use additional verification code instead?
		verify <- newRules$verify
        reqfields <- c(reqfields, newRules$zim.required)
        reqfields2 <- c(reqfields2, newRules$dat1.zim.required)
        reqcols <- c(reqcols, newRules$dat1.data.required)
    } else verify <- NULL
	## Check that it is a zimfile
	if (!isZim(zimfile)) return(-1)
	## Run first the extra verification code
	if (!is.null(verify) && inherits(verify, "function")) {
		## We need to grab the error here and call stop from here to maintain
		## the old API and to allow the custom version of stop to be called
		## with the correct context of the zimVerify() function
		res <- try(verify(zimfile, is.dat1 = is.dat1,
            check.table = check.table), silent = TRUE)
		if (inherits(res, "try-error") || (is.character(res) && nchar(res))) {
			warning(as.character(res))
			return(-1)
		}
    }
	## Read the file...
	## Equal sign is used as comment char, in order to read only the field names
    Lines <- scan(zimfile, character(), sep = "\t", skip = 1, flush = TRUE,
		quiet = TRUE, blank.lines.skip = FALSE, comment.char = "=")
	## Trim leading and trailing white spaces
	Lines <- trimString(Lines)
	## Check that all required fields are present for a simple .zim file
    misfields <- reqfields[!(reqfields %in% Lines)]
    if (length(misfields) > 0) {
        warning(paste("Missing fields:", paste(misfields, collapse = ", ")))
		return(-1)
	}
	## Check if this is a _dat1.zim file with measurements
    if ("[Data]" %in% Lines) {
        ## Check for missing fields
		misfields2 <- reqfields2[!(reqfields2 %in% Lines)]
        if (length(misfields2) > 0) {
            warning(paste("Missing [Process] fields:", paste(misfields2,
				collapse = ", ")))
			return(-1)
		}
		## Check for required column headers
		posHeaders <- grep("^\\[Data\\]$", Lines)[1] + 1
		LineHeader <- scan(zimfile, character(), sep = "%", skip = posHeaders,
			nmax = 1, flush = TRUE, quiet = TRUE, comment.char = "=")
		Headers <- trimString(strsplit(LineHeader, "\t")[[1]])
		misHeaders <- reqcols[!(reqcols %in% Headers)]
		if (length(misHeaders) > 0) {
		    warning(paste("Missing columns in the table:", paste(misHeaders,
				collapse = ", ")))
			return(-1)
		}
		## Check that the table can be read
        if (isTRUE(as.logical(check.table))) {
			## Check the [Data] section
            posMes <- grep("^\\[Data\\]$", Lines)
            if (length(posMes) == 0) {
                warning("Trying to read the table of measurements but no [Data] section found!")
				return(-1)
            } else { # The [Data] section is found
				## we try to call read.table, catch the error, and throw it again
				## from here, because stop might have a different meaning
				## in the context of the zimVerify() function
				## allowing to use the zooImage calling handlers,
				## see errorHandling.R
				Mes <- try(read.table(zimfile, sep = "\t", header = TRUE,
					skip = posMes + 1), silent = TRUE)
				if (inherits(Mes, "try-error")) {
					warning(paste("Unable to read the table of measurements! : ",
						Mes))
					return(-1)
				} else { 	# Successful reading of the table of measurements
					return(nrow(Mes))	# Return the number of items measured
				}
            }
        } else {
			## Alternative method that does not read the table: the last
			## entry in Lines should be the number of items measured
			nItems <- Lines[length(Lines)]
			if (sub("^[0-9]+$", "", nItems) != "") {
			    warning("Impossible to determine the number of items measured!")
				return(-1)
			}
			return(as.integer(nItems))
        }
    } else if (isTRUE(as.logical(is.dat1))) {
		warning("No measurements found in this file")
		return(-1)
	} else return(0)
}
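## example: the return value encodes the status, so a validity test is
## typically written as zimVerify(...) >= 0 (hypothetical file name):
## nitems <- zimVerify("SCS.2004-10-20.01+A_dat1.zim", check.table = TRUE)
## if (nitems >= 0) message(nitems, " items measured")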
## Extract notes from .zip files and place them in .zim files
zimExtractAll <- function (zipdir = ".", zipfiles = zipList(zipdir),
path = NULL, replace = FALSE)
{
	## Make sure all zipfiles are in the same directory
	zipdirs <- dirname(zipfiles)
	if (length(unique(zipdirs)) > 1) {
		warning("all ZIP files must be located in the same directory!")
		return(invisible(FALSE))
	}
	## Check that the dir exists!
	if (!checkDirExists(zipdir)) return(invisible(FALSE))
	## Move to zipdir
	initdir <- setwd(zipdir)
	on.exit(setwd(initdir))
	zipdir <- getwd()   # That way, if we had ".", it is now expanded
	## Use only basenames for zip files
	zipfiles <- sort(basename(zipfiles))
	## Check that zipfiles exist
	if (!all(file.exists(zipfiles))) {
		stop("one or several ZIP files not found!")
		return(invisible(FALSE))
	}
	## Look at the path where to place .zim files
	if (!length(path)) {
		## The rule is the following one:
		## 1) if last subdir is "_raw", then place .zim file up one level
		## 2) else, place them in the same dir as the zip files
		path <- zipdir
		if (tolower(basename(path)) == "_raw") path <- dirname(path)
	} else {    # Look if this is a valid directory
		path <- path[1]
		if (!checkDirExists(path)) return(invisible(FALSE))
	}
	## Compute the names of .zim files from the names of .zip files
	## Note: use only the fraction, that is, SCS.xxxx-xx-xx.SS+F from
	## SCS.xxxx-xx-xx.SS+Fnn)
	## If there are duplicates, only extract first one
	zimfiles <- paste(sampleInfo(zipfiles, "fraction",
		ext = extensionPattern(".zip")), "zim", sep = ".")
	keep <- !duplicated(zimfiles)
	zimfiles <- zimfiles[keep]
	zipfiles <- zipfiles[keep]
	## Make full path name for zimfiles
	zimfiles <- file.path(path, zimfiles)
	## If replace == FALSE, eliminate existing .zim files from the list
	if (!isTRUE(as.logical(replace))) {
		keep <- !file.exists(zimfiles)
		zimfiles <- zimfiles[keep]
		zipfiles <- zipfiles[keep]
	}
	## Are there files left
	if (!length(zimfiles)) {
		message("done: no file to process!")
		return(invisible(TRUE))
	}
	## Extract .zim files, one at a time, and check them
	items <- seq_along(zimfiles)
	zimExtract <- function (item, zipfiles, zimfiles) {
		## Extract the .zim file
		if (is.null(zipNoteGet(zipfiles[item], zimfiles[item])))
			return(FALSE)
		
		## Check that the .zim file is created and return result accordingly
		zimVerify(zimfiles[item]) >= 0
	}
	## Batch process all files
	message("Extraction of ZooImage metadata (.zim) from compressed .zip images...")
	flush.console()
	ok <- batch(items, fun = zimExtract, zipfiles = zipfiles,
		zimfiles = zimfiles, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zimfiles),
			" metadata were extracted into ZIM files (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
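## example (hypothetical directory holding the compressed .zip images):
## zimExtractAll("g:/zooplankton/Madagascar2Macro/_raw")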
## Given a list of .zip files and a path where .zim files are located,
## update comment fields of the .zip files with latest .zim content
zimUpdateAll <- function (zipdir = ".", zipfiles = zipList(zipdir),
zimdir = NULL, check.zim = TRUE)
{
    ## Make sure we have full path for zip files
	if (zipdir == ".") zipdir <- getwd()
	zipfiles <- file.path(zipdir, zipfiles)
    ## Check that zipfiles exist
	if (!all(file.exists(zipfiles))) {
		warning("one or several ZIP files not found!")
		return(invisible(FALSE))
	}
	## Look for the path where .zim files are located
	if (!length(zimdir)) {
		## The rule is the following one:
		## 1) if last subdir of .zip files is "_raw", then .zim files
		##    should be up one level
		## 2) else, look at the same dir
		zimdir <- zipdir
		if (tolower(basename(zimdir)) == "_raw")
			zimdir <- dirname(zimdir)
	} else {    # Look if this is valid directory
		zimdir <- zimdir[1]
		if (!checkDirExists(zimdir, message = "'%s' is not a valid directory!"))
			return(invisible(FALSE))
	}
	## Switch to that dir
	initdir <- setwd(zimdir)
	on.exit(setwd(initdir))
	## Compute the names of zim files from the names of zip files
	## Note: use only the fraction, that is, SCS.xxxx-xx-xx.SS+F from
	## SCS.xxxx-xx-xx.SS+Fnn)
	## If there are duplicates, only extract first one
	zimfiles <- sprintf( "%s.zim",
		sampleInfo(zipfiles, "fraction", ext = extensionPattern("zip")))
	## Eliminate path for zimfiles
	zimfiles <- basename(zimfiles)
	## Keep only existing .zim files
	keep <- file.exists(zimfiles)
	zimfiles <- zimfiles[keep]
	zipfiles <- zipfiles[keep]
	## Are there files left?
	if (!length(zimfiles)) {
		message("done: no file to update!")
		return(invisible(TRUE))
	}
	## Check the zim files
	ok <- TRUE
	if (isTRUE(as.logical(check.zim))) {
		message("Verification of ZIM files...")
		flush.console()
		zfiles <- unique(zimfiles)
		zimCheck <- function (zim) {
			message("Verifying '", basename(zim), "' ...")
			zimVerify(zim) >= 0
		}
		ok <- batch(zfiles, zimCheck, verbose = FALSE)
	}
	if (ok) {
		message("-- Done! --")
	} else {
		warning("corrupted ZIM file(s) found, update not started (see .last.batch)")
		return(invisible(FALSE))
	}
	## If everything is OK, update comments in the zip files with the content
	## of the .zim files
	message("Update of metadata in ZIP files from ZIM data...")
	flush.console()
	items <- seq_along(zipfiles)
	updateZip <- function (item, zipfiles, zimfiles) {
		zipNoteAdd(zipfiles[item] , zimfiles[item])
	}
	ok <- batch(items, fun = updateZip, zipfiles = zipfiles,
		zimfiles = zimfiles, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zipfiles),
			" ZIP files were updated (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
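## example (hypothetical directory; by default, the .zim files are looked up
## one level above a '_raw' subdir):
## zimUpdateAll("g:/zooplankton/Madagascar2Macro/_raw")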
## Create a .zim file
zimCreate <- function (zimfile, template = NULL, edit = TRUE,
editor = getOption("fileEditor"), wait = FALSE)
{
	## Create a .zim file from a template and edit it
	if (missing(zimfile) || is.null(zimfile) || zimfile == "") {
		zimfile <- dlgInput("Give a name for the new ZIM file:",
			title = "ZIM file creation", default = "myfile.zim")$res
		if (!length(zimfile)) return(invisible(FALSE))
		if (!hasExtension(zimfile, "zim"))
			zimfile <- paste(zimfile, ".zim", sep = "")
	}
	## If the file exists, edit existing version instead
    if (file.exists(zimfile))
		if (isTRUE(as.logical(edit))) {
			return(zimEdit(zimfile, editor = editor, wait = wait))
		} else return(invisible(TRUE))
	## Look for the template
	if (!length(template))
		template <- file.path(getOption("ZITemplates"), "default.zim")
	if (!isZim(template)) return(invisible(FALSE))
	## Copy the template into the new file
	file.copy(template, zimfile)
	
	## Possibly edit this new file
	if (isTRUE(as.logical(edit))) {
		return(zimEdit(zimfile, editor = editor, wait = wait))
	} else return(invisible(TRUE))
}
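## example (hypothetical file name; uses the default template and opens the
## new file in the editor):
## zimCreate("SCS.2004-10-20.01+A.zim")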
## Edit a .zim file
zimEdit <- function (zimfile, editor = getOption("fileEditor"), wait = FALSE,
...)
{
	if (missing(zimfile) || !length(zimfile) || zimfile == "") {
		zimfile <- selectFile("Zim")
		if (zimfile == "") return(invisible(FALSE))
	} else if (!isZim(zimfile)) return(invisible(FALSE))
	fileEdit(zimfile, editor = editor, wait = wait, ...)
}
## Create a dat1.zim file by pooling lst and results.csv tables
zimDatMakeFlowCAM <- function (zimfile)
{
	## Check argument
	if (length(zimfile) != 1 || !is.character(zimfile)) {
		warning("you must select one ZIM file")
		return(invisible(FALSE))
	}
	if (!checkFileExists(zimfile, extension = "zim", force.file = TRUE))
		return(invisible(FALSE))
	message("compiling measurements and '", basename(zimfile), "'")
	
	## Dir containing the .zim file
	zidir <- dirname(zimfile)
	## Read list file
	## Read visual spreadsheet data (from the FlowCAM)
	#visdata <- .lstRead(file.path(zidir,
	#	paste(basename(zidir), "lst", sep = ".")), skip = 2)
	lstfile <- sub("\\.zim$", ".lst", zimfile)
    visdata <- readFlowCAMlst(lstfile, skip = 2, read.ctx = FALSE)
	
	## Read ImageJ results (from FITVis)
	fitdata <- read.table(file.path(zidir, "results.csv"),
		sep = ",", header = TRUE, dec = ".")
	
	## Create a general table of measurements
	alldata <- cbind(visdata, fitdata)
	
	## Add Label columns
	sample <- sub("\\.zim$", "", basename(zimfile))
	alldata$Label <- rep(sample, nrow(alldata))
	## Rename the Id column to !Item
	names(alldata)[grep("Id", names(alldata))] <- "!Item"
	## Select only useful columns
	alldata$FIT_Filename <- NULL
	
	## Create _dat1.zim file
	zidatfile <- file.path(zidir, sample,
		paste(sample, "dat1.zim", sep = "_"))
	if (!file.exists(dirname(zidatfile))) {
		warning("directory ", dirname(zidatfile), " does not exist")
		return(invisible(FALSE))
	}
	file.copy(from = zimfile, to = zidatfile, overwrite = FALSE)
	
	## Add table of measurements at the end
	cat("\n[Data]\n", file = zidatfile, append = TRUE)
	suppressWarnings(write.table(alldata, file = zidatfile, append = TRUE,
		quote = FALSE, sep = "\t", col.names = TRUE, row.names = FALSE,
		dec = ".")) # In R 3.0.2: "appending column names to file" warning
	
	invisible(TRUE)
}
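## example (hypothetical FlowCAM sample; expects <sample>.lst and results.csv
## next to the .zim file, and a <sample>/ subdir to receive the _dat1.zim):
## zimDatMakeFlowCAM("sample01.zim")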
## Create several dat1.zim files
zimDatMakeFlowCAMAll <- function (path = ".", zimfiles = NULL)
{
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	
	if (!length(zimfiles)) { # Compute them from path
		zimfiles <- dir(path, pattern = extensionPattern("zim"),
			full.names = TRUE, all.files = FALSE, recursive = TRUE)
	}	
	## Check at least one .zim file is found or provided
	if (!length(zimfiles)) {
		warning("you must select a dir containing ZIM file(s)")
		return(invisible(FALSE))
	}
	
	## Batch process creation of all dat1.zim files
	message("Creating _dat1.zim files...")
	flush.console()
	ok <- batch(zimfiles, zimDatMakeFlowCAM, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zimfiles),
			" files were completed (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
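## example (process all .zim files found under a hypothetical directory):
## zimDatMakeFlowCAMAll("d:/FlowCAM runs")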
## FlowCAM special treatment because the plugin doesn't export dat1.zim!
## Read list file
## TODO: eliminate this. Code is now in readFlowCAMlst() function!
#.lstRead <- function (lstfile, skip = 2)
#{
#	## Determine the version of the FlowCAM
#	ncol <- length(read.table(lstfile, header = FALSE, sep = ":", dec = ".",
#		skip = skip, nrows = 1))
#	if (ncol <= 44) {
#		## FlowCAM II with 44 columns
#		## Read the table
#		tab <- read.table(lstfile, header = FALSE, sep = ":", dec = '.',
#			col.names = c("Id", "FIT_Cal_Const", "FIT_Raw_Area",
#				"FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
#				"FIT_Raw_Perim", "FIT_Raw_Convex_Perim", "FIT_Area_ABD",
#				"FIT_Diameter_ABD", "FIT_Length", "FIT_Width",
#				"FIT_Diameter_ESD", "FIT_Perimeter", "FIT_Convex_Perimeter",
#				"FIT_Intensity", "FIT_Sigma_Intensity", "FIT_Compactness",
#				"FIT_Elongation", "FIT_Sum_Intensity", "FIT_Roughness",
#				"FIT_Feret_Max_Angle", "FIT_Avg_Red", "FIT_Avg_Green",
#				"FIT_Avg_Blue", "FIT_PPC", "FIT_Ch1_Peak", "FIT_Ch1_TOF",
#				"FIT_Ch2_Peak", "FIT_Ch2_TOF", "FIT_Ch3_Peak", "FIT_Ch3_TOF",
#				"FIT_Ch4_Peak", "FIT_Ch4_TOF", "FIT_Filename", "FIT_SaveX",
#				"FIT_SaveY", "FIT_PixelW", "FIT_PixelH", "FIT_CaptureX",
#				"FIT_CaptureY", "FIT_High_U32", "FIT_Low_U32", "FIT_Total"),
#			skip = skip)
#		## Add columns present in list files from FlowCAM III
#		tab$FIT_Feret_Min_Angle <- NA
#		tab$FIT_Edge_Gradient <- NA
#		tab$FIT_Timestamp1 <- NA
#		tab$FIT_Timestamp2 <- NA
#		tab$FIT_Source_Image <- NA
#		tab$FIT_Calibration_Image <- NA
#		tab$FIT_Ch2_Ch1_Ratio <- tab$FIT_Ch2_Peak / tab$FIT_Ch1_Peak
#		## New variables calc (present in dataexport.csv from the FlowCAM)
#		tab$FIT_Volume_ABD <- (4/3) * pi * (tab$FIT_Diameter_ABD/2)^3
#		tab$FIT_Volume_ESD <- (4/3) * pi * (tab$FIT_Diameter_ESD/2)^3
#		tab$FIT_Aspect_Ratio <- tab$FIT_Width / tab$FIT_Length
#		tab$FIT_Transparency <- 1 - (tab$FIT_Diameter_ABD/tab$FIT_Diameter_ESD)
#		tab$FIT_Red_Green_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Green
#		tab$FIT_Blue_Green_Ratio <- tab$FIT_Avg_Blue / tab$FIT_Avg_Green
#		tab$FIT_Red_Blue_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Blue
#	} else { # FlowCAM III with 47 columns
#		## Read the table
#		tab <- read.table(lstfile, header = FALSE, sep = ":", dec = '.',
#			col.names = c("Id", "FIT_Cal_Const", "FIT_Raw_Area",
#				"FIT_Raw_Feret_Max", "FIT_Raw_Feret_Min", "FIT_Raw_Feret_Mean",
#				"FIT_Raw_Perim", "FIT_Raw_Convex_Perim", "FIT_Area_ABD",
#				"FIT_Diameter_ABD", "FIT_Length", "FIT_Width",
#				"FIT_Diameter_ESD", "FIT_Perimeter", "FIT_Convex_Perimeter",
#				"FIT_Intensity", "FIT_Sigma_Intensity", "FIT_Compactness",
#				"FIT_Elongation", "FIT_Sum_Intensity", "FIT_Roughness",
#				"FIT_Feret_Max_Angle", "FIT_Feret_Min_Angle", "FIT_Avg_Red",
#				"FIT_Avg_Green", "FIT_Avg_Blue", "FIT_PPC", "FIT_Ch1_Peak",
#				"FIT_Ch1_TOF", "FIT_Ch2_Peak", "FIT_Ch2_TOF", "FIT_Ch3_Peak",
#				"FIT_Ch3_TOF", "FIT_Ch4_Peak", "FIT_Ch4_TOF", "FIT_Filename",
#				"FIT_SaveX", "FIT_SaveY", "FIT_PixelW", "FIT_PixelH",
#				"FIT_CaptureX", "FIT_CaptureY", "FIT_Edge_Gradient",
#				"FIT_Timestamp1", "FIT_Timestamp2", "FIT_Source_Image",
#				"FIT_Calibration_Image"),
#			skip = skip)
#		## Add columns present in list files from FlowCAM II
#		tab$FIT_High_U32 <- NA
#		tab$FIT_Low_U32 <- NA
#		tab$FIT_Total <- NA
#		## New variables calcul (present in dataexport.csv from the FlowCAM)
#		tab$FIT_Volume_ABD <- (4/3) * pi * (tab$FIT_Diameter_ABD/2)^3
#		tab$FIT_Volume_ESD <- (4/3) * pi * (tab$FIT_Diameter_ESD/2)^3
#		tab$FIT_Aspect_Ratio <- tab$FIT_Width / tab$FIT_Length
#		tab$FIT_Transparency <- 1 - (tab$FIT_Diameter_ABD/tab$FIT_Diameter_ESD)
#		tab$FIT_Red_Green_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Green
#		tab$FIT_Blue_Green_Ratio <- tab$FIT_Avg_Blue / tab$FIT_Avg_Green
#		tab$FIT_Red_Blue_Ratio <- tab$FIT_Avg_Red / tab$FIT_Avg_Blue
#		tab$FIT_Ch2_Ch1_Ratio <- tab$FIT_Ch2_Peak / tab$FIT_Ch1_Peak
#	}
#	tab
#}
## Read context file
## TODO: avoid duplicated code between versions
.ctxRead <- function(ctxfile, fill = FALSE, largest = FALSE, vignettes = TRUE,
scalebar = TRUE, enhance = FALSE, outline = FALSE, masks = FALSE,
verbose = TRUE)
{
	## Check arguments
	if (length(ctxfile) != 1 || !is.character(ctxfile)) {
		warning("you must select one FlowCAM context (.ctx) file")
		return(NULL)
	}
	if (!checkFileExists(ctxfile, extension = "ctx", force.file = TRUE))
		return(NULL)
	## Extract information from context file
	message("reading data from FlowCAM context file '", basename(ctxfile), "'")
	## Scan the ctx file
	ctxdata <- scan(ctxfile, character(), sep = "\t", skip = 0,
		blank.lines.skip = FALSE, flush = TRUE, quiet = TRUE, comment.char = "")
	## Read version of Visual SpreadSheet
	ImageLine <- grep("^SoftwareVersion", ctxdata)
	SoftwareVersion <- as.character(sub("[ ]*$", "",
		sub("^SoftwareVersion[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
	## Drop the last three characters to keep the major.minor version only
	Version <- sub("...$", "", SoftwareVersion)
	## Read right parameters
	if (Version == "1.5") {
		## Read recalibration duration
		ImageLine <- grep("^SaveIntervalMinutes", ctxdata)
		interval <- as.numeric(sub("[ ]*$", "",
			sub("^SaveIntervalMinutes[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read pixel size
		ImageLine <- grep("^CalibrationConstant", ctxdata)
		pixelsize <- as.numeric(sub("[ ]*$", "",
			sub("^CalibrationConstant[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read minimal size
		ImageLine <- grep("^MinESD", ctxdata)
		minsize <- as.numeric(sub("[ ]*$", "",
			sub("^MinESD[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read maximal size
		ImageLine <- grep("^MaxESD", ctxdata)
		maxsize <- as.numeric(sub("[ ]*$", "",
			sub("^MaxESD[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read the kind of segmentation used
		ImageLine <- grep("^CaptureDarkOrLightPixels", ctxdata)
		DarkOrLight <- as.numeric(sub("[ ]*$", "",
			sub("^CaptureDarkOrLightPixels[ ]*[=][ ]*", "",
			ctxdata[ImageLine[1]])))
		if (DarkOrLight == 0) {
			use <- "dark"
		} else if (DarkOrLight == 1) {
			use <- "light"	
		} else use <- "both"
		## Read segmentation threshold (version 1.5 has a single Threshold
		## field; use it for both dark and light)
		ImageLine <- grep("^Threshold", ctxdata)
		thresholddark <- as.numeric(sub("[ ]*$", "",
			sub("^Threshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		thresholdlight <- thresholddark
    
		## Path of the export of data
		select <- file.path(basename(dirname(ctxfile)), "data_export.csv")
		## Sample name
		Sample_Name <- basename(dirname(ctxfile))
		## Read Fluo information
		ImageLine <- grep("^Ch1Gain", ctxdata)
		Gain_Fluo_Ch1 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch1Gain[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^Ch1Threshold", ctxdata)
		Threshold_Fluo_Ch1 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch1Threshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^Ch2Gain", ctxdata)
		Gain_Fluo_Ch2 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch2Gain[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^Ch2Threshold", ctxdata)
		Threshold_Fluo_Ch2 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch2Threshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read information about FlowCell
		ImageLine <- grep("^FlowCellDepth", ctxdata)
		FlowCell <- as.numeric(sub("[ ]*$", "",
			sub("^FlowCellDepth[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Distance to nearest
		ImageLine <- grep("^DistanceToNeighbor", ctxdata)
		Dist_To_Nearest <- as.numeric(sub("[ ]*$", "",
			sub("^DistanceToNeighbor[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Calculation of volume analyzed
		## Number of raw images analyzed
		ImageLine <- grep("^RawImageTotal", ctxdata)
		Raw <- as.numeric(sub("[ ]*$", "",
			sub("^RawImageTotal[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Area analysed (Length * Width) in pixels
		ImageLine <- grep("^AcceptableLeft", ctxdata)
		Left <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableLeft[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableRight", ctxdata)
		Right <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableRight[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableTop", ctxdata)
		Top <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableTop[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableBottom", ctxdata)
		Bottom <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableBottom[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Area of one image in square microns:
		## ((R - L) * pixelsize) * ((B - T) * pixelsize)
		Area <- ((Right - Left) * pixelsize) * ((Bottom - Top) * pixelsize)
		## Total volume analysed (cm^3 = ml)
		VolumeDigitized <- (Area/(10^8)) * (FlowCell/10000) * Raw
		## New fields in the ctx FlowCAM III
		Threshold_Scatter <- NA
		VolumeDigitized_VIS <- NA
		Dilution_VIS <- NA
		AutoImageRate <- NA
		FlashDuration <- NA
	} else if (Version == "2.1" || Version == "2.2") {
		## Fields not present in new version
		Gain_Fluo_Ch1 <- NA
		Gain_Fluo_Ch2 <- NA
		## Read recalibration duration
		ImageLine <- grep("^RecalibrationIntervalMinutes", ctxdata)
		interval <- as.numeric(sub("[ ]*$", "",
			sub("^RecalibrationIntervalMinutes[ ]*[=][ ]*", "",
			ctxdata[ImageLine[1]])))
		## Read pixel size
		ImageLine <- grep("^CalibrationConstant", ctxdata)
		pixelsize <- as.numeric(sub("[ ]*$", "",
			sub("^CalibrationConstant[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read minimal size
		ImageLine <- grep("^MinESD", ctxdata)
		minsize <- as.numeric(sub("[ ]*$", "",
			sub("^MinESD[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read maximal size
		ImageLine <- grep("^MaxESD", ctxdata)
		maxsize <- as.numeric(sub("[ ]*$", "",
			sub("^MaxESD[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read the kind of segmentation used
		ImageLine <- grep("^CaptureDarkOrLightPixels", ctxdata)
		CaptureDarkOrLightPixels <- as.numeric(sub("[ ]*$", "",
			sub("^CaptureDarkOrLightPixels[ ]*[=][ ]*", "",
			ctxdata[ImageLine[1]])))
		if (CaptureDarkOrLightPixels == 0) {
			use <- "dark"
		} else if (CaptureDarkOrLightPixels == 1) {
			use <- "light"
		} else use <- "both"
		## Read segmentation threshold
		ImageLine <- grep("^ThresholdDark", ctxdata)
		thresholddark <- as.numeric(sub("[ ]*$", "",
			sub("^ThresholdDark[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^ThresholdLight", ctxdata)
		thresholdlight <- as.numeric(sub("[ ]*$", "",
			sub("^ThresholdLight[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Path of the export of data
		ImageLine <- grep("^AutoExportList", ctxdata)
		AutoExportList <- as.numeric(sub("[ ]*$", "",
			sub("^AutoExportList[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		if (AutoExportList == 1) {
			select <- file.path(basename(dirname(ctxfile)),
				paste(basename(dirname(ctxfile)),"csv", sep = "."))
		} else {
			select <- file.path(basename(dirname(ctxfile)), "data_export.csv")
		}
		## Sample name
		Sample_Name <- basename(dirname(ctxfile))
		## Read Fluo information
		ImageLine <- grep("^Ch1Threshold", ctxdata)
		Threshold_Fluo_Ch1 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch1Threshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^Ch2Threshold", ctxdata)
		Threshold_Fluo_Ch2 <- as.numeric(sub("[ ]*$", "",
			sub("^Ch2Threshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read Scatter information
		ImageLine <- grep("^ScatterThreshold", ctxdata)
		Threshold_Scatter <- as.numeric(sub("[ ]*$", "",
			sub("^ScatterThreshold[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Read information about FlowCell
		ImageLine <- grep("^FlowCellDepth", ctxdata)
		FlowCell <- as.numeric(sub("[ ]*$", "",
			sub("^FlowCellDepth[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Distance to nearest
		ImageLine <- grep("^DistanceToNeighbor", ctxdata)
		Dist_To_Nearest <- as.numeric(sub("[ ]*$", "",
			sub("^DistanceToNeighbor[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Calculation of volume analyzed
		## Number of raw images analyzed
		ImageLine <- grep("^RawImageTotal", ctxdata)
		Raw <- as.numeric(sub("[ ]*$", "",
			sub("^RawImageTotal[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Area analysed (Length * Width) in pixels
		ImageLine <- grep("^AcceptableLeft", ctxdata)
		Left <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableLeft[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableRight", ctxdata)
		Right <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableRight[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableTop", ctxdata)
		Top <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableTop[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		ImageLine <- grep("^AcceptableBottom", ctxdata)
		Bottom <- as.numeric(sub("[ ]*$", "",
			sub("^AcceptableBottom[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Area of one image in square microns:
		## ((R - L) * pixelsize) * ((B - T) * pixelsize)
		Area <- ((Right - Left) * pixelsize) * ((Bottom - Top) * pixelsize)
		## Total volume analysed (cm^3 = ml)
		VolumeDigitized <- (Area/(10^8)) * (FlowCell/10000) * Raw
		## Total volume analyzed calculated by Visual Spreadsheet
		ImageLine <- grep("^TotalVolumeML", ctxdata)
		VolumeDigitized_VIS <- as.numeric(sub("[ ]*$", "",
			sub("^TotalVolumeML[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Dilution
		ImageLine <- grep("^VolumeCalibrationFactor", ctxdata)
		Dilution_VIS <- as.numeric(sub("[ ]*$", "",
			sub("^VolumeCalibrationFactor[ ]*[=][ ]*", "",
			ctxdata[ImageLine[1]])))
		## AutoImage
		ImageLine <- grep("^AutoImageRate", ctxdata)
		AutoImageRate <- as.numeric(sub("[ ]*$", "",
			sub("^AutoImageRate[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
		## Flash Duration
		ImageLine <- grep("^FlashDuration", ctxdata)
		FlashDuration <- as.numeric(sub("[ ]*$", "",
			sub("^FlashDuration[ ]*[=][ ]*", "", ctxdata[ImageLine[1]])))
	} else {
		warning("unsupported Visual Spreadsheet version '", SoftwareVersion,
			"' in context file")
		return(NULL)
	}
	## Create the table and return it
	data.frame(select, interval, pixelsize, minsize, maxsize, use,
		thresholddark, thresholdlight, fill = fill, largest = largest,
		vignettes = vignettes, scalebar = scalebar, enhance = enhance,
		outline = outline, masks = masks, verbose = verbose, Sample_Name,
		FlowCell, Gain_Fluo_Ch1, Threshold_Fluo_Ch1, Gain_Fluo_Ch2,
		Threshold_Fluo_Ch2, Threshold_Scatter, Dist_To_Nearest, VolumeDigitized,
		VolumeDigitized_VIS, SoftwareVersion, Dilution_VIS, AutoImageRate,
		FlashDuration)
}
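## The parsing above repeats the same idiom: locate a "Key = value" line and
## strip the key and trailing spaces. A minimal helper capturing that pattern
## could look like this (a sketch, not used by the code above):
## ctxValue <- function (ctxdata, key) {
##     line <- grep(paste0("^", key), ctxdata)[1]
##     pattern <- paste0("^", key, "[ ]*[=][ ]*")
##     as.numeric(sub("[ ]*$", "", sub(pattern, "", ctxdata[line])))
## }
## pixelsize <- ctxValue(ctxdata, "CalibrationConstant")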
## Read several ctx files
.ctxReadAll <- function (path = ".", ctxfiles = NULL, fill = FALSE,
largest = FALSE, vignettes = TRUE, scalebar = TRUE, enhance = FALSE,
outline = FALSE, masks = FALSE, verbose = TRUE)
{
	## First, switch to that directory
	if (!checkDirExists(path)) return(NULL)
	
	if (!length(ctxfiles)) { # Compute them from path
		ctxfiles <- dir(path, pattern = extensionPattern("ctx"),
			full.names = TRUE, all.files = FALSE, recursive = TRUE)
	}	
	## Check at least one .ctx file is found or provided
	if (!length(ctxfiles)) {
		warning("you must select a directory containing FlowCAM data")
		return(NULL)
	}
	
	## Read first ctx file
	res <- .ctxRead(ctxfiles[1], fill = fill, largest = largest,
		vignettes = vignettes, scalebar = scalebar, enhance = enhance,
		outline = outline, masks = masks, verbose = verbose)
	## Make a loop to read each one
	nfiles <- length(ctxfiles)
	if (nfiles > 1) for (i in 2:nfiles)
		res <- rbind(res, .ctxRead(ctxfiles[i], fill = fill,
			largest = largest, vignettes = vignettes, scalebar = scalebar,
			enhance = enhance, outline = outline, masks = masks,
			verbose = verbose))
	res
}
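## example (read all .ctx files found under a hypothetical FlowCAM directory):
## ctxtable <- .ctxReadAll("d:/FlowCAM runs")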
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zim.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Zip a .tif image and embed the corresponding .zim file as comment
## This requires the 'zip' program!
zipImg <- function (imagefile, zimfile = NULL, check.zim = TRUE,
replace = FALSE, delete.source = FALSE)
{
	## We need to switch to the image dir for correct path in the zip file
	imagefile <- as.character(imagefile)
	if (length(imagefile) != 1) {
		warning("you must provide exactly one image file name")
		return(invisible(FALSE))
	}
	## Check if imagefile exists
	if (!checkFileExists(imagefile, force.file = TRUE,
		message = "%s doesn't exist, or is a directory!"))
		return(invisible(FALSE))
		
	## Switch directory to the one of the image
	initdir <- setwd(dirname(normalizePath(imagefile)))
	on.exit(setwd(initdir))
	## Simplify image file path, since we are now in the right dir
	imagefile <- basename(imagefile)
	## Is there an associated .zim file?
	if (!length(zimfile)) {
		fraction <- sampleInfo(imagefile, "fraction",
			ext = extensionPattern("tif"))
		zimfile <- paste(fraction, "zim", sep = ".")
	} else {
		zimfile <- as.character(zimfile)
		if (length(zimfile) > 1) {
			warning("you cannot provide more than one ZIM file")
			return(invisible(FALSE))
		}
	}
	### TODO: the .zim file can correspond to other parts, like Sample+A1.zim
	###       instead of Sample+A.zim!
	if (!checkFileExists(zimfile, force.file = TRUE,
		message = "%s doesn't exist; cannot process the corresponding image"))
		return(invisible(FALSE))
	## Verify the content of the .zim file (returns -1 in case of error)
	if (isTRUE(as.logical(check.zim)) && zimVerify(zimfile) < 0)
		return(invisible(FALSE))
	## Zip the image in the '_raw' subdir and add the information from the .zim
	## file as comment
	message("Zipping image '", imagefile, "' ...")
	zipfile <- paste(noExtension(imagefile), "zip", sep = ".")
	zipfile <- file.path(".", "_raw", zipfile)
	## Make sure that "_raw" subdir exists
	if (!forceDirCreate("_raw")) return(invisible(FALSE))
	## Copy or move the image to a .zip compressed file
	if (isTRUE(as.logical(replace)) && file.exists(zipfile))
		unlink(zipfile)
	
	## zip() function returns status zero if everything is fine
	if (zip(zipfile, imagefile, flags = "-rq9X") != 0) {
		warning("error while zipping '", basename(imagefile), "'")
		return(invisible(FALSE))
	}
	## Add comment to the zip file
	## Note: the .zim file is never deleted, because it can be used for other
	## purposes!
	## Note2: apart from a warning, we don't care if adding the .zim data fails
	zipNoteAdd(zipfile, zimfile)
	## Do we delete the source image? (not much of a problem if it fails)
	if (isTRUE(as.logical(delete.source))) unlink(imagefile)
	## Invisibly indicate success
	invisible(TRUE)
}
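## example (hypothetical image; the matching SCS.2004-10-20.01+A.zim file must
## exist in the same directory):
## zipImg("SCS.2004-10-20.01+A1.tif")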
## Compress all .tif images in the corresponding directory
## (at least those with an associated .zim file)
zipImgAll <- function (path = ".", images = NULL, check.zim = TRUE,
replace = FALSE, delete.source = FALSE)
{
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of images to process
	if (!length(images))	# Compute them from path
		images <- dir(path, pattern = extensionPattern("tif")) # All .tif files
	## If there are no images in this dir, exit now
	if (!length(images)) {
		warning("There are no images to process in ", getwd())
		return(invisible(FALSE))
	}
	## Make sure there is no path associated
	if (!all(images == basename(images))) {
		warning("You cannot provide paths for 'images', just file names")
		return(invisible(FALSE))
	}
	## Look at associated .zim files
	zimfiles <- paste(sampleInfo(images, "fraction",
		ext = extensionPattern("tif") ), ".zim", sep = "")
	keep <- file.exists(zimfiles)
	if (!any(keep)) {
		warning("You must create ZIM files first (ZooImage Metadata)!")
		return(invisible(FALSE))	
	}
	if (!all(keep)) {
    	warning(sum(!keep), " of ", length(keep),
			" images have no associated ZIM file and will not be processed!")
		images <- images[keep]
		zimfiles <- zimfiles[keep]
	}
	## Check the zim files
	ok <- TRUE
	if (isTRUE(as.logical(check.zim))) {
		message("Verification of ZIM files...")
		flush.console()
		zfiles <- unique(zimfiles)
		zimCheck <- function (zim) {
			message("Verifying '", basename(zim), "' ...")
			zimVerify(zim) >= 0
		}
		ok <- batch(zfiles, zimCheck, verbose = FALSE)
	}
	if (ok) {
		message("-- Done! --")
	} else {
		warning("corrupted ZIM file(s) found, compression not started!")
		return(invisible(FALSE))
	}
	## If everything is ok compress these files
	message("Compression of images...")
	flush.console()
	ok <- batch(images, zipImg, check.zim = FALSE,
		replace = replace, delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(images),
			" images were compressed (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
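## example (compress every .tif with a matching .zim file, then delete the
## source images; hypothetical path):
## zipImgAll("g:/zooplankton/Madagascar2Macro", delete.source = TRUE)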
## Uncompress .tif image and .zim file from a .zip archive file
unzipImg <- function (zipfile, replace = FALSE, delete.source = FALSE)
{
	# Extract .zim file, .tif file or both from a .zip archive
	zipfile <- as.character(zipfile)
	if (length(zipfile) != 1) {
		warning("you must provide one file path in 'zipfile'")
		return(invisible(FALSE))
	}
	
	## Check if zipfile exists
	if (!checkFileExists(zipfile, force.file = TRUE,
		message = "%s doesn't exist, or is a directory!"))
		return(invisible(FALSE))
	
	## Special case: if dir is _raw, then extract into parent dir (..)
	isRawDir <- basename(dirname(normalizePath(zipfile))) == "_raw"
	
	## Switch directory to the one of the zip archive
	initdir <- setwd(dirname(normalizePath(zipfile)))
	on.exit(setwd(initdir))
	## Simplify zip file path, since we are now in the right dir
	zipfile <- basename(zipfile)
	
	## Determine the name of the corresponding .zim file
	fraction <- sampleInfo(zipfile, "fraction",
		ext = extensionPattern("zip"))
	zimfile <- paste(fraction, "zim", sep = ".")
	if (isRawDir) zimfile <- file.path("..", zimfile)
	
	message("Unzipping '", zipfile, "' ...")
	
	## Do we replace existing .zim files?
	replace <- isTRUE(as.logical(replace))
	if (replace || !file.exists(zimfile)) {
		## Extract data from the zimfile
		if (!length(zipNoteGet(zipfile, zimfile)))
			return(invisible(FALSE))
	}
	
	## Unzip the .tif image
	if (isRawDir) exdir <- ".." else exdir <- "."
	if (!length(tryCatch(unzip(zipfile, overwrite = replace, junkpaths = TRUE,
		exdir = exdir), error = function (e) warning(e),
			warning = function (w) return()))) {
		message("    ... not done!")
		return(invisible(FALSE))
	}
	## Do we delete the zip archive? (not much of a problem if it fails here)
	if (isTRUE(as.logical(delete.source))) unlink(zipfile)
	## Invisibly indicate success
	invisible(TRUE)
}
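## example (extract the image and its .zim metadata from a hypothetical
## archive stored in a '_raw' subdir):
## unzipImg("_raw/SCS.2004-10-20.01+A1.zip")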
## Extract all .zim, .tif or both from .zip files
unzipImgAll <- function (path = ".", zipfiles = NULL, replace = FALSE,
delete.source = FALSE)
{
	## First, switch to that directory
	if (!checkDirExists(path)) return(invisible(FALSE))
	initdir <- setwd(path)
	on.exit(setwd(initdir))
	path <- "."	# Indicate we are now in the right path
	## Get the list of zip archives to process
	if (!length(zipfiles))	# Compute them from path
		zipfiles <- dir(path, pattern = extensionPattern("zip")) # All .zip
	## If there are no .zip files in this dir, exit now
	if (!length(zipfiles)) {
		warning("There are no ZIP archives to process in ", getwd())
		return(invisible(FALSE))
	}
	## Make sure there is no path associated
	if (!all(zipfiles == basename(zipfiles))) {
		warning("You cannot provide paths for 'zipfiles', just file names")
		return(invisible(FALSE))
	}
	## Uncompress these files
	message("Decompression of .zip archives...")
	flush.console()
	ok <- batch(zipfiles, unzipImg, replace = replace,
		delete.source = delete.source, verbose = FALSE)
	if (!ok) {
		warning(sum(attr(ok, "ok")), "/", length(zipfiles),
			" archives were uncompressed (see .last.batch)")
		invisible(FALSE)
	} else {
		message("-- Done! --")
		invisible(TRUE)
	}
}
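## example (restore images and metadata from the '_raw' subdir; hypothetical
## path):
## unzipImgAll("g:/zooplankton/Madagascar2Macro/_raw")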
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zip.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Read data from a .zis file
zisRead <- function (zisfile = "Description.zis",
expected.sections = c("Description", "Series", "Cruises", "Stations", "Samples"))
{
    if (!checkFileExists(zisfile, extension = "zis", force.file = TRUE))
		return(NULL)
	if (!checkFirstLine(zisfile)) return(NULL)
	rl <- readLines(zisfile,  encoding = "UTF-8")
	if (length(rl) <= 1) {
		warning("the file is empty or corrupted!")
		return(NULL)
	}
	positions <- grep("^[[].*[]]", rl)
	sections <- sub("^[[](.*)[]]", "\\1", rl[positions])
	if (!all(expected.sections %in% sections)) {
		warning("incorrect ZIS file; it does not contain all expected sections")
		return(NULL)
	}
	start <- positions + 1
	end <- c(tail(positions, -1) - 2, length(rl))
	readData <- lapply(seq_along(start), function (i) {
		if (sections[i] == "Description") {
			rx <- "^(.*?)=(.*)$"
			txt <- rl[start[i] : end[i]]
			variables <- sub(rx, "\\1", txt)
			values <- sub(rx, "\\2", txt)
			out <- data.frame(matrix(values, nrow = 1))
			names(out) <- variables
		} else {
			con <- textConnection(rl[start[i] : end[i]])
			on.exit(close(con))
			out <- read.table(con, sep = "\t", header = TRUE, dec = getDec(),
				blank.lines.skip = FALSE)
			names(out)[1] <- sub("^X\\.", "", names(out)[1])
			out <- out[, !grepl("^X\\.[0-9]+", names(out))]
		}
		return(out)
	})
	names(readData) <- sections
	Samples <- readData[["Samples"]]
	## There may be a <<<DATE>>> placeholder instead of a real date
    if (length(Samples$Date) == 1 && Samples$Date != "<<<DATE>>>") {
         res <- try(Samples$Date <- as.Date(Samples$Date), silent = TRUE)
         if (inherits(res, "try-error"))
             warning(res)
    }
	Series <- readData[["Series"]]
	Cruises <- readData[["Cruises"]]
	res <- try(Cruises$Start <- as.Date(Cruises$Start), silent = TRUE)
	if (inherits(res, "try-error")) {
	  warning("Cruise start is not interpretable as a valid date")
	  Cruises$Start <- as.Date(NA)
	}
	res <- try(Cruises$End <- as.Date(Cruises$End), silent = TRUE)
	if (inherits(res, "try-error")) {
	  warning("Cruise end is not interpretable as a valid date")
	  Cruises$End <- as.Date(NA)
	}
	Stations <- readData[["Stations"]]
	res <- try(Stations$Start <- as.Date(Stations$Start), silent = TRUE)
	if (inherits(res, "try-error")) {
	  warning("Station start is not interpretable as a valid date")
	  Stations$Start <- as.Date(NA)
	}
	res <- try(Stations$End <- as.Date(Stations$End), silent = TRUE)
	if (inherits(res, "try-error")) {
	  warning("Station end is not interpretable as a valid date")
	  Stations$End <- as.Date(NA)
	}
	Description <- readData[["Description"]]
	## Combine all this in a data frame + metadata
	structure(Samples,
		metadata =  list(Desc = Description, Series = Series, Cruises = Cruises,
		Stations = Stations), class = c("ZIDesc", "data.frame"))
}
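## example: samples are returned as a data frame; series, cruises and stations
## are available through the 'metadata' attribute (hypothetical file):
## smp <- zisRead("Description.zis")
## attr(smp, "metadata")$Cruises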
## Create a .zis file
zisCreate <- function (zisfile, template = NULL,
edit = TRUE, editor = getOption("fileEditor"), wait = FALSE)
{
	## Use a ui to get the file name
	if (missing(zisfile) || !length(zisfile) || zisfile == "") {
		zisfile <- dlgInput("Give a name for the new ZIS file:",
			title = "ZIS file creation", default = "Description.zis")$res
		if (!length(zisfile)) return(invisible(FALSE))
		if (!hasExtension(zisfile, "zis"))
			zisfile <- paste(zisfile, ".zis", sep = "")
	}
    ## If the file already exists, edit current version
	if (file.exists(zisfile))
		if (isTRUE(edit)) {
			return(zisEdit(zisfile, editor = editor, wait = wait))
		} else return(invisible(TRUE))
	## Look for the template
	if (is.null(template))
		template <- file.path(getOption("ZITemplates"), "Description.zis")
	if (!checkFileExists(template, "template '%s' not found", extension = "zis"))
		return(invisible(FALSE))
	## Copy the template into the new file
	file.copy(template, zisfile)
	## Possibly edit this new file
	if (isTRUE(edit)) {
		return(zisEdit(zisfile, editor = editor, wait = wait))
	} else return(invisible(TRUE))
}
## Edit a .zis file
zisEdit <- function (zisfile, editor = getOption("fileEditor"), wait = FALSE, ...)
{
    if (missing(zisfile) || !length(zisfile) || zisfile == "") {
		zisfile <- selectFile("Zis")
		if (zisfile == "") return(invisible(FALSE))
	} else if (!checkFileExists(zisfile,
		message = "the file '%s' is not found!", extension = "zis"))
		return(invisible(FALSE))
	fileEdit(zisfile, editor = editor, wait = wait, ...)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zis.R 
 | 
					
	## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage.  If not, see <http://www.gnu.org/licenses/>.
## Loading and unloading ZooImage
.onLoad <- function (libname, pkgname)
{
	if (!interactive()) options(ZIAssistant  = FALSE)
	## Use the SciViews style for dialog boxes
	options(guiStyle = "SciViews")
	## Did we redefine the ZooImage config?
	redef <- !is.null(getOption("ZI.redefine"))
	options(ZI.redefine = NULL)
	## Create some strings in TempEnv
	ZIversion <- packageDescription("zooimage", fields = "Version")
	assignTemp("ZIversion", ZIversion)
	ZIname <- getTemp("ZIname")
	if (!redef || is.null(ZIname)) ZIname <- "ZooImage"
	assignTemp("ZIname", ZIname)
	assignTemp("ZIverstring", paste(ZIname, "version", ZIversion))
	ZIetc <- getTemp("ZIetc")
	if (!redef || is.null(ZIetc))
		ZIetc <- system.file("etc", package = "zooimage")
	assignTemp("ZIetc", ZIetc)
	ZIgui <- getTemp("ZIgui")
	if (!redef || is.null(ZIgui))
		ZIgui <- system.file("gui", package = "zooimage")
	assignTemp("ZIgui", ZIgui)
	## Windows specific things
	#if (isWin()) {
		#if (interactive()) {
		#	ZIico <- getTemp("ZIico")
		#	if (!redef || is.null(ZIgui))
		#		ZIico <- tk2ico.create(file.path(getTemp("ZIgui"),
		#			"ZooImage.ico"))
		#	assignTemp("ZIico", ZIico)
		#}
		## Make sure there is a key for ZooImage in the registry
		## PhG: what is the purpose of this code?
		#ZIkey <- "HKEY_LOCAL_MACHINE\\Software\\ZooImage"
		#res <- try(tk2reg.setkey(ZIkey), silent = TRUE)
		#assignTemp("ZIkey", ZIkey)
	#}
	## Load the various image resources
	#if (!redef && interactive()) ImgReadPackage("zooimage")
	## Load the menus
	#if (!redef && interactive()) MenuReadPackage("zooimage")
	## Possibly create the ZIguiPackage variable to indicate from where to load
	## other GUI resources
	ZIguiPackage <- getTemp("ZIguiPackage")
	if (!redef || is.null(ZIguiPackage))
		ZIguiPackage <- "zooimage"
	assignTemp("ZIguiPackage", ZIguiPackage)
	## The directory that contains binary executables
	#bindir <- system.file("bin", package = "zooimage")
	## PhG: executables are not provided anymore with zooimage (not allowed by
	## CRAN where it is distributed now), but you must install them manually
	## in a given directory...
	if (isWin()) {
		bindir <- "c:/progra~1/Zooimage/bin"
		if (!file.exists(bindir))
			bindir <- "c:/progra~2/Zooimage/bin"
		if (!file.exists(bindir)) {
			bindir <- ""
		} else options(zooimage.bindir = bindir)
	}
	## Determine where to find ImageJ
	## TODO... currently, it is in a fixed position
	## TODO: no need to ship the exe file, we can just ship a simple
	## bat file with java -jar ij.jar -ijpath=./plugins
	if (interactive()) {
		if (isWin()) {
			ImageJExe <- file.path(bindir, "Fiji.app", "ImageJ.exe")
			if (!file.exists(ImageJExe))
				ImageJExe <- file.path(bindir, "ImageJ", "ImageJ.exe")
		} else if (isMac()) {
			#ImageJExe <- "/Applications/Fiji/Fiji.app/Contents/MacOS/fiji-macosx"
			ImageJExe <- "open /Applications/Fiji/Fiji.app"
		} else {
			## TODO... Get ImageJ executable
			ImageJExe <- "fiji"
		}
		if (file.exists(ImageJExe)) options(ImageEditor = ImageJExe)
	} else options(ImageEditor = "")
	## Determine where to find XnView
	## TODO... currently, it is in a fixed position
	if (interactive()) {
		if (isWin()) {
			XnViewExe <- file.path(bindir, "XnView", "XnView.exe")
		} else if (isMac()) {
			XnViewExe <- "/Applications/Utilities/XnViewMP.app/Contents/MacOS/xnview"
		} else {
			XnViewExe <- "nautilus --geometry 600x600"
		}
		if (file.exists(XnViewExe)) options(ImageViewer = XnViewExe)
	} else options(ImageViewer = "")
	
	## Determine where to find VueScan
	## TODO... currently, it is in a fixed position
	if (interactive()) {
		if (isWin()) {
			VueScanExe <- file.path(bindir, "VueScan", "VueScan.exe")
		} else if (isMac()) {
			VueScanExe <- "/Applications/VueScan.app/Contents/MacOS/VueScan"
		} else {
			## TODO: other locations for Mac or Linux?!
			VueScanExe <- "vuescan"
		}
		if (file.exists(VueScanExe)) options(VueScan = VueScanExe)
	} else options(VueScan = "")
	
	## Under Windows, define the metadata editor
	if (interactive()) {
		if (isWin()) {
			Metaeditor <- file.path(bindir, "MetaEditor", "Sc1.exe")
		} else if (isMac()) {
			## TODO: which one to use?
			Metaeditor <- ""
		} else {
			## TODO: other locations for Linux?!
			Metaeditor <- ""
		}
		if (file.exists(Metaeditor)) options(fileEditor = Metaeditor)
	} else options(fileEditor = "")
	
	## Possibly load the ZooImage assistant
	LoadIt <- getOption("ZIAssistant")
	if (is.null(LoadIt) || LoadIt == TRUE) ZIDlg()
	## Set the default template directory
	if (is.null(getOption("ZITemplates")))
		options(ZITemplates = system.file("templates", package = "zooimage"))
	## Switch to the default directory, if defined
	defdir <- getOption("ZI.DefaultDirectory", "")
    if (defdir != "" && file.exists(defdir) && file.info(defdir)$isdir)
        setwd(defdir)
}
## Unloading ZooImage
.onUnload <- function (libpath)
{
	## Eliminate the ZooImage menu entries
	if (.Platform$GUI[1] == "Rgui") {
		try(menuDel("$ConsoleMain/ZooImage"), silent = TRUE)
		try(menuDel("$ConsolePopup/ZooImage"), silent = TRUE)
	}
	closeAssistant()
}
## R version < 2.15.0 does not have paste0 => create it here
if (compareRVersion("2.15.0") < 0) {
	paste0 <- function (..., collapse = NULL)
		paste(..., sep = "", collapse = collapse)
} 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/R/zzz.R 
 | 
					
	## Zoo/PhytoImage simplified analysis UI (run the application)
## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
## 
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
## 
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
## 
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## TODO: allow for placing samples in subdirs + use tree view 
## Get the working directory
if (!exists(".ZI"))
    stop("You must run this app from within a method script!")
inidir <- dirname(.ZI$wdir)
cat("Directory:", inidir, "\n")
## Used to print a report after exiting the shiny app
print.reportObj <- function (x, ...) {
    line <- paste0(c("\n", rep('-', getOption("width")), "\n"))
    cat(line, paste0(x, collapse = "\n"), line, sep = "")
    invisible(x)
}
## Additional functions required by the UI
## Same as headerPanel, but taking less space, using a compact div with
## strong() instead of h1
smallHeaderPanel <- function (title, windowTitle = title) {
    tagList(tags$head(tags$title(windowTitle)), div(class = "span12", 
        style = "padding: 2px 0px;", strong(title)))
}
#smallTitlePanel <- function (title, windowTitle = title) {
#    tagList(tags$head(tags$title(windowTitle)), h5(style = "padding: 2px 0px;", 
#        title))
#}
## Define UI for default process using a config .R script in zooimage
## TODO: change the title according to actual name and version of the software
## TODO: translate UI strings (English and French interfaces)
uiTitle <- paste0("Zoo/PhytoImage version 5.4-6 (UMONS/IFREMER rephy release) - ",
    .ZI$method, " - ", .ZI$user)
### List all available methods
#Methods <- dir(file.path(inidir, "_analyses"), pattern = "\\.R$")
#if (!length(Methods)) stop("No methods defined in that directory")
### Eliminate .R
#Methods <- sub("\\.R$", "", Methods)
#Methods <- .ZI$method
### Prepare for first method
#source(paste(file.path(inidir, "_analyses", .ZI$method), "R", sep = "."), chdir = TRUE)
## List all samples currently available
listSamples <- function (path, method, unanalyzed.only = FALSE) {
    res <- dir(path)
    if (!length(res)) return(character(0))
    ## Eliminate hidden dirs and files (starting with "_")
    res <- res[substr(res, 1, 1) != "_"]
    if (!length(res)) return(character(0))
    ## Keep only dirs or .zidb files
    res <- res[grepl("\\.zidb$", res) | file.info(file.path(path, res))$isdir]
    if (!length(res)) return(character(0))
    ## Copy res to files, and eliminate .zidb extensions from res
    files <- rev(res)
    res <- rev(sub("\\.zidb$", "", res))
    ## Where there is a dir and a .zidb file for the same sample, eliminate dir
    keep <- !duplicated(res)
    ## Select samples and files, reverting them back to their original order
    res <- rev(res[keep])
    if (!length(res)) return(character(0))
    files <- rev(files[keep])
    ## Determine which sample is imported (has a .zidb file)
    imp <- grepl("\\.zidb$", files)
    ## Determine if some of these files are already processed 
    proc <- dir(file.path(path, "_analyses", method),
        pattern = "_valid\\.RData$")
    if (length(proc)) {
        ## Keep only those items that are in res
        procsmp <- sub("_valid\\.RData$", "", proc)
        proc <- (res %in% procsmp)
    } else proc <- rep(FALSE, length(res))
    ## Create display names prefixed with a status tag: [ ] (raw), [I] (imported), [A] (analyzed)
    status <- rep("[ ]", length(res))
    status[imp] <- "[I]"
    status[proc] <- "[A]"
    nms <- paste(status, res)
  
    ## If keep unanalyzed only, select corresponding items
    #    if (isTRUE(as.logical(unanalyzed.only))) {
    #        res <- res[!proc]
    #        nms <- nms[!proc]
    #        files <- files[!proc]
    #        imp <- imp[!proc]
    #    }
  
    ## Create a list with samples, files and processed
    list(samples = res, names = nms,  files = files, imported = imp,
        analyzed = proc)
}
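## Illustrative sketch (hypothetical sample names) of the returned structure
## for a directory holding an imported sample "A1.zidb" and a raw,
## not-yet-imported sample directory "B2":
## list(samples = c("A1", "B2"), names = c("[I] A1", "[ ] B2"),
##      files = c("A1.zidb", "B2"), imported = c(TRUE, FALSE),
##      analyzed = c(FALSE, FALSE))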
AllSamples <- listSamples(inidir, method = .ZI$method)
calcSample <- function (Sample, input, output, session)
{
    ## Is this sample already imported?
    ## Try to import it anyway with replace = FALSE
    if (file.exists(file.path(inidir, Sample))) {
        ## Get the first .lst file
        Lst <- dir(file.path(inidir, Sample), pattern = "\\.lst$",
            full.names = TRUE)[1]
        if (!is.na(Lst)) { # [1] returns NA when no .lst file is found
            res <- try(importFlowCAM(Lst, rgb.vigs = FALSE, replace = FALSE),
                silent = TRUE)
            if (inherits(res, "try-error")) {
                stop("Error importing sample ", Sample)
            } else { # Update list
                Method <- .ZI$method #input$method
                AllSamples <- listSamples(inidir, method = Method)
                #, input$newonlyCheck)
                ## Is this sample validated?
                ## TODO: if reimported => backup validation data and clear it now!
                if (file.exists(file.path(inidir, "_analyses", Method,
                    paste(Sample, "valid.RData", sep = "_")))) {
                    tag <- "[A]"
                } else tag <- "[I]"
        
                updateSelectInput(session, "sample", choices = AllSamples$names,
                    selected = paste(tag, Sample))
            }
        }
    }
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/inst/gui/errorcorrection/global.R 
 | 
					
	## Zoo/PhytoImage simplified analysis UI (server code)
## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
## & Guillaume Wacquet <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## TODO: allow for placing samples in subdirs + use tree view
## TODO: add "Stat" button for fully validated samples
## TODO: translate server messages (English and French interfaces)
## TODO: allow downloading the data with something like:
## In server.R:
#output$downloadData <- downloadHandler(
#  filename = function() {
#    paste('data-', Sys.Date(), '.csv', sep='')
#  },
#  content = function(file) {
#    write.csv(data, file)
#  }
#)
#
## In ui.R:
#downloadLink('downloadData', 'Download')
##
## - Use includeMarkdown()
##
## - Use renderDataTable(), e.g.,
## Pass a callback function to DataTables using I()
#renderDataTable(iris, options = list(
#  iDisplayLength = 5,
#  fnInitComplete = I("function(oSettings, json) {alert('Done.');}")
#))
shinyServer(function (input, output, session) {
    doAnalysis <- reactive({
        generalMessage <- function(message) {
            paste0("______________________________________________________________________",
                #"\nTotal samples:     ", length(AllSamples$names),
                "\nSamples to process: ", sum(!AllSamples$analyzed),
                "\nProcessed samples:  ", sum(AllSamples$analyzed),
                "\n\n", message, "\n",
                "______________________________________________________________________\n")
        }
        if (input$goButton == 0)
            return(generalMessage("(No samples have been analyzed yet during this session)."))
        isolate({
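            ## Sample names carry a 4-character status prefix ("[ ] ",
            ## "[I] " or "[A] "); substring(, 5) strips it off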
            Sample <- substring(input$sample, 5)
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            ## Determine if we already got some data...
            ## First look at "demo" data _valid0.RData
            DemoFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                paste(Sample, "valid0.RData", sep = "_"))
            SampleFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                paste(Sample, "valid.RData", sep = "_"))
            MetaFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                paste(Sample, "valid.txt", sep = "_"))
            ResFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                paste(Sample, "res.RData", sep = "_"))
            ValidData <- paste(Sample, "valid", sep = "_")
            ResData <- paste(Sample, "res", sep = "_")
            if (exists(ValidData, inherits = FALSE)) rm(list = ValidData)
#             CtxSmp <- contextSelection()
#             if (length(CtxSmp) < 1) {
#                 warning("No contextual samples selected! Initial training set will be used.")
#             } else {
#                 ## TODO: merge with activeLearningGUI
#                 .ZITrain <- addItemsToTrain(.ZITrain, CtxSmp,
#                     dropItemsToTrain = dropItemsToTrain)
#             }
            # PhG: This is problematic with scanner data, so deactivate it for now
            #.ZITrain <- activeLearning(.ZITrain)
            assign(.ZI$classif, eval(parse(text = .ZI$classifcmd)))
            .ZIClass <- get(.ZI$classif)
            if (file.exists(DemoFile)) { # Run in demo mode
                res <- load(DemoFile)
                DemoData <- get(res)
                rm(list = res)
                ce <- correctError(zidb = ZIDB, classifier = .ZIClass,
                    data = DemoData, mode = "demo")
            ## Note: in demo mode we currently save nothing
            ## (TODO: or should the data be saved?)
            } else {
                ## Are there some data already available?
                if (file.exists(SampleFile)) { # Reanalyze the sample
                    res <- load(SampleFile)
                    SampleData <- get(res)
                    rm(list = res)
                    ce <- correctError(zidb = ZIDB, classifier = .ZIClass,
                        data = SampleData)
                } else { # Nothing available: start from scratch
                    ce <- correctError(zidb = ZIDB, classifier = .ZIClass)
                }
            } #x <- "Demo found" else x <- "Demo not found"
            ## Backup sample and metadata files if they exist
            if (file.exists(SampleFile))
                file.copy(SampleFile, paste(SampleFile, "bak", sep = "."))
            unlink(SampleFile)
            if (file.exists(MetaFile))
                file.copy(MetaFile, paste(MetaFile, "bak", sep = "."))
            unlink(MetaFile)
            if (file.exists(ResFile))
                file.copy(ResFile, paste(ResFile, "bak", sep = "."))
            unlink(ResFile)
            ## The following code fails while we are still validating items...
            ## TODO: associate name of validator + date
            res <- try(save(list = ValidData, file = SampleFile), silent = TRUE)
            while (inherits(res, "try-error")) {
                Sys.sleep(0.5) # Wait 1/2 sec
                res <- try(save(list = ValidData, file = SampleFile),
                    silent = TRUE)
            }
            ## Save associated metadata
            cat("zooimage version: 5.4-12\n", file = MetaFile)
            cat("method: ", .ZI$method, "\n", sep = "",
                file = MetaFile, append = TRUE)
            cat("user: ", .ZI$user, "\n", sep = "",
                file = MetaFile, append = TRUE)
            cat("date: ", as.character(Sys.time()), "\n", sep = "",
                file = MetaFile, append = TRUE)
            cat("training set: ", .ZI$train, "\n", sep = "",
                file = MetaFile, append = TRUE)
            if("AddedItems" %in% names(.ZITrain)) {
                cat("contextual samples: ",
                    as.character(unique(.ZITrain$Label[.ZITrain$AddedItems == TRUE])),
                    sep = "\n", file = MetaFile, append = TRUE)
            }
            ## TODO: should this be recorded too?
            #cat("training file: ", .ZI$trainfile, "\n", sep = "",
            #    file = MetaFile, append = TRUE)
            cat("classifier: ", .ZI$classif, "\n", sep = "",
                file = MetaFile, append = TRUE)
            ## TODO: should this be recorded too?
            #cat("classifier file: ", .ZI$classifile, "\n", sep = "",
            #    file = MetaFile, append = TRUE)
            cat("classifier cmd: ", .ZI$classifcmd, "\n", sep = "",
                file = MetaFile, append = TRUE)
            cat("size breaks: ", paste(.ZI$breaks, collapse = "-"), "\n", sep = "",
                file = MetaFile, append = TRUE)
            cat("biovolume conversion: \n", sep = "",
                file = MetaFile, append = TRUE)
            write.table(.ZI$biovolume, sep = "\t", dec = ".", row.names = FALSE,
                col.names = TRUE, file = MetaFile, append = TRUE)
            ## Calculate results for this sample
            dat2 <- get(ValidData)
            cl <- levels(dat2$Class) # All classes
            ## Classes of interest start with an uppercase letter, thus:
            cl <- cl[grepl("^[A-Z]", cl)]
            ## Now, we also want to calculate separate abundances for most abundant classes
            ## i.e., those with at least 50 individuals measured
            detail <- cl[cl %in% levels(dat2$Class)[table(dat2$Class) >= 50]]
            ## Calculate results for this sample
            ## TODO: correct the bug with keep = cl => replacement has different number of rows
            #assign(ResData, processSample(dat2, keep = cl, detail = detail,
            #    biomass = .ZI$biovolume, breaks = .ZI$breaks, classes = "Class"))
            #assign(ResData, processSample(dat2, keep = NULL, detail = detail,
            #    biomass = .ZI$biovolume, breaks = .ZI$breaks, classes = "Class"))
            ## With cellModels...
            assign(ResData, processSample(dat2, keep = NULL, detail = detail, cells = .ZI$cellModelsfile,
                biomass = .ZI$biovolume, breaks = .ZI$breaks, classes = "Class"))
            ## Save it
            save(list = ResData, file = ResFile)
            ## Report success
            x <- paste("The sample", Sample, "had just been analyzed).")
            Method <- .ZI$method #input$method
            AllSamples <- listSamples(inidir, method = Method)
            if (file.exists(file.path(inidir, "_analyses", Method,
                paste(Sample, "valid.RData", sep = "_")))) {
                tag <- "[A]"
            } else tag <- "[I]"
            updateSelectInput(session, "sample", choices = AllSamples$names,
                selected = paste(tag, Sample))
            return(generalMessage(x))
        })
    })
    #output$generalSummary <- renderText({
    #  if (input$stopButton) { # Manage clean closing of the page
    #    ## Reactivate R
    #    ## TODO: change this code to get the name of R application under Mac OS X
    #    GUI <- .Platform$GUI
    #    if (GUI == "Rgui") { # Code for RGui under Windows
    #        try(bringToTop(-1), silent = TRUE)
    #    } else if (GUI == "AQUA") { # Code for R/R64/SciViews R64.app
    #        ## This works from Mac OS X 10.5 Leopard:
    #        try(system("osascript -e 'tell application id \"Rgui\" to activate'",
    #            ignore.stdout = TRUE, ignore.stderr = TRUE), silent = TRUE)
    #        #try(system("osascript -e 'tell application \"R\" to activate'",
    #        #    ignore.stdout = TRUE, ignore.stderr = TRUE), silent = TRUE)
    #        #try(system("osascript -e 'tell application \"R64\" to activate'",
    #        #    ignore.stdout = TRUE, ignore.stderr = TRUE), silent = TRUE)
    #        #try(system("osascript -e 'tell application \"SciViews R64\" to activate'",
    #        #    ignore.stdout = TRUE, ignore.stderr = TRUE), silent = TRUE)
    #    } else if (grepl("^mac", .Platform$pkgType)) { # Try code for Terminal.app
    #        try(system("osascript -e 'tell application \"Terminal\" to activate'",
    #            ignore.stdout = TRUE, ignore.stderr = TRUE), silent = TRUE)
    #    }
    #
    #    ## Stop the application, returning a short report of what was done
    #    report <- structure("Content of my report here...", class = "reportObj")
    #    stopApp(report)
    #
    #    ## Indicate the app is disconnected
    #    paste(strong(em("Application disconnected!")))
    #
    #  } else { # Indicate number of samples to process and number analyzed
    #    ## TODO: make this reactive to the change to the list of samples
    #    paste(em("A traiter:"), strong(em(sum(!AllSamples$analyzed))),
    #      em(" -  analysés:"), strong(em(sum(AllSamples$analyzed))))
    #  }
    #})
    output$sampleSummary <- renderPrint(width = 80, {
      if (input$stopButton) {
        #updateTabsetPanel(session, "mainTabset", selected = "Summary")
      } else {
            ## Also update the list of samples, depending on both method and newonlyCheck
         #   AllSamples <- listSamples(inidir, method = .ZI$method, input$newonlyCheck)
         #   updateSelectInput(session, "sample", choices = AllSamples$names)
            Sample <- substring(input$sample, 5)
            calcSample(Sample, input, output, session)
            ## Link to the .zidb file and provide a summary of this sample
            cat("===", Sample, "===\n")
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            Dat <- zidbDatRead(ZIDB)
            cat("Sample containing", nrow(Dat), "digitized particules.\n")
            if (substr(input$sample, 1, 3) == "[A]") {
                ## Get analysis statistics about this sample
                #if (!exists("SampleData")) {
                    ## Download the data!
                    SampleFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                        paste(Sample, "valid.RData", sep = "_"))
                    if (file.exists(SampleFile)) {
                        res <- load(SampleFile)
                        SampleData <- get(res)
                        rm(list = res)
                    }
                #}
                res <- try(print(table(SampleData$Class)), silent = TRUE)
                if (inherits(res, "try-error"))
                    cat("\nStatistical analysis not available for the sample\n")
            } else cat("\nThis sample is not yet analyzed with the method '", .ZI$method, "'.", sep = "")
            #head(Dat)
            #print(summary(Dat[, c("ECD")]))
            #print(attr(Dat, "metadata"))
            #plot(Dat$Area, Dat$Perim.)
            #cat("Ici, le résumé de", Sample)
            cat("\n", doAnalysis())
        }
    })
    output$sampleTable <- renderDataTable(options = list(pageLength = 50), {  #renderTable({
        if (input$stopButton) {
            updateTabsetPanel(session, "mainTabset", selected = "Summary")
        } else {
            doAnalysis()
            Sample <- substring(input$sample, 5)
            calcSample(Sample, input, output, session)
            ## Link to the .zidb file and provide a summary of this sample
            #cat("===", Sample, "===\n")
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            ## Depending on whether the sample is analyzed or not, we look at
            ## the ZITest or ZIDat object
            if (substr(input$sample, 1, 3) == "[A]") {
                ## Get analysis statistics about this sample
                #if (!exists("SampleData")) {
                    ## Download the data!
                    SampleFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                        paste(Sample, "valid.RData", sep = "_"))
                    if (file.exists(SampleFile)) {
                        res <- load(SampleFile)
                        SampleData <- get(res)
                        rm(list = res)
                    }
                #}
                res <- try(Dat50 <- head(SampleData, n = 50), silent = TRUE)
                if (inherits(res, "try-error")) {
                    Dat <- zidbDatRead(ZIDB)
                    Dat50 <- head(Dat, n = 50)
                    Dat50b <- Dat50
                    Dat50b$Label <- NULL
                    Dat50b$Item <- NULL
                    Dat50b$ECD <- NULL
                    data.frame(Label = Dat50$Label, Item = Dat50$Item,
                        ECD = Dat50$ECD, Dat50b)
                } else {
                    Dat50b <- Dat50
                    Dat50b$Label <- NULL
                    Dat50b$Item <- NULL
                    Dat50b$ECD <- NULL
                    Dat50b$Class <- NULL
                    Dat50b$Predicted <- NULL
                    Dat50b$Id <- NULL
                    Dat50b$Id.1 <- NULL
                    data.frame(Label = Dat50$Label, Item = Dat50$Item,
                        ECD = Dat50$ECD, Class = Dat50$Class, Dat50b)
                        #Dat50$Predicted,Dat50$ECD, Dat50b)
                }
            } else {
                Dat <- zidbDatRead(ZIDB)
                Dat50 <- head(Dat, n = 50)
                Dat50b <- Dat50
                Dat50b$Label <- NULL
                Dat50b$Item <- NULL
                Dat50b$ECD <- NULL
                data.frame(Label = Dat50$Label, Item = Dat50$Item,
                    ECD = Dat50$ECD, Dat50b)
            }
        }
    })
    output$samplePlot <- renderPlot({
        if (input$stopButton) {
            updateTabsetPanel(session, "mainTabset", selected = "Summary")
        } else {
            ## This is only in shiny 0.10.2!!
            #withProgress(message = 'Calculation in progress',
            #    detail = '...', value = 0, {
            #    for (i in 1:15) {
            #        incProgress(1/15, detail = paste0("...", i, "/15"))
            #        Sys.sleep(0.25)
            #    }
            #})
            Sample <- substring(input$sample, 5)
            calcSample(Sample, input, output, session)
            ## Link to the .zidb file and provide a summary of this sample
            #cat("===", Sample, "===\n")
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            Dat <- zidbDatRead(ZIDB)
            hist(Dat$ECD, col = "cornsilk", breaks = "FD",
                main = "Particule size distribution",
                xlab = "ECD", ylab = "Frequency")
        }
    })
    output$vignettesPlot <- renderPlot({
        if (input$stopButton) {
            updateTabsetPanel(session, "mainTabset", selected = "Summary")
        } else {
            Sample <- substring(input$sample, 5)
            calcSample(Sample, input, output, session)
            ## Link to the .zidb file and provide a summary of this sample
            #cat("===", Sample, "===\n")
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            DB <- zidbLink(ZIDB)
            Items <- ls(DB) # Contains data in *_dat1 and vignettes in *_nn
            ## Eliminate items that are not vignettes
            noVig <- grep("_dat1", Items)
            if (length(noVig)) Vigs <- Items[-noVig] else Vigs <- Items
            ## Display a 6 x 5 thumbnail grid of the first 30 vignettes
            zidbPlotNew(Sample)
            ImgType <- DB$.ImageType
            for (i in 1:30)
                zidbDrawVignette(DB[[Vigs[i]]], type = ImgType, item = i,
                    nx = 6, ny = 5)
        }
    })
    output$sampleResults <- renderPrint({
        if (input$stopButton) {
            updateTabsetPanel(session, "mainTabset", selected = "Summary")
        } else {
            ## Also update the list of samples, depending on both method and newonlyCheck
         #   AllSamples <- listSamples(inidir, method = .ZI$method, input$newonlyCheck)
         #   updateSelectInput(session, "sample", choices = AllSamples$names)
            Sample <- substring(input$sample, 5)
            calcSample(Sample, input, output, session)
            ## Link to the .zidb file and provide a summary of this sample
            cat("===", Sample, "===\n")
            ZIDB <- file.path(inidir, paste(Sample, "zidb", sep = "."))
            Dat <- zidbDatRead(ZIDB)
            cat("Sample containing", nrow(Dat), "digitized particules.\n")
            if (substr(input$sample, 1, 3) == "[A]") {
                ## Get analysis statistics about this sample
                #if (!exists("SampleData")) {
                    ## Download the data!
                    SampleFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                        paste(Sample, "valid.RData", sep = "_"))
                    if (file.exists(SampleFile)) {
                        res <- load(SampleFile)
                        SampleData <- get(res)
                        rm(list = res)
                    }
                #}
                # Show results for this sample...
                ResFile <- file.path(inidir, "_analyses", .ZI$method, #input$method,
                    paste(Sample, "res.RData", sep = "_"))
                if (file.exists(ResFile)) {
                    res <- load(ResFile)
                    ResData <- get(res)
                    rm(list = res)
                    ## Print results
                    print(ResData)
                } else {
                    cat("No results found for this samples!\n")
                }
            } else cat("\nThis sample is not yet analyzed with the method '", .ZI$method, "'.", sep = "")
            #head(Dat)
            #print(summary(Dat[, c("ECD")]))
            #print(attr(Dat, "metadata"))
            #plot(Dat$Area, Dat$Perim.)
            #cat("Here, summary of", Sample)
            cat("\n", doAnalysis())
        }
    })
})
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/inst/gui/errorcorrection/server.R 
 | 
					
	## Zoo/PhytoImage simplified analysis UI (UI definition)
## Copyright (c) 2004-2015, Ph. Grosjean <[email protected]>
##
## This file is part of ZooImage
##
## ZooImage is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## ZooImage is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ZooImage. If not, see <http://www.gnu.org/licenses/>.
## TODO: add "Stat" button for fully validated samples
#library(shinysky)
shinyUI(fluidPage(title = uiTitle,
    smallHeaderPanel(uiTitle),
    fluidRow(
        sidebarPanel(
            ##TODO: put this in header! helpText("Session name + link to inidir"),
            selectInput("sample", "Sampling/samples:", AllSamples$names,
                width = "100%"),
            #selectInput("method", "Méthode:", Methods, width = "100%"),
            #checkboxInput("newonlyCheck", "Seulement les échantillon non analysés"),
            #actionButton("importButton", "Reimporter"),
            actionButton("goButton", "(Re)analyser"),
            actionButton("stopButton", "Return to R")#,
#            hr(),
#            htmlOutput("generalSummary")#,
            #busyIndicator(text = "Analysis of XXX in progress...",
            #    wait = 1000)
        ),
        mainPanel(
            tabsetPanel(id = "mainTabset",
                tabPanel("Summary", icon = icon("list-alt"),
                    verbatimTextOutput("sampleSummary")),
                tabPanel("Table", icon = icon("table"),
                    dataTableOutput("sampleTable")),
                tabPanel("Vignettes", icon = icon("calendar"),
                    plotOutput("vignettesPlot")),
                tabPanel("Plot", icon = icon("bar-chart-o"),
                    plotOutput("samplePlot")),
                tabPanel("Results", icon = icon("refresh"),
                    verbatimTextOutput("sampleResults"))
            )
        )
    )
))
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/inst/gui/errorcorrection/ui.R 
 | 
					
	## Zoo/PhytoImage process REPHY version 1.0
## Copyright (c) 2014, Philippe Grosjean ([email protected])
## Note: we need to start with the directory containing this script as default one!
# e.g., setwd("~/Desktop/ZooPhytoImage/_analyses")
## Should use source("~/dir/file.R", chdir = TRUE) to get there temporarily
################################################################################
#### Parameters for this method
## This is the name of this method
.ZI <- list(user = "", date = Sys.time(), method = "Rephy 4X lugol v.1.0",
    wdir = getwd(), system = "")
.ZI$scriptfile <- paste(.ZI$method, "R", sep = ".")
## This is the training set to use
.ZI$train <- "trainRephy_4Xlugol.01"
.ZI$traindir <- file.path("..", "_train", .ZI$train)
.ZI$trainfile <- paste(.ZI$traindir, "RData", sep = ".")
.ZI$classif <- "classrfRephy_4Xlugol.01"
.ZI$classifile <- file.path("..", "_train",
    paste(.ZI$classif, "RData", sep = "."))
.ZI$classifcmd <- paste0('ZIClass(Class ~ ., data = ', .ZI$train,
    ', method = "mlRforest", calc.vars = calcVars, cv.k = 10)')
.ZI$cellModelsfile <- file.path(.ZI$traindir, "_cellModels.RData")
## Conversion factors for biovolume
## Biovolume calculation is P1 * ECD^P3 + P2
## TODO: fill this table, or use read.delim() on a text file
## TODO: also use number of cells per colony here...
.ZI$biovolume <- data.frame(
    Class = c("Chaetoceros_spp", "[other]"),
    P1 = c(1, 1),
    P2 = c(0, 0),
    P3 = c(1, 1)
)
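## Illustrative sketch of the conversion formula above (not used by the
## script itself): with the default coefficients P1 = 1, P2 = 0, P3 = 1,
## the biovolume reduces to the ECD.
## biovol <- function(ECD, P1 = 1, P2 = 0, P3 = 1) P1 * ECD^P3 + P2
## biovol(20)  # => 20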
.ZI$breaks <- seq(0, 200, by = 10) # In um
################################################################################
if (!require(zooimage)) stop("Please, install the 'zooimage' package")
if (!require(svDialogs)) stop("Please, install the 'svDialogs' package")
if (!require(shiny)) stop("Please, install the 'shiny' package")
## First of all, get system info and ask for the user name
.ZI$system <- paste(ZIverstring, R.version$version.string, R.version$platform,
    sep = "; ")
.ZI$user <- dlgInput("Who are you?", Sys.info()["user"])$res
if (!length(.ZI$user) || .ZI$user == "") { # The user clicked the 'cancel' button
    stop("You must identify yourself!")
}
## Change the way warnings are displayed
.owarn <- getOption("warn")
options(warn = 1) # Immediate issue of warnings
## Start... check that I am in the right directory
## The directory should be '_analyses', there must be a file named <method>.R
## in it, and a file named "../_train/<train>[.RData]" must be available too!
if (basename(.ZI$wdir) != "_analyses")
    stop("I am not in the right directory (should be '_analyses')")
if (!file.exists(.ZI$scriptfile))
    stop("A .R script file named '", .ZI$scriptfile,
        "' is not found in the current directory")
if (!file.exists(.ZI$traindir) && !file.exists(.ZI$trainfile))
    stop("Training set '", .ZI$train, "' not found in '",
        dirname(.ZI$traindir), "'") 
    
## Make sure the subdirectory for this method is created
if (!file.exists(.ZI$method)) {
    dir.create(.ZI$method)
} else if (!file.info(.ZI$method)$isdir) {
    stop("A file exists for the method '", .ZI$method,
        "', but it is not a directory!")
}
## Start reporting results
cat("\n=== Session with method", .ZI$method, "===\n\n")
## Do we need to import the training set?
if (!file.exists(.ZI$trainfile)) {
    cat("Please wait: we import the training set data now...")
    assign(.ZI$train, getTrain(.ZI$traindir))
    cat(" done!\n")
    cat("\nThe training set is saved as native R data for faster access.\n")
    save(list = .ZI$train, file = .ZI$trainfile)
} else { # Load the training set now
    cat("Loading the training set '", .ZI$train, "'...", sep = "")
    load(.ZI$trainfile)
    cat(" done!\n")
}
.ZITrain <- get(.ZI$train) # Copied into .ZITrain for easier use
## Give some stats about the training set
cat("The initial training set contains:\n")
print(table(.ZITrain$Class))
## Do we need to recreate the classifier?
if (!file.exists(.ZI$classifile)) {
    ## TODO: recreate it!
    cat("\nPlease wait: we build the classifier now...")
    assign(.ZI$classif, eval(parse(text = .ZI$classifcmd)))
    cat(" done!\n")
    cat("\nThe classifier is saved as native R data for faster access.\n")
    save(list = .ZI$classif, file = .ZI$classifile)
} else { # Load the classifier now
    cat("\nLoading the classifier '", .ZI$classif, "'...", sep = "")
    load(.ZI$classifile)
    cat(" done!\n")
}
.ZIClass <- get(.ZI$classif) # Copied into .ZIClass for easier use
attr(.ZIClass, "ActiveLearning") <- FALSE # No active learning yet!
## Give some stats about the classifier
cat("The classifier is:\n\n")
print(.ZIClass)
## Launch the errorcorrection Shiny app
cat("\nStarting error correction session...\n")
runApp(system.file("gui", "errorcorrection", package = "zooimage"))
## Reset the system
options(warn = .owarn)
## Done
cat("\n================================= done! =====\n")
## TODO: if we have .zid files + description.zis => convert first into .zidb!
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zooimage/inst/templates/method.R 
 | 
					
	#' Assemble Reference
#'
#' Function to build a reference dataframe selecting a case for each taxon from
#' the available specimens in the references' database.
#'
#' @param combination A dataframe or named list. Each (column) name identifies a
#' taxon. Each column or list element must have a single element of type
#' character, identifying one of the sources included in the references'
#' database.
#' @param ref.db A reference database. This is a named list of named lists of
#' dataframes. The first level is named by taxon and the second level is named
#' by reference source. Each dataframe includes the reference for the
#' corresponding taxon and source. The default
#' \code{ref.db = \link{referencesDatabase}} is provided as package
#' \pkg{zoolog} data.
#' @param thesaurus A thesaurus for taxa.
#'
#' @return
#' A reference dataframe.
#'
#' @examples
#' ## `referenceSets` includes a series of predefined reference compositions.
#' referenceSets
#' ## Actually, the package's `reference` data is built from them.
#' ## We can rebuild any of them:
#' referenceCombi <- AssembleReference(referenceSets["Combi", ])
#'
#' ## Define an alternative reference, combining the references'
#' ## database differently:
#' refComb <- list(cattle = "Nieto", sheep = "Davis", Goat = "Clutton",
#'                 pig = "Albarella", redDeer = "Basel")
#' userReference <- AssembleReference(refComb)
#'
#' @export
AssembleReference <- function(combination, ref.db = referencesDatabase,
                              thesaurus = zoologThesaurus$taxon)
{
  reference <- NULL
  taxIds <- sapply(names(combination),
                   function(x) which(InCategory(names(ref.db), x, thesaurus) |
                                       names(ref.db) %in% x))
  dup <- duplicated(taxIds) & sapply(taxIds, length) > 0
  if(any(dup))
  {
    stop(paste("The taxon", names(ref.db)[unlist(taxIds[dup])],
               "is duplicated in the requested combination."))
  }
  for(tax in names(combination))
  {
    source <- combination[[tax]]
    taxId <- taxIds[[tax]]
    catchError.AssembleReference(source, taxId, tax, ref.db)
    reference <- rbind(reference, ref.db[[taxId]][[source]])
  }
  reference
}
catchError.AssembleReference <- function(source, taxId, tax, ref.db)
{
  if(length(source) > 1)
  {
    stop(paste0("More than one component requested for taxon ", tax, "."))
  }
  if(length(taxId) == 0)
  {
    stop(paste("The name", tax, "does not correspond to any taxon",
               "in the references' database."))
  }
  if(length(taxId) > 1)
  {
    stop(paste("The refereces' database is badly formatted: Taxon", tax,
               "appears more than once."))
  }
  if(!(is.na(source) || source == "" || source %in% names(ref.db[[taxId]])))
  {
    stop(paste0("The references' database does not include any component ",
                source, " for the taxon ", tax, "."))
  }
}
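## Error-handling sketch (hypothetical source name): requesting a source
## absent from the references' database stops with an informative message:
## try(AssembleReference(list(cattle = "NoSuchSource")))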
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/AssembleReference.R 
 | 
					
# Collapse dataframe columns into a single vector.
# This allows finding coincidences of all column values in a single comparison
CollapseColumns <- function(df, ..., sepMark = "--&&--")
{
  apply(cbind(df, ...), 1, paste, collapse = sepMark)
}
SplitColumns <- function(x, colNames = NULL, sepMark = "--&&--")
{
  res <- as.data.frame(t(simplify2array(strsplit(x, sepMark, fixed = TRUE))))
  colnames(res) <- colNames
  return(res)
}
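## Round-trip sketch (toy data):
## df <- data.frame(Taxon = c("ovis", "bos"), Element = c("tibia", "femur"))
## keys <- CollapseColumns(df)  # "ovis--&&--tibia" "bos--&&--femur"
## SplitColumns(keys, c("Taxon", "Element"))  # recovers both columns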
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/CollapseColumns.R 
 | 
					
	#' Condense Measure Log-Ratios
#'
#' This function condenses the calculated log ratio values into a reduced number
#' of features by grouping log ratio values and selecting or calculating a
#' feature value. By default, each selected group represents a single
#' dimension, i.e. \code{Length}, \code{Width}, and \code{Depth}. Only one
#' feature is extracted per group.
#' Currently, two methods are possible: priority (default) or average.
#'
#' This operation is motivated by two circumstances. First, not all measurements
#' are available for every bone specimen, which obstructs their direct comparison
#' and statistical analysis. Second, several measurements can be strongly
#' correlated (e.g. SD and Bd both represent bone width).
#' Thus, considering them as independent would
#' produce an over-representation of bone remains with more measurements per
#' axis. Condensing each group of measurements into a single feature
#' (e.g. one measure per axis) palliates both problems.
#'
#' Observe that an important property of the log-ratios from a reference is that
#' it makes the different measures comparable. For instance, if a bone is
#' scaled with respect to the reference, so that it homogeneously doubles its
#' width, then all width related measures
#' (\emph{BT}, \emph{Bd}, \emph{Bp}, \emph{SD}, ...) will give the
#' same log-ratio (\code{log(2)}). In contrast, the
#' absolute measures are not directly comparable.
#'
#' The measurement names in the grouping list are given without the
#' \code{logPrefix}. But the selection is made from the log-ratios.
#'
#' The default method is \code{"priority"}, which selects the first available
#' measure log-ratio in each group. The method \code{"average"} extracts the
#' mean per group, ignoring the non-available measures.
#' We provide the following by-default group and prioritization:
#' For lengths, the order of priority is: GL, GLl, GLm, HTC.
#' For widths, the order of priority is: BT, Bd, Bp, SD, Bfd, Bfp.
#' For depths, the order of priority is: Dd, DD, BG, Dp.
#' This order maximises the robustness and reliability of the measurements,
#' as priority is given to the most abundant, more replicable, and less age
#' dependent measurements.
#'
#' This method was first used in:
#' Trentacoste, A., Nieto-Espinet, A., & Valenzuela-Lamas, S. (2018).
#' Pre-Roman improvements to agricultural production: Evidence from livestock
#' husbandry in late prehistoric Italy.
#' PloS one, 13(12), e0208109.
#'
#' Alternatively, a user-defined \code{method} can be provided as a function
#' with a single argument (data.frame) assumed to have as columns the measure
#' log-ratios determined by the \code{grouping}.
#'
#' @inheritParams LogRatios
#' @param grouping A list of named character vectors. The list includes a vector
#' per selected group. Each vector gives the group of measurements in order of
#' priority. By default the groups are
#' \code{Length = c("GL", "GLl", "GLm", "HTC")},
#' \code{Width = c("BT", "Bd", "Bp", "SD", "Bfd", "Bfp")}, and
#' \code{Depth = c("Dd", "DD", "BG", "Dp")}.
#' The order is irrelevant for \code{method = "average"}.
#' @param method Character string indicating which method to use for extracting
#' the condensed features. Currently accepted methods: \code{"priority"}
#' (default) and \code{"average"}.
#' @return A dataframe including the input dataframe and additional columns, one
#' for each extracted condensed feature, with the corresponding name given in
#' \code{grouping}.
#' @examples
#' ## Read an example dataset:
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package="zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#' ## For illustration purposes we keep now only a subset of cases to make
#' ## the example run sufficiently fast.
#' ## Avoid this step if you want to process the full example dataset.
#' dataExample <- dataExample[1:1000, ]
#'
#' ## Compute the log-ratios and select the cases with available log ratios:
#' dataExampleWithLogs <- RemoveNACases(LogRatios(dataExample))
#' ## We can observe the first lines (excluding some columns for visibility):
#' head(dataExampleWithLogs)[, -c(6:20,32:63)]
#'
#' ## Extract the default condensed features with the default "priority" method:
#' dataExampleWithSummary <- CondenseLogs(dataExampleWithLogs)
#' head(dataExampleWithSummary)[, -c(6:20,32:63)]
#'
#' ## Extract only width with "average" method:
#' dataExampleWithSummary2 <- CondenseLogs(dataExampleWithLogs,
#'                                grouping = list(Width = c("BT", "Bd", "Bp", "SD")),
#'                                method = "average")
#' head(dataExampleWithSummary2)[, -c(6:20,32:63)]
#' @export
CondenseLogs <- function(data,
                         grouping = list(
                             Length = c("GL", "GLl", "GLm", "HTC"),
                             Width = c("BT", "Bd", "Bp", "SD", "Bfd", "Bfp"),
                             Depth = c("Dd", "DD", "BG", "Dp") ),
                         method = "priority"
                        ) {
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  if(is.character(method) && (method %in% names(condenseMethod)))
    method <- condenseMethod[[method]]
  if(!is.function(method))
    stop(paste0("Not recognized method.\n",
                "Predefined accepted methods are ",
                paste0(paste0("\"", names(condenseMethod), "\""),
                       collapse = ", "), ".\n",
                "Alternatively, it can be a user defined function."))
  summaryMeasures <- names(grouping)
  data[, summaryMeasures] <- NA
  for (sumMeasure in summaryMeasures)
  {
    groupingWithLog <- paste0(logPrefix, grouping[[sumMeasure]])
    logMeasuresInData <- intersect(groupingWithLog, colnames(data))
    dataSelected <- as.data.frame(data[, logMeasuresInData])
    data[, sumMeasure] <- method(dataSelected)
  }
  return(data)
}
condenseMethod <- list(
  priority = function(data)
  {
    alreadySelected <- FALSE
    res <- rep(NA, nrow(data))
    for(logMeasure in colnames(data))
    {
      selected <- !alreadySelected & !is.na(data[, logMeasure])
      res[selected] <- data[selected, logMeasure]
      alreadySelected <- alreadySelected | selected
    }
    return(res)
  },
  average = function(data)
  {
    avLog <- rowMeans(data, na.rm = TRUE)
    avLog[is.nan(avLog)] <- NA
    return(avLog)
  }
)
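## Self-contained sketch of both methods on toy log-ratio columns:
## toy <- data.frame(logGL = c(NA, 0.1), logGLl = c(0.2, 0.3))
## condenseMethod$priority(toy)  # c(0.2, 0.1): first non-NA, left to right
## condenseMethod$average(toy)   # c(0.2, 0.2): row means, ignoring NAs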
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/CondenseLogs.R 
 | 
					
	Data2Reference <- function(data,
                           identifiers = c("TAX", "EL"),
                           refMeasuresName = "Measure",
                           refValuesName = "Standard",
                           thesaurusSet = zoologThesaurus)
{
  measureColumns <- which(InCategory(names(data), names(thesaurusSet$measure),
                                    thesaurusSet$measure))
  idColumns <- which(InCategory(names(data), identifiers,
                               thesaurusSet$identifier))
  ref <- do.call(rbind, apply(data, 1, function(x)
  {
    measures <- x[measureColumns]
    measures <- measures[!is.na(measures)]
    res <- data.frame(array(NA, dim=c(length(measures),
                                      ncol(thesaurusSet$identifier))))
    names(res) <- c(names(x)[idColumns], refMeasuresName, refValuesName)
    if(length(measures) == 0) return(res)
    res[, 1:length(idColumns)] <- rep(x[idColumns], each = length(measures))
    res[, refMeasuresName] <- names(measures)
    res[, refValuesName] <- measures
    return(res)
  }))
  as.data.frame(lapply(ref, utils::type.convert))
}
Reference2Data <- function(ref,
                           identifiers = c("TAX", "EL"),
                           refMeasuresName = "Measure",
                           refValuesName = "Standard",
                           thesaurusSet = zoologThesaurus)
{
  idColumns <- which(InCategory(names(ref), identifiers,
                                thesaurusSet$identifier))
  measureColumn <- which(InCategory(names(ref), refMeasuresName,
                                   thesaurusSet$identifier))
  valueColumn <- which(InCategory(names(ref), refValuesName,
                                thesaurusSet$identifier))
  refMeasures <- unique(ref[, measureColumn])
  refIdentification <- CollapseColumns(ref[, idColumns])
  refSamples <- unique(refIdentification)
  n <- length(refSamples)
  m <- length(idColumns)+length(refMeasures)
  data <- data.frame(array(NA, dim = c(n, m)))
  idNames <- names(ref)[idColumns]
  names(data) <- c(idNames, refMeasures)
  ref[, c(idColumns, measureColumn)] <-
    sapply(ref[, c(idColumns, measureColumn)], as.character)
  for(i in 1:n)
  {
    cases <- ref[refIdentification == refSamples[i], ]
    data[i, idNames] <- cases[1, idNames]
    data[i, cases[, measureColumn]] <- cases[, valueColumn]
  }
  as.data.frame(lapply(data, utils::type.convert))
}
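## Round-trip sketch on a toy wide table (assuming the thesaurus set
## recognizes the column names "TAX" and "EL" and the measures "GL" and "Bd"):
## d <- data.frame(TAX = "ovis", EL = "tibia", GL = 210, Bd = 25)
## r <- Data2Reference(d)  # long format: one row per available measure
## Reference2Data(r)       # back to a wide layout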
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/Data2Reference.R 
 | 
					
	#' Value Matching by Thesaurus Category
#'
#' Function to check if an element belongs to a category according to a
#' thesaurus. It is similar to \code{\link[base]{\%in\%}} and
#' \code{\link[base]{is.element}}, returning a logical vector indicating if each
#' element in a given vector is included in a given set. But \code{InCategory}
#' checks for equality assuming the equivalencies defined in the given thesaurus.
#'
#' @inheritParams StandardizeNomenclature
#' @param x Character vector to be checked for its inclusion in the category.
#' @param category Character vector identifying the categories in which the
#' inclusion of \code{x} will be checked. Each category can be identified by
#' any equivalent name in the thesaurus.
#'
#' @return
#' A logical vector of the same length as \code{x}. Each value answers the
#' question: \emph{Does the corresponding element in \code{x} belong to any of
#' the thesaurus categories identified by \code{category}?}
#'
#' @seealso
#' \code{\link{zoologThesaurus}}, \code{\link[base]{\%in\%}}
#'
#' @examples
#' InCategory(c("sheep", "cattle", "goat", "red deer"),
#'            c("ovis", "capra"),
#'            zoologThesaurus$taxon)
#'
#' @export
InCategory <- function(x, category, thesaurus)
{
  thesList <- lapply(thesaurus, function(a) a[a!=""])
  category <- StandardizeNomenclature(category, thesaurus)
  namesInCategory <- as.character(unlist(thesList[category]))
  namesInCategory <- NormalizeForSensitiveness(thesaurus, namesInCategory)$x
  x <- NormalizeForSensitiveness(thesaurus, x)$x
  x %in% namesInCategory
}
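## Expected result for the example above, assuming the package thesaurus
## maps "sheep" to ovis and "goat" to capra:
## c(TRUE, FALSE, TRUE, FALSE)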
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/InCategory.R 
 | 
					
	JoinCategories <- function(thesaurus, categories)
{
  if(length(categories) == 0) return(thesaurus)
  categStandard <- lapply(categories, StandardizeNomenclature,
                          thesaurus, mark.unknown = TRUE)
  names(categStandard) <- lapply(names(categories), StandardizeNomenclature,
                                 thesaurus)
  if(any(is.na(unlist(categStandard))))
    stop(paste("The provided categories include names not belonging",
               "to any category in the thesaurus."))
  categStandard <- mapply(function(x,y) {
                            if(y %in% names(thesaurus) && !(y %in% x))
                              x <- c(y,x)
                            return(x)
                          },
                          categStandard, names(categStandard),
                          SIMPLIFY = FALSE)
  thesList <- lapply(thesaurus, function(a) a[a!=""])
  namesToAdd <- lapply(categStandard,
                       function(x) as.character(unlist(thesList[x])))
  namesToAdd <- mapply(function(x,y) c(x,y), names(namesToAdd), namesToAdd,
                       SIMPLIFY = FALSE)
  thesList <- thesList[!(names(thesList) %in%
                           c(names(namesToAdd),
                             as.character(unlist(categStandard))))]
  thesList <- c(thesList, namesToAdd)
  thesNew <- ThesaurusFromList(thesList, attributes(thesaurus))
  if(ambiguity <- ThesaurusAmbiguity(thesNew))
    stop(paste0("Joining these categories would result in ambiguous thesaurus.\n",
                attr(ambiguity, "errmessage")))
  RemoveRepeatedNames(thesNew)
}
SmartJoinCategories <- function(thesaurusSet, joinCategories)
{
  if(length(joinCategories)==0) return(thesaurusSet)
  coincidences <- sapply(joinCategories, function(x) {
    sapply(thesaurusSet, function(y) {
      any(x %in% as.character(unlist(lapply(y, function(a) a[a!=""]))))
    })
  })
  if(any(colSums(coincidences)>1))
    stop(paste("Provided categories are ambiguous:",
               "Some name is in more than one thesaurus."))
  if(any(colSums(coincidences)<1))
    stop(paste("Provided categories include one category",
               "not matching any thesaurus."))
  for(th in rownames(coincidences))
  {
    thesaurusSet[[th]] <- JoinCategories(thesaurusSet[[th]],
                                         joinCategories[coincidences[th,]])
  }
  return(thesaurusSet)
}
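## Dispatch sketch (assuming "sheep" and "goat" are names known to the
## taxon thesaurus): SmartJoinCategories() locates the single thesaurus
## containing the given names and delegates to JoinCategories() on it:
## SmartJoinCategories(zoologThesaurus, list(sheep = c("sheep", "goat")))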
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/JoinCategories.R 
 | 
					
	#' Log Ratios of Measurements
#'
#' Function to compute the (base 10) log ratios of the measurements
#' relative to standard reference values.
#' The default reference and several alternative references are provided with the
#' package. But the user can use their own references if desired.
#'
#' Each log ratio is defined as the decimal logarithm of the ratio of the
#' variable of interest to a corresponding reference value.
#'
#' The \code{identifiers} are expected to determine corresponding
#' columns in both data and reference. Each value in these columns identifies
#' the type of bone. By default this is determined by a taxon and a bone
#' element. For any case in the data, the log ratios are computed with respect
#' to the reference values in the same bone type. If the reference does not
#' include that bone type, the corresponding log ratios are set to \code{NA}.
#'
#' The taxonomy allows the matching of data and reference by genus, instead
#' of by species. This is the default behaviour with
#' \code{useGenusIfUnambiguous = TRUE}, unless there is some ambiguity:
#' reference including more than one species for the same genus. For instance,
#' \code{reference$Combi} includes a reference for \emph{Sus scrofa}.
#' If the data includes cases of \emph{Sus domesticus}, their
#' log ratios will be computed with respect to the provided reference for
#' \emph{Sus scrofa}.
#' However, a warning is given to inform the user of this assumption, and let
#' they know that this can be prevented by setting
#' \code{useGenusIfUnambiguous = FALSE}.
#'
#' For some applications it can be interesting to group some set of bone types
#' into the same reference category to compute the log ratios. The parameter
#' \code{joinCategories} allows this grouping. \code{joinCategories} must be a
#' list of named vectors, each including the set of categories in the data
#' which should be mapped to the reference category given by its name.
#'
#' This can be applied to group different species into a single
#' reference species. For instance \emph{sheep}, \emph{capra}, and doubtful
#' cases between both (\emph{sheep/goat}), can be grouped and matched to the
#' same reference for \emph{sheep}, by setting
#' \code{joinCategories = list(sheep = c("sheep", "goat", "oc"))}.
#' Indeed, the zoologTaxonomy can be used for that purpose using the function
#' \code{\link{SubtaxonomySet}} as
#' \code{joinCategories = list(sheep = SubtaxonomySet("Caprini"))}.
#' Similarly, \code{joinCategories} can be applied to group
#' different bone elements into a single reference (see the example below for
#' undetermined phalanges).
#'
#' Note that the \code{joinCategories} option does not remove the distinction
#' between the different bone types in the data, just indicates that for any
#' of them the log ratios must be computed from the same reference.
#'
#' Using the taxonomy, the presence of cases identified by higher taxonomic
#' ranks are also automatically detected. For instance, if some partially
#' identified cases have been recorded as "Ovis/Capra", this is recognized
#' to denote the tribe \emph{Caprini}, which includes several possible species.
#' Then a warning is given informing the user of the detection of these cases
#' and of the option to use any of the corresponding species in the reference by
#' using the argument \code{joinCategories} (unless this has been already done).
#'
#' There are some measures that, for most usual taxa, are restricted to a subset
#' of bones. For instance, for *Bos*, *Ovis*, *Capra*, and *Sus*, the measure
#' \emph{GLl} is only relevant for the \emph{astragalus}, while \emph{GL} is not
#' applicable to it.
#' Thus, there cannot be any ambiguity between both measures since they can
#' be identified by the bone element. This justifies that some users have
#' simplified datasets where a single column records indistinctly \emph{GL} or
#' \emph{GLl}. The optional parameter \code{mergedMeasures} facilitates the
#' processing of this type of simplified dataset. For the alluded example,
#' \code{mergedMeasures = list(c("GL", "GLl"))} automatically selects, for each
#' bone element, the corresponding measure present in the reference.
#'
#' Observe that if \code{mergedMeasures} is set to measures that are not
#' mutually exclusive, the behaviour is unpredictable.
#'
#' @param data A dataframe with the input measurements.
#' @param ref A dataframe including the measurement values used as references.
#' The default \code{ref = reference$Combi} and other \link{reference} sets are
#' provided with the package \pkg{zoolog}.
#' @param identifiers A vector of column names in \code{ref} identifying
#' a type of bone. By default \code{identifiers = c("Taxon", "Element")}.
#' @param refMeasuresName The column name in \code{ref} identifying the type of
#' bone measurement.
#' @param refValuesName The column name in \code{ref} giving the measurement
#' value.
#' @param thesaurusSet A thesaurus allowing datasets with different nomenclatures
#' to be merged. By default \code{thesaurusSet = \link{zoologThesaurus}}.
#' @param taxonomy A taxonomy allowing the automatic detection of data and
#' reference sharing the same genus (or higher taxonomic rank), although of
#' different species. By default \code{taxonomy = \link{zoologTaxonomy}}.
#' @param joinCategories A list of named character vectors. Each vector is named
#' by a category in the reference and includes a set of categories in the data
#' for which to compute the log ratios with respect to that reference.
#' When \code{NULL} (default) no grouping is considered.
#' @param mergedMeasures A list of character vectors or a single character vector.
#' Each vector identifies a set of measures that the data presents merged in the
#' same column, named as any of them. This practice only makes sense if only one
#' of the measures can appear in each bone element.
#' @param useGenusIfUnambiguous Boolean. If \code{TRUE} (default), data cases
#' are matched to reference sharing the same genus, instead of sharing the same
#' species.
#'
#' @return
#' A dataframe including the input dataframe and additional columns, one
#' for each extracted log ratio for each relevant measurement in the reference.
#' The name of the added columns are constructed by prefixing each measurement by
#' the internal variable \code{logPrefix}.
#'
#' If the input dataframe includes additional S3 classes (such as "tbl_df"),
#' they are also passed to the output.
#'
#' @examples
#' ## Read an example dataset:
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package="zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#' ## For illustration purposes we keep now only a subset of cases to make
#' ## the example run sufficiently fast.
#' ## Avoid this step if you want to process the full example dataset.
#' dataExample <- dataExample[1:400, ]
#' ## We can observe the first lines (excluding some columns for visibility):
#' head(dataExample)[, -c(6:20,32:64)]
#'
#' ## Compute the log-ratios with respect to the default reference in the
#' ## package zoolog:
#' dataExampleWithLogs <- LogRatios(dataExample)
#' ## The output data frame include new columns with the log-ratios of the
#' ## present measurements, in both data and reference, with a "log" prefix:
#' head(dataExampleWithLogs)[, -c(6:20,32:64)]
#'
#' ## Compute the log-ratios with respect to a different reference:
#' dataExampleWithLogs2 <- LogRatios(dataExample, ref = reference$Basel)
#' head(dataExampleWithLogs2)[, -c(6:20,32:64)]
#'
#' ## Define an alternative reference, combining the references'
#' ## database differently:
#' refComb <- list(cattle = "Nieto", sheep = "Davis", Goat = "Clutton",
#'                 pig = "Albarella", redDeer = "Basel")
#' userReference <- AssembleReference(refComb)
#' ## Compute the log-ratios with respect to this alternative reference:
#' dataExampleWithLogs3 <- LogRatios(dataExample, ref = userReference)
#'
#' ## We can be interested in including the first and second phalanges without
#' ## anterior-posterior identification ("phal 1" and "phal 2"), by computing
#' ## their log ratios with respect to the reference of the corresponding
#' ## anterior phalanges ("phal 1 ant" and "phal 2 ant", respectively).
#' ## For this we use the optional argument joinCategories:
#' categoriesPhalAnt <- list('phal 1 ant' = c("phal 1 ant", "phal 1"),
#'                           'phal 2 ant' = c("phal 2 ant", "phal 2"))
#' dataExampleWithLogs4 <- LogRatios(dataExample,
#'                                   joinCategories = categoriesPhalAnt)
#' head(dataExampleWithLogs4)[, -c(6:20,32:64)]
#' @export
LogRatios <- function(data,
                      ref = reference$Combi,
                      identifiers = c("Taxon", "Element"),
                      refMeasuresName = "Measure",
                      refValuesName = "Standard",
                      thesaurusSet = zoologThesaurus,
                      taxonomy = zoologTaxonomy,
                      joinCategories = NULL,
                      mergedMeasures = NULL,
                      useGenusIfUnambiguous = TRUE) {
  thesaurusSetJoined <- thesaurusSet
  if(!is.null(joinCategories))
    thesaurusSetJoined <- SmartJoinCategories(thesaurusSetJoined,
                                              joinCategories)
  dataStandard <- StandardizeDataSet(data, thesaurusSetJoined)
  identifiers <- StandardizeNomenclature(identifiers,
                                         thesaurusSet$identifier)
  refStandard <- StandardizeDataSet(ref, thesaurusSet)
  refMeasuresName <- StandardizeNomenclature(refMeasuresName,
                                             thesaurusSet$identifier)
  refValuesName <- StandardizeNomenclature(refValuesName,
                                           thesaurusSet$identifier)
  dataStandard <- HandleTaxonAmbiguity(dataStandard, refStandard,
                                       identifiers, taxonomy,
                                       thesaurusSetJoined,
                                       useGenusIfUnambiguous)
  refMeasures <- unique(refStandard[, refMeasuresName])
  refMeasuresInData <- intersect(names(dataStandard), refMeasures)
  # Merge taxon, element, and measure combinations into a single vector.
  # This combination identifies a single reference value.
  refIdentification <- CollapseColumns(refStandard[, c(identifiers,
                                                       refMeasuresName)])
  # Compute the log ratios for all taxa, elements, and measures.
  for (measure in refMeasuresInData)
  {
    measureGroup <- GetGroup(measure, mergedMeasures)
    for(refMeasure in measureGroup)
    {
      dataIdentification <- CollapseColumns(dataStandard[, identifiers],
                                            refMeasure)
      coincident <- match(dataIdentification, refIdentification)
      matched <- !is.na(coincident)
      x <- dataStandard[matched, measure]
      y <- refStandard[coincident[matched], refValuesName]
      measureUserName <- names(data)[which(names(dataStandard) == measure)]
      logMeasure <- paste0(logPrefix, measureUserName)
      data[matched, logMeasure] <- log10(x / y)
    }
  }
  return(data)
}
# Namespace variable: prefix used for the log-ratio column names.
logPrefix <- "log"
GetGroup <- function(x, groups)
{
  if(!is.list(groups)) groups <- list(groups)
  xInGroup <- which(as.logical(lapply(groups, is.element, el=x)))
  if(length(xInGroup)==0) return(x)
  if(length(xInGroup)>1) stop(paste(x, "is included in more than one group."))
  groups[[xInGroup]]
}
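# A minimal usage sketch of GetGroup with hypothetical measure groups, wrapped
# in `if (FALSE)` so that it is never executed at package load:
if (FALSE)
{
  groups <- list(c("GL", "GLl", "GLpe"))
  GetGroup("GLl", groups)  # c("GL", "GLl", "GLpe"): the whole group is returned
  GetGroup("SD", groups)   # "SD": not in any group, returned unchanged
}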
JoinGenusForReference <- function(dataStandard,
                                  species, taxGroup,
                                  thesaurusSetJoined)
{
  implicitJoinCategories <- list(taxGroup)
  names(implicitJoinCategories) <- species
  thesaurusSetJoined <- SmartJoinCategories(thesaurusSetJoined,
                                            implicitJoinCategories)
  StandardizeDataSet(dataStandard, thesaurusSetJoined)
}
WarnOfTaxonAmbiguity <- function(taxonomyWarning,
                                 taxaInRef, rank, taxGroup)
{
  taxonomyWarning$initialMessage <- "Data includes some cases recorded as\n"
  if(length(taxaInRef) > 0)
  {
    if(length(taxaInRef) == 1 && is.null(taxonomyWarning$message))
    {
      taxonomyWarning$finalMessage <-
        "   Set joinCategories as appropriate if you want to use it."
    }
    else
    {
      taxonomyWarning$finalMessage <-
        "   Set joinCategories as appropriate if you want to use any of them."
    }
    taxonomyWarning$message <-
      paste0(taxonomyWarning$message,
             "    * ", taxGroup, " (which is a ", rank, ")\n",
             "      for which the reference for ",
             paste(taxaInRef, collapse = " or "),
             " could be used.\n")
  }
  return(taxonomyWarning)
}
HandleTaxonAmbiguity <- function(dataStandard,
                                 refStandard,
                                 identifiers,
                                 taxonomy,
                                 thesaurusSetJoined,
                                 useGenusIfUnambiguous)
{
  taxName <- identifiers[1]
  taxonomicRanks <- names(taxonomy)
  genusWarning <- ""
  taxonomyWarning <- list()
  for(rank in taxonomicRanks)
  {
    taxGroups <- intersect(unique(dataStandard[[taxName]]),
                           unique(taxonomy[[rank]]))
    for(taxGroup in taxGroups)
    {
      if(rank == "Species")
      {
        genus <- as.character(
          taxonomy$Genus[taxonomy$Species == taxGroup])
        species <- GetSpeciesIn(genus, taxonomy)
      }
      else
      {
        species <- GetSpeciesIn(taxGroup, taxonomy)
      }
      taxaInRef <- intersect(species, unique(refStandard[[taxName]]))
      if(rank %in% c("Species", "Genus") && length(taxaInRef) == 1 &&
         taxaInRef != taxGroup)
      {
        if(useGenusIfUnambiguous)
        {
          dataStandard <- JoinGenusForReference(dataStandard,
                                                taxaInRef, taxGroup,
                                                thesaurusSetJoined)
          genusWarning <- paste0(genusWarning, "Reference for ", taxaInRef,
                                 " used for cases of ", taxGroup, ".\n   ")
        }
      }
      else if(rank != "Species")
      {
        taxonomyWarning <- WarnOfTaxonAmbiguity(taxonomyWarning,
                                                taxaInRef, rank, taxGroup)
      }
    }
  }
  if(genusWarning != "") warning(genusWarning,
                                 "Set useGenusIfUnambiguous to FALSE ",
                                 "if this behaviour is not desired.",
                                 call. = FALSE)
  if(!is.null(taxonomyWarning$message)) warning(taxonomyWarning$initialMessage,
                                                taxonomyWarning$message,
                                                taxonomyWarning$finalMessage,
                                                call. = FALSE)
  return(dataStandard)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/LogRatios.R 
 | 
					
	ReadCommentLines <- function(file, comment.char = "#")
{
  allLines <- stringi::stri_read_lines(file)
  GetCommentLines(allLines, comment.char)
}
GetCommentLines <- function(x, comment.char = "#")
{
  commentLines <- x[StartsBy(x, comment.char)]
  comments <- sub(paste0("([", comment.char, " ])+"), "", commentLines)
  return(comments)
}
GetAfterPattern <- function(x, pattern)
{
  xSelected <- x[StartsBy(x, pattern)]
  xWithoutInitialSpaces <- sub("( )+", "", xSelected)
  xWithoutPattern <- substring(xWithoutInitialSpaces, nchar(pattern)+1)
  sub("( )+", "", xWithoutPattern)
}
StartsBy <- function(x, pattern)
{
  xWithoutInitialSpaces <- sub("( )+", "", x)
  substring(xWithoutInitialSpaces, 1, nchar(pattern)) == pattern
}
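# A minimal sketch of these helpers on hypothetical file lines, wrapped in
# `if (FALSE)` so that it is never executed at package load:
if (FALSE)
{
  lines <- c("## caseSensitive FALSE", "  # accentSensitive TRUE", "a;b;c")
  StartsBy(lines, "#")                        # TRUE TRUE FALSE
  comments <- GetCommentLines(lines)          # "caseSensitive FALSE" "accentSensitive TRUE"
  GetAfterPattern(comments, "caseSensitive")  # "FALSE"
}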
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/ReadCommentLines.R 
 | 
					
	ReadReferenceDatabase <- function(file)
{
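  # Note: the hard-coded "inst/extdata/" path below assumes this internal
  # helper is run from the package source root (development use only).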
  referencesDatabase <- list()
  refDbStruct <- utils::read.csv2(file,
                                  quote = "\"", na.strings = "",
                                  header = TRUE,
                                  comment.char = "#",
                                  fileEncoding = "UTF-8",
                                  stringsAsFactors = FALSE)
  for(i in 1:nrow(refDbStruct))
  {
    referencesDatabase[[refDbStruct$Taxon[i]]][[refDbStruct$Source[i]]] <-
      utils::read.csv2(paste0("inst/extdata/", refDbStruct$Filename[i]),
                       quote = "\"", na.strings = "",
                       header = TRUE,
                       comment.char = "#",
                       fileEncoding = "UTF-8")
  }
  referencesDatabase
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/ReadReferenceDatabase.R 
 | 
					
	#' Remove Cases Missing All Measurements
#'
#' Function to remove the table rows for which all measurements of interest
#' are not available (NA).
#' A particular list of measurement names can be explicitly provided
#' or selected by a common initial pattern.
#' The default setting removes the rows with no log-ratio available.
#'
#' @inheritParams LogRatios
#' @param measureNames A vector of characters with the list of measurements
#' to be considered for missing values. If \code{NULL} (default), all measurements
#' starting by \code{prefix} are considered.
#' @param prefix A character string with the initial pattern to select the
#' list of measurements. The default is given by the internal variable
#' \code{logPrefix}. It is in effect only when \code{measureNames = NULL}.
#' @return A dataframe with the same columns as the input dataframe but
#' removing the rows with missing values for all measurements in the list.
#' @examples
#' ## Read an example dataset:
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package = "zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#' ## We can observe the first lines (excluding some columns for visibility):
#' head(dataExample)[, -c(6:20,32:64)]
#'
#' ## Remove the cases not including any measurement present in the reference.
#' refMeasureNames <- unique(reference$Combi$Measure)
#' refMeasureNames
#' dataExamplePruned <- RemoveNACases(dataExample,
#'                                    measureNames = refMeasureNames)
#' ## The first lines of the output data frame show at least one available
#' ## measurement value in the selected list:
#' head(dataExamplePruned)[, -c(6:20,32:64)]
#'
#' ## If we compute first the log-ratios
#' dataExampleWithLogs <- LogRatios(dataExample)
#' ## the cases not including any log-ratio can be removed with the
#' ## default logPrefix
#' dataExampleWithLogsPruned <- RemoveNACases(dataExampleWithLogs)
#' head(dataExampleWithLogsPruned)[, -c(6:20,32:64)]
#' @export
RemoveNACases <- function(data, measureNames = NULL, prefix = logPrefix)
{
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  originalDataClasses <- class(data)
  names <- colnames(data)
  if (is.null(measureNames))
  {
    measureNames <- names[regexpr(prefix, names) == 1]
  }
  else
  {
    measureNames <- intersect(measureNames, names)
  }
#  prunedData <- data[rowSums(!is.na(data[, measureNames])) > 0, ]
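  # as.array keeps a dimension attribute even when a single measure column is
  # selected (which would otherwise drop to a plain vector), so apply() over
  # rows works in all cases.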
  prunedData <- data[apply(as.array(!is.na(data[, measureNames])), 1, any), ]
  rownames(prunedData) <- NULL
  # type.convert removes the non-used factors after subsetting the data.frame.
  # It takes also into account if factors in the original data.frame can
  # be considered numeric or logical in the subset one.
  prunedData <- as.data.frame(lapply(prunedData,
                                     function(x) {
                                       y <- utils::type.convert(as.character(x),
                                                                as.is = FALSE)
                                       if(is.character(x) && is.factor(y)) y <- x
                                       return(y)
                                     }), stringsAsFactors = FALSE)
  class(prunedData) <- originalDataClasses
  return(prunedData)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/RemoveNACases.R 
 | 
					
	#' Standardize Nomenclature
#'
#' Functions to map the user provided nomenclature into a standard one
#' as defined in a thesaurus.
#'
#' \code{StandardizeNomenclature} standardizes a character vector
#' according to a given thesaurus.
#'
#' \code{StandardizeDataSet} standardizes column names and values of
#' a data frame according to a thesaurus set.
#'
#' @inheritParams ThesaurusReaderWriter
#' @param x Character vector.
#' @param mark.unknown Logical. If \code{FALSE} (default) the strings not found in the
#' thesaurus are kept without change. If \code{TRUE} the strings not in the
#' thesaurus are set to \code{NA}.
#' @param data A data frame.
#'
#' @return
#' \code{StandardizeNomenclature} returns a vector of the same length as the
#' input vector \code{x}. The names present in the thesaurus are set to their
#' corresponding category. The names not in the thesaurus are kept unchanged if
#' \code{mark.unknown=FALSE} (default) and set to \code{NA} if
#' \code{mark.unknown=TRUE}.
#'
#' \code{StandardizeDataSet} returns a data frame with the same structure as
#' the input \code{data}, but standardizing its nomenclature according to a thesaurus set
#' including appropriate thesauri for its column names and for the values of
#' a set of columns.
#'
#' @examples
#' ## Select the thesaurus for taxa present in the thesaurus set
#' ## zoolog::zoologThesaurus:
#' thesaurus <- zoologThesaurus$taxon
#' thesaurus
#' ## Standardize a heterodox vector of taxa:
#' StandardizeNomenclature(c("bota", "giraffe", "pig", "cattle"),
#'                         thesaurus)
#' ## Observe that "giraffe" is kept unchanged since it is not included in
#' ## any thesaurus category.
#' ## But if mark.unknown is set to TRUE, it is marked as NA:
#' StandardizeNomenclature(c("bota", "giraffe", "pig", "cattle"),
#'                         thesaurus, mark.unknown = TRUE)
#'
#' ## This thesaurus is not case sensitive:
#' attr(thesaurus, "caseSensitive") #  == FALSE
#' ## Thus, names are recognized independently of their case:
#' StandardizeNomenclature(c("bota", "BOTA", "Bota", "boTa"),
#'                         thesaurus)
#'
#' ## Load an example data frame:
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package = "zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#' ## Observe mainly the first columns:
#' head(dataExample[,1:5])
#' ## Standardize the dataset:
#' dataStandardized <- StandardizeDataSet(dataExample, zoologThesaurus)
#' head(dataStandardized[,1:5])
#'
#' @seealso
#' \code{\link{zoologThesaurus}} for a description of the thesaurus and
#' thesaurus set structure,
#'
#' \code{\link{ThesaurusReaderWriter}}, \code{\link{ThesaurusManagement}}
#' @name StandardizeNomenclature
#' @rdname StandardizeNomenclature
#' @export
StandardizeNomenclature <- function(x, thesaurus,
                                    mark.unknown = FALSE)
{
  if(is.null(thesaurus) || is.null(x) || length(thesaurus)==0) return(x)
  n <- length(x)
  x.isfactor <- is.factor(x)
  if(x.isfactor) x <- as.character(x)
  normalized <- NormalizeForSensitiveness(thesaurus, x)
  thesaurus <- lapply(normalized$thesaurus, function(a) a[a!=""])
  y <- sapply(thesaurus, is.element, el = normalized$x)
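  # y is an n x k logical matrix (or a named k-vector when n == 1): entry
  # [i, j] is TRUE when x[i] matches some name in category j. Below, which(y)
  # returns linear indices over y; (index - 1) %% n + 1 recovers the row
  # (position in x) and ceiling(index / n) the column (category name).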
  if(mark.unknown) x[] <- NA
  if(length(x) > 1) ynames <- colnames(y) else ynames <- names(y)
  x[(which(y)-1) %% n + 1] <- ynames[ceiling(which(y)/n)]
  if(x.isfactor) x <- as.factor(x)
  return(x)
}
#' @rdname StandardizeNomenclature
#' @export
StandardizeDataSet <- function(data, thesaurusSet = zoologThesaurus)
{
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  originalDataClasses <- class(data)
  class(data) <- "data.frame"
  toColValues <- attr(thesaurusSet, "applyToColValues")
  for(thesaurus in thesaurusSet[attr(thesaurusSet, "applyToColNames")])
  {
    names(data) <- StandardizeNomenclature(names(data), thesaurus)
    names(thesaurusSet)[toColValues] <- sapply(names(thesaurusSet)[toColValues],
                                               StandardizeNomenclature,
                                               thesaurus)
  }
  for(i in which(toColValues & names(thesaurusSet) %in% names(data)))
  {
    type <- names(thesaurusSet)[i]
    data[, type] <- StandardizeNomenclature(data[, type], thesaurusSet[[type]])
  }
  data$Measure <- StandardizeNomenclature(data$Measure, thesaurusSet$measure)
  class(data) <- originalDataClasses
  return(data)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/StandardizeNomenclature.R 
 | 
					
	#' Subtaxonomy under taxonomical category
#'
#' Functions to obtain the subtaxonomy or the set of taxa included in a
#' particular taxonomic group, according to the \code{\link{zoologTaxonomy}}
#' by default.
#'
#' @param taxon A name of any of the taxa, at any rank included in the taxonomy
#' (from species to family in the zoolog taxonomy).
#' @param taxonomy A taxonomy from which to extract the subtaxonomy.
#' By default \code{taxonomy = \link{zoologTaxonomy}}.
#' @param thesaurus A thesaurus allowing datasets with different nomenclatures
#' to be merged. By default \code{thesaurus = \link{zoologThesaurus}$taxon}.
#'
#' @return
#' \code{Subtaxonomy} returns a data.frame with the same structure as the input
#' taxonomy but with only the species (rows) included in the queried
#' \code{taxon}, and the taxonomic ranks (columns)
#' up to its level.
#'
#' \code{SubtaxonomySet} returns a character vector including a unique copy
#' (set) of all the taxa, at any taxonomic rank, under the queried
#' \code{taxon}.
#' It is equivalent to \code{Subtaxonomy}, but returns a set instead of a dataframe.
#'
#' \code{GetSpeciesIn} returns a character vector including the species included
#' in the queried \code{taxon}.
#'
#' @examples
#' ## Get species of genus Sus:
#' GetSpeciesIn("Sus")
#'
#' ## Get species of family Bovidae:
#' GetSpeciesIn("Bovidae")
#'
#' ## Get the subtaxonomy of the Tribe Caprini:
#' Subtaxonomy("Caprini")
#'
#' ## Use SubtaxonomySet to join categories for computing log-ratios.
#' ## For this, we read an example dataset:
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package="zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#' ## We illustrate with a subset of cases to make the example run
#' ## sufficiently fast:
#' dataExample <- dataExample[1:1000, ]
#' ## Compute the log-ratios joining all taxa from tribe \emph{Caprini}
#' ## to use the reference of \emph{Ovis aries}:
#' categoriesCaprini <- list('Ovis aries' = SubtaxonomySet("Caprini"))
#' dataExampleWithLogs <- LogRatios(dataExample,
#'                                  joinCategories = categoriesCaprini)
#' @name Subtaxonomy
#' @rdname Subtaxonomy
#' @export
Subtaxonomy <- function(taxon, taxonomy = zoologTaxonomy,
                        thesaurus = zoologThesaurus$taxon)
{
  taxonomyStandardized <- as.data.frame(
    sapply(taxonomy, StandardizeNomenclature, thesaurus = thesaurus),
    stringsAsFactors = FALSE
  )
  taxonStandardized <- StandardizeNomenclature(taxon, thesaurus)
  groupLevel <- which(sapply(taxonomyStandardized,
                             function(x) any(x == taxonStandardized)))
  if(length(groupLevel) == 0)
    stop(paste(taxon, "is not recognized in zoologTaxonomy."))
  if(length(groupLevel) > 1)
    stop(paste("Ambiguity detected in zoologTaxonomy: \n",
               taxon, " is in more than one level."))
  selectedRows <- taxonomyStandardized[, groupLevel] == taxonStandardized
  taxonomy[selectedRows, 1:groupLevel]
}
#' @rdname Subtaxonomy
#' @export
SubtaxonomySet <- function(taxon, taxonomy = zoologTaxonomy,
                           thesaurus = zoologThesaurus$taxon)
{
  subtaxonomy <- Subtaxonomy(taxon, taxonomy, thesaurus)
  as.character(unique(unlist(subtaxonomy)))
}
#' @rdname Subtaxonomy
#' @export
GetSpeciesIn <- function(taxon, taxonomy = zoologTaxonomy,
                         thesaurus = zoologThesaurus$taxon)
{
  as.character(Subtaxonomy(taxon, taxonomy, thesaurus)$Species)
}
# TODO: Consider merging this InCategory.array helper into InCategory.
# TODO: Decide whether InCategory should accept the category name itself
#       when it is not included in the thesaurus.
InCategory.array <- function(x, category, thesaurus)
{
  sapply(x, function(y) InCategory(y, category, thesaurus) | y == category)
}
TaxonomyLevel <- function(taxon,
                          taxonomy = zoologTaxonomy,
                          thesaurus = zoologThesaurus$taxon,
                          as.numeric = FALSE)
{
  cases <- InCategory.array(taxonomy, taxon, thesaurus)
  groupLevel <- as.logical(colSums(cases))
  if(as.numeric)
  {
    return(which(groupLevel))
  }
  else
  {
    return(names(taxonomy)[groupLevel])
  }
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/Subtaxonomy.R 
 | 
					
	#' Thesaurus Management
#'
#' Functions to modify and check thesauri.
#'
#' @inheritParams ThesaurusReaderWriter
#' @param newName Character vector with new names to be added to the thesaurus.
#' @param category Character vector identifying the classes where the
#' new names should be included.
#'
#' @return
#' \code{NewThesaurus} returns an empty thesaurus. This can then be
#' populated by \code{AddToThesaurus}.
#'
#' \code{AddToThesaurus} returns the input thesaurus complemented with new
#' names in the categories identified. If any of the categories is not present
#' in the input thesaurus, new categories are added as required.
#'
#' \code{RemoveRepeatedNames} returns the input thesaurus pruned of redundant
#' names in each category. The redundancy is evaluated in agreement with the
#' case and accent sensitivity of the thesaurus.
#'
#' \code{ThesaurusAmbiguity} returns FALSE if no ambiguity is present. When any
#' ambiguity is found, it returns TRUE with an attribute \code{errmessage}
#' including the names present in more than one category and the
#' involved categories. This is used internally by
#' \code{\link{ReadThesaurus}} and \code{\link{AddToThesaurus}} to generate an
#' error in case they attempt to read or generate an ambiguous thesaurus.
#'
#' @examples
#' ## Load an example thesaurus:
#' thesaurus <- ReadThesaurus(system.file("extdata", "taxonThesaurus.csv",
#'                                        package="zoolog"))
#' ## with categories
#' names(thesaurus) #  "bos taurus"  "ovis aries"  "sus domesticus"
#' ## Add names to several categories:
#' thesaurusExtended <- AddToThesaurus(thesaurus,
#'                                     c("Kuh", "Schwein"),
#'                                     c("bos taurus","sus domesticus"))
#' ## This adds the name "Kuh" to the category "bos taurus" and
#' ## the name "Schwein" to the category "sus domesticus".
#'
#' ## Generate a new thesaurus and populate it with two categories
#' ## ("red" and "blue"):
#' thesaurusNew <- NewThesaurus()
#' thesaurusNew <- AddToThesaurus(thesaurusNew,
#'                                c("scarlet", "vermilion", "ruby", "cherry",
#'                                  "carmine", "wine"),
#'                                "red")
#' thesaurusNew
#' thesaurusNew <- AddToThesaurus(thesaurusNew,
#'                                 c("sky blue", "azure", "sapphire", "cerulean",
#'                                  "navy", "lapis lazuli", "indigo", "cyan"),
#'                                "blue")
#' thesaurusNew
#'
#' ## Attempt to generate an ambiguous thesaurus
#' try(AddToThesaurus(thesaurusNew, "scarlet", "blue"))
#'
#' ## Remove repeated names in the same category:
#' thesaurusWithRepetitions <- AddToThesaurus(thesaurusNew,
#'                                            c("scarlet", "ruby"), "red")
#' thesaurusWithRepetitions
#' RemoveRepeatedNames(thesaurusWithRepetitions)
#'
#' @seealso
#' \code{\link{zoologThesaurus}} for a description of the thesaurus and
#' thesaurus set structure,
#'
#' \code{\link{ReadThesaurus}}, \code{\link{WriteThesaurus}},
#' \code{\link{StandardizeNomenclature}}
#' @name ThesaurusManagement
#' @rdname ThesaurusManagement
#' @export
NewThesaurus <- function(caseSensitive = FALSE, accentSensitive = FALSE,
                         punctuationSensitive = FALSE)
{
  thesaurus <- data.frame()
  attr(thesaurus, "caseSensitive") <- caseSensitive
  attr(thesaurus, "accentSensitive") <- accentSensitive
  attr(thesaurus, "punctuationSensitive") <- punctuationSensitive
  return(thesaurus)
}
#' @rdname ThesaurusManagement
#' @export
AddToThesaurus <- function(thesaurus, newName, category)
{
  if(length(chainName <- intersect(newName, category))>0)
    stop(paste0("Inconsistent \"newName\" and \"category\". ",
                "Repeated name: ", paste0(chainName, collapse = ", "), "."))
  standardName <- StandardizeNomenclature(category, thesaurus)
  newColumns <- setdiff(standardName, names(thesaurus))
  newName <- c(newColumns, newName)
  standardName <- c(newColumns, standardName)
  thesNew <- lapply(thesaurus, function(a) a[a!=""])
  for(i in 1:length(newName))
  {
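    # If fewer categories than new names were given, the last category is
    # reused for the remaining names (min() caps the index).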
    case <- standardName[min(i,length(standardName))]
    thesNew[[case]] <- c(thesNew[[case]], newName[i])
  }
  thesNew <- ThesaurusFromList(thesNew, attributes(thesaurus))
  if(ambiguity <- ThesaurusAmbiguity(thesNew))
    stop(paste0("The resulting thesaurus would be ambiguous.\n",
                attr(ambiguity, "errmessage")))
  return(thesNew)
}
#' @rdname ThesaurusManagement
#' @export
RemoveRepeatedNames <- function(thesaurus)
{
  thesClean <- mapply(function(x,y) x[!duplicated(y) & y!=""],
                      thesaurus,
                      NormalizeForSensitiveness(thesaurus))
  ThesaurusFromList(thesClean, attributes(thesaurus))
}
#' @rdname ThesaurusManagement
#' @export
ThesaurusAmbiguity <- function(thesaurus)
{
  if(length(thesaurus)<2) return(FALSE)
  thesaurus <- NormalizeForSensitiveness(thesaurus)
  thesaurus <- lapply(thesaurus, function(a) a[a!=""])
  pairs <- utils::combn(names(thesaurus), 2)
  ambiguities <- list()
  for(i in 1:ncol(pairs))
  {
    pair.coincidence <- thesaurus[[pairs[1,i]]] %in% thesaurus[[pairs[2,i]]]
    if(any(pair.coincidence))
    {
      ambiguities[[paste0("Ambiguity in pair (\"", pairs[1,i], "\", \"", pairs[2,i], "\")")]] <-
        thesaurus[[pairs[1,i]]][pair.coincidence]
    }
  }
  res <- length(ambiguities)>0
  if(res) attr(res, "errmessage") <- paste0(names(ambiguities),
                                            ". Shared names: ", ambiguities,
                                            collapse = "\n")
  return(res)
}
#
# From here internal functions. Not exported.
#
ThesaurusFromList <- function(thesaurusList, attrib)
{
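  # Pad every category (column) with empty strings up to the length of the
  # longest one, so the list can be assembled into a rectangular data.frame.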
  n <- max(sapply(thesaurusList, length))
  thesaurus <- as.data.frame(lapply(thesaurusList,
                                    function(x) c(as.character(x),
                                                  rep("", n-length(x)))),
                             stringsAsFactors = FALSE)
  attr(thesaurus, "caseSensitive") <- attrib$caseSensitive
  attr(thesaurus, "accentSensitive") <- attrib$accentSensitive
  attr(thesaurus, "punctuationSensitive") <- attrib$punctuationSensitive
  names(thesaurus) <- names(thesaurusList)
  return(thesaurus)
}
NormalizeForSensitiveness <- function(thesaurus, x = NULL)
{
  sensitivenessAttrNames <- c("caseSensitive",
                              "accentSensitive",
                              "punctuationSensitive")
  sensitivenessAttr <- unlist(sapply(sensitivenessAttrNames, attr,
                                     x = thesaurus))
  xprepared <- x
  thesaurus <- lapply(thesaurus, SensitivenessTransformation, sensitivenessAttr)
  xprepared <- SensitivenessTransformation(xprepared, sensitivenessAttr)
  if(is.null(x)) return(thesaurus) else
    return(list(thesaurus = thesaurus, x = xprepared))
}
SensitivenessTransformation <- function(x, sensitiveness)
{
  if(is.null(sensitiveness)) return(x)
  if(is.false.or.na(sensitiveness["caseSensitive"]))
    x <- stringi::stri_trans_general(x, "Any-lower")
  if(is.false.or.na(sensitiveness["accentSensitive"]))
    x <- stringi::stri_trans_general(x, "Latin-ASCII")
  if(is.false.or.na(sensitiveness["punctuationSensitive"]))
    x <- gsub("[[:punct:][:blank:]]+", "", x)
  return(x)
}
is.false.or.na <- function(x)
{
  is.na(x) || !x
}
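# A minimal sketch of the sensitiveness normalization on hypothetical strings,
# wrapped in `if (FALSE)` so that it is never executed at package load:
if (FALSE)
{
  sens <- c(caseSensitive = FALSE, accentSensitive = FALSE,
            punctuationSensitive = FALSE)
  # All three variants collapse to "bostaurus" and thus match the same
  # category:
  SensitivenessTransformation(c("Bos Taurus", "bos-taurus", "bos.taurus"), sens)
}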
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/ThesaurusManagement.R 
 | 
					
	#' Thesaurus Readers and Writers
#'
#' Functions to read and write thesauri and thesaurus sets.
#'
#' @param file Name of a file.
#' @param thesaurus A thesaurus object.
#' @param thesaurusSet A thesaurus set.
#' @param caseSensitive,accentSensitive,punctuationSensitive Logical. They set
#' the case, accent, and punctuation sensitivity (\code{FALSE} by default) of
#' the thesaurus.
#'
#' @return
#' \code{WriteThesaurus} and \code{WriteThesaurusSet} create or overwrite the
#' corresponding files. No value is returned.
#'
#' \code{ReadThesaurus} and \code{ReadThesaurusSet} return the read thesaurus or
#' thesaurusSet, respectively.
#'
#' @examples
#' ## Read a thesaurus for taxa:
#' thesaurusFile <- system.file("extdata", "taxonThesaurus.csv", package="zoolog")
#' thesaurus <- ReadThesaurus(thesaurusFile)
#' ## The attributes of the thesaurus include the fields 'caseSensitive',
#' ## 'accentSensitive', and 'punctuationSensitive', all FALSE by default.
#' attributes(thesaurus)
#'
#' ## Any of them can be set by the user if desired:
#' thesaurus2 <- ReadThesaurus(thesaurusFile, accentSensitive = TRUE)
#' attributes(thesaurus2)
#'
#' ## Write the thesaurus to a file:
#' fileExample <- file.path(tempdir(), "thesaurusExample.csv")
#' WriteThesaurus(thesaurus, fileExample)
#' ## Replace tempdir() with your preferred local path if you want to easily
#' ## examine the written file.
#'
#' ## Read a thesaurus set:
#' thesaurusSetFile <- system.file("extdata", "zoologThesaurusSet.csv", package="zoolog")
#' thesaurusSet <- ReadThesaurusSet(thesaurusSetFile)
#' ## The attributes of the thesaurus set include information of the constituent
#' ## thesauri: names, source file names, and their mode of application on datasets.
#' attributes(thesaurusSet)
#' ## The attributes of each thesaurus are also set by 'ReadThesaurusSet'.
#' attributes(thesaurusSet$measure)
#'
#' ## Write the thesaurus set to a file:
#' fileSetExample <- file.path(tempdir(), "thesaurusSetExample.csv")
#' WriteThesaurusSet(thesaurusSet, fileSetExample)
#' ## It writes the thesaurus-set main data frame and each of the included
#' ## thesaurus files.
#' ## Again, replace tempdir() with your preferred local path if you want to
#' ## easily examine the written files.
#'
#' @seealso
#' \code{\link{zoologThesaurus}} for a description of the thesaurus and
#' thesaurus set structure,
#'
#' \code{\link{ThesaurusManagement}},
#' \code{\link{StandardizeNomenclature}}
#' @name ThesaurusReaderWriter
#' @rdname ThesaurusReaderWriter
#' @export
ReadThesaurus <- function(file,
                          caseSensitive = FALSE,
                          accentSensitive = FALSE,
                          punctuationSensitive = FALSE)
{
  thesaurus <- utils::read.csv2(file, stringsAsFactors = FALSE, header = FALSE,
                                comment.char = "#")
  names(thesaurus) <- thesaurus[1,]
  attrib <- ReadThesaurusAttributes(file)
  for(sensitive in c("caseSensitive", "accentSensitive", "punctuationSensitive"))
  {
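    # eval(call("missing", as.name(sensitive))) checks whether the argument
    # named by `sensitive` was explicitly supplied by the caller. An explicit
    # argument overrides the attribute read from the file; otherwise the file
    # value is kept, falling back to the formal default when the file gives
    # no value.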
    if(!eval(call("missing", as.name(sensitive))) || is.null(attrib[[sensitive]]))
      attrib[[sensitive]] <- eval(as.name(sensitive))
    attr(thesaurus, sensitive) <- attrib[[sensitive]]
  }
  if(ambiguity <- ThesaurusAmbiguity(thesaurus))
    stop(paste0("Ambiguous thesaurus in ", file , ":\n",
                attr(ambiguity, "errmessage")))
  return(thesaurus)
}
#' @rdname ThesaurusReaderWriter
#' @export
ReadThesaurusSet <- function(file)
{
  data <- utils::read.csv2(file, comment.char = "#")
  dir <- dirname(file)
  filenames <- file.path(dir, data$FileName)
  thesaurusSet <- mapply(ReadThesaurus, filenames,
                         data$CaseSensitive, data$AccentSensitive,
                         data$PunctuationSensitive)
  names(thesaurusSet) <- data$ThesaurusName
  attr(thesaurusSet, "applyToColNames") <- data$ApplyToColNames
  attr(thesaurusSet, "applyToColValues") <- data$ApplyToColValues
  attr(thesaurusSet, "fileName") <- as.character(data$FileName)
  return(thesaurusSet)
}
#' @rdname ThesaurusReaderWriter
#' @export
WriteThesaurus <- function(thesaurus, file)
{
  WriteThesaurusAttributes(thesaurus, file)
  utils::write.table(thesaurus, file,
                     sep = ";", dec = ",", qmethod = "double",
                     row.names = FALSE, col.names = FALSE, quote = FALSE,
                     append = TRUE)
}
#' @rdname ThesaurusReaderWriter
#' @export
WriteThesaurusSet <- function(thesaurusSet, file)
{
  data <- data.frame()
  data[1:length(thesaurusSet),"ThesaurusName"] <- names(thesaurusSet)
  data$FileName <- attr(thesaurusSet, "fileName")
  data$CaseSensitive <- sapply(thesaurusSet,
                               function(x) attr(x, "caseSensitive"))
  data$AccentSensitive <- sapply(thesaurusSet,
                                 function(x) attr(x, "accentSensitive"))
  data$PunctuationSensitive <- sapply(thesaurusSet,
                                      function(x) attr(x, "punctuationSensitive"))
  data$ApplyToColNames <- attr(thesaurusSet, "applyToColNames")
  data$ApplyToColValues <- attr(thesaurusSet, "applyToColValues")
  utils::write.csv2(data, file, row.names = FALSE, quote = FALSE)
  dir <- dirname(file)
  filenames <- file.path(dir, data$FileName)
  noreturn <- mapply(WriteThesaurus, thesaurusSet, filenames)
}
ReadThesaurusAttributes <- function(file)
{
  x <- ReadCommentLines(file)
  attrib <- list()
  for(sensitive in c("caseSensitive", "accentSensitive", "punctuationSensitive"))
  {
    value <- as.logical(GetAfterPattern(x, sensitive))
    if(length(value) > 0) attrib[[sensitive]] <- value[1]
  }
  return(attrib)
}
WriteThesaurusAttributes <- function(thesaurus, file)
{
  commentLine <- "##########################################"
  lines <- c(commentLine, "## zoolog thesaurus")
  for(sensitive in c("caseSensitive", "accentSensitive", "punctuationSensitive"))
    lines <- c(lines, paste("##", sensitive, attr(thesaurus, sensitive)))
  lines <- c(lines, commentLine)
  writeLines(lines, file)
}
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/ThesaurusReaderWriter.R 
 | 
					
	#' Example dataset
#'
#' The dataset provided as an example originates from
#' \insertCite{valenzuela2008alimentacio}{zoolog}. The dataset is written in
#' Catalan, with the exception of some headings to facilitate understanding of
#' its contents.
#'
#' @importFrom Rdpack reprompt
#'
#' @format
#' The dataset is provided in the \pkg{zoolog} \code{extdata} folder as a file
#' in semicolon-separated values format but compressed with
#' gzip to reduce its size:
#' \describe{
#'   \item{`dataValenzuelaLamas2008.csv.gz`}{}
#' }
#' The file is provided in UTF-8 encoding. The file encoding is relevant
#' because the dataset contains accents and special characters that need to be
#' correctly displayed. It can
#' be directly opened by `utils::read.csv2`, provided that the correct
#' encoding is set (see examples below).
#'
#' Every row of the data.frame refers to one individual bone fragment unless
#' otherwise stated in the \emph{Observations} field ("Observacions").
#'
#' All the measurements are expressed in millimetres and were obtained with a
#' manual calliper.
#'
#' The main headings in the database are:
#' \describe{
#'   \item{Site}{The faunal remains from three Iron Age archaeological sites were recorded
#' 	(ALP = Alorda Park, TFC = Turó de la Font de la Canya, OLD = Olèrdola).}
#'   \item{N inv}{A sequential identification number for each fragment.}
#'   \item{UE}{Refers to the Stratigraphic Unit (SU in English).}
#'   \item{Especie}{Refers to the species.}
#'   \item{Os}{Refers to the skeletal element.}
#'   \item{Fragment}{Refers to the preserved part in the vertical axis (distal, proximal, diaphysis,
#'	 etc.).}
#'   \item{Lat}{Bone laterality: right (d) or left (e).}
#'   \item{Vora}{Refers to the preserved part in relation to the circumference
#' 	 (c), or to a vertically, transversally, and obliquely fragmented one (sto).}
#'   \item{Fract}{Refers to fracture during field excavation or lab work.}
#'   \item{Tafo}{Refers to anthropic and post-depositional alterations.}
#'   \item{Grau}{Refers to degree of bone alteration in a scale from 0 (no alteration) to 4 (diaphysis completely altered).}
#'   \item{Epif}{Degree of fusion: s = fused, ns = unfused, ec = fusion visible. Tooth wear is also recorded here, following \insertCite{gardeisen1997exploitation}{zoolog}.}
#'   \item{Sexe}{Sex: male (masc) / female (fem).}
#'   \item{Traces}{Refers to butchery marks. It may also include other observations.}
#'   \item{Observacions}{Observations.}
#'   \item{Recinte}{Refers to the number of silo structure (e.g. SJ8) or the room (e.g. AB)
#' 	from which the material originates.}
#'   \item{TPQ}{Absolute chronology in Terminus Post Quem.}
#'   \item{TAQ}{Absolute chronology in Terminus Ante Quem.}
#'  \item{Period}{Chronological phasing.}
#'  \item{Capsa}{Box number that contains the item.}
#'  \item{Measurement codes }{The nomenclature follows \insertCite{von1976guide}{zoolog}.}
#' }
#'
#' @examples
#' dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz",
#'                         package="zoolog")
#' dataExample <- utils::read.csv2(dataFile,
#'                                 na.strings = "",
#'                                 encoding = "UTF-8")
#'
#' @references
#'   \insertAllCited{}
#'
#' @name dataValenzuelaLamas2008
#' @docType data
NULL
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/exampleDataset.R 
 | 
					
	utils::globalVariables(c("reference", 
                         "referencesDatabase", 
                         "zoologThesaurus",
                         "zoologTaxonomy"))
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/globals.R 
 | 
					
	#' References
#'
#' Several osteometrical references are provided in \pkg{zoolog} to enable
#' researchers to use the one of their choice. Users can also supply their
#' own osteometrical reference if preferred.
#'
#' @importFrom Rdpack reprompt
#'
#' @format
#' Each reference is a data.frame including 4 columns:
#' \describe{
#'   \item{TAX}{The taxon to which each reference bone belongs.}
#'   \item{EL}{The skeletal element.}
#'   \item{Measure}{The type of measurement taken on the bone.}
#'   \item{Standard}{The value of the measurement taken on the bone.
#'     All the measurements are expressed in millimetres.}
#' }
#'
#' @section Data Source:
#' Currently, the references include reference values for the main domesticates
#' and their agriotypes (\emph{Bos}, \emph{Ovis}, \emph{Capra},
#' \emph{Sus}), and other less frequent species, such as red deer and donkey,
#' drawn from the following publications and resources:
#'
#' ``` {r, echo=FALSE, results='asis'}
#' refDatabase <- read.csv2("inst/extdata/referencesDatabase.csv")
#' res <- "\\describe{\n"
#' for(genus in unique(refDatabase$Genus))
#' {
#'   res <- paste0(res, "\\item{**", genus, "**}{\\describe{\n")
#'   for(i in which(refDatabase$Genus == genus))
#'   {
#'     file <- paste0("inst/extdata/", refDatabase$Filename[i])
#'     description <- ReadCommentLines(file)
#'     nameLine <- which(StartsBy(description, "REFERENCE:"))
#'     if(length(nameLine)>0) {
#'       name <- GetAfterPattern(description[nameLine[1]], "REFERENCE:")
#'       description <- description[-nameLine[1]]
#'     } else {
#'       name <- refDatabase$Source[i]
#'     }
#'     res <- paste0(res, "\\item{", name, "}{")
#'     sourceLine <- which(StartsBy(description, "SOURCE:"))
#'     if(length(sourceLine)>0)
#'       description <- description[1:(sourceLine[1]-1)]
#'     description <- paste(description, collapse = "\n")
#'     res <- paste0(res, description)
#'     res <- paste0(res, "}\n")
#'   }
#'   res <- paste0(res, "}}\n")
#' }
#' cat(paste(res, "}\n"))
#' ```
#'
#' The \pkg{zoolog} variable `referencesDatabase` collects all these
#' references. It is structured as a named list of named lists, following the
#' hierarchy described above:
#' ``` {r}
#' str(referencesDatabase, max.level = 2)
#' ````
#'
#' @section Reference Sets:
#' The references' database is organized per taxon. However, in general the
#' zooarchaeological data to be analysed includes several taxa. Thus, the
#' reference dataframe should include one reference standard for each relevant
#' taxon.
#' The \pkg{zoolog} variable \code{referenceSets} defines four possible
#' references:
#' ``` {r, eval = FALSE}
#' referenceSets
#' ```
#'
#' ``` {r, echo=FALSE}
#' knitr::kable(referenceSets)
#' ```
#'
#' Each row defines a reference set consisting of a reference source for
#' each taxon (column). The function
#' \code{\link{AssembleReference}} allows us to build the reference set
#' taking the selected taxon-specific references from the
#' \code{referencesDatabase}.
#'
#' The \pkg{zoolog} variable \code{reference} is a named list including the
#' references defined by \code{referenceSets}:
#' ``` {r}
#' str(reference)
#' ````
#'
#' `reference$Combi` includes the most comprehensive reference for each
#' species so that more measurements can be considered. It is the default
#' reference for computing the [log ratios][LogRatios].
#'
#' If desired, the user can define their own combinations or can also use
#' their own references, which must be a dataframe with the format described
#' above.
#'
#' @section File Structure:
#' `referencesDatabase`, `referenceSets`, and `reference` are exported variables
#' automatically loaded in memory. In addition, \pkg{zoolog} provides in the
#' \code{extdata} folder the set of semicolon-separated (csv) files from which
#' they are generated:
#' \describe{
#'   \item{`referenceSets.csv`}{Defines `referenceSets`.}
#'   \item{`referencesDatabase.csv`}{Defines the structure of
#'     `referencesDatabase`.}
#'   \item{...}{A csv file for each taxon-specific reference, as named in
#'     `referencesDatabase.csv`.}
#' }
#' ``` {r}
#' utils::read.csv2(system.file("extdata", "referencesDatabase.csv",
#'                              package = "zoolog"))
#' ```
#'
#' @references
#'   \insertAllCited{}
#'
#' @section Acknowledgement:
#' We are grateful to Barbara Stopp and Sabine Deschler-Erb
#' (University of Basel, Switzerland)
#' for providing the Basel references for cattle, sheep, goat, wild boar,
#' and red deer \insertCite{stopp2018Basel}{zoolog},
#' together with the permission to publish them as part of \pkg{zoolog}.
#'
#' We thank also Francesca Slim and Dimitris Filioglou (University of Groningen)
#' for providing the references for aurochs, mouflon, wild goat, and wild boar
#' \insertCite{degerbol1970urus,uerpmann1994animal,hongo2000faunal}{zoolog}
#' in the Groningen set.
#'
#' We thank Claudia Minniti (University of Salento) for providing Johnstone's
#' reference for cattle \insertCite{johnstone2002late}{zoolog}.
#'
#' We are also grateful to Sierra Harding and Nimrod Marom (University of Haifa)
#' for providing the Haifa standard measurements for donkey, mountain gazelle, 
#' and Persian fallow deer \insertCite{Harding2021}{zoolog}.
#'
#' We thank Carly Ameen and Helene Benkert (University of Exeter) for providing
#' references for horse \insertCite{johnstone2004biometric}{zoolog} and 
#' European rabbit \insertCite{Ameen2021}{zoolog}.
#'
#' We thank Mikolaj Lisowski (University of York) for pointing to the existence
#' of the improved reference for Bos primigenius
#' \insertCite{steppan2001ur}{zoolog} and providing its source.
#'
#' @name referencesDatabase
#' @rdname referencesDatabase
"reference"
#' @format
#'
#' @rdname referencesDatabase
"referenceSets"
#' @format
#'
#' @rdname referencesDatabase
"referencesDatabase"
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/references.R 
 | 
					
	#' Taxonomy hierarchy for \pkg{zoolog}
#'
#' The taxonomy hierarchy for all taxa included in the osteometrical references
#' of the package \pkg{zoolog}.
#' It allows users to group the taxa by any taxonomic category
#' from \emph{species} to \emph{family}. See
#' \code{\link{Subtaxonomy}}.
#'
#' @format
#' The taxonomy is given as a data.frame with columns for
#' \emph{Species}, \emph{Genus}, \emph{Tribe}, \emph{Subfamily}, and
#' \emph{Family}.
#' Each row lists the information for one species:
#'
#' ``` {r, echo=FALSE}
#' knitr::kable(zoologTaxonomy)
#' ```
#'
#' @section File Structure:
#' \code{zoologTaxonomy} is an exported variable automatically loaded in
#' memory. In addition, the csv source file \code{zoologTaxonomy.csv}
#' generating it is included in the \pkg{zoolog} \code{extdata} folder.
#'
"zoologTaxonomy"
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/zoologTaxonomy.R 
 | 
					
	#' Thesaurus Set for \pkg{zoolog}
#'
#' The thesaurus set defined for the package \pkg{zoolog}.
#' This is used to make the methods robust to different nomenclatures used
#' in datasets created by different authors. The user can also use other
#' thesaurus sets, or can modify the provided thesaurus set (see
#' \code{\link{ThesaurusManagement}} and \code{\link{ThesaurusReaderWriter}}).
#'
#' @format
#' A thesaurus set is a list of thesauri with additional attributes:
#' \describe{
#'   \item{names}{Character vector with the name of each thesaurus.}
#'   \item{applyToColNames}{Logical vector indicating whether each thesaurus
#'           should be applied to the column names of the data frame.}
#'   \item{applyToColValues}{Logical vector indicating whether each thesaurus
#'           should be applied to the values in the corresponding column of
#'           the data frame.}
#'   \item{filename}{Character vector with the source file of each thesaurus.}
#' }
#'
#' The examples below show the list of four thesauri included in the provided
#' \code{zoologThesaurus}.
#'
#' Each thesaurus is a data frame also with additional attributes. Each column
#' of the data frame is a category of names with equivalent meaning in the
#' intended application. The column name identifies the category and is used
#' as the standard when applying \code{\link{StandardizeNomenclature}}.
#'
#' The names in each column (category) must not be included in any other
#' column, since this would make the thesaurus ambiguous (see
#' \code{\link{ThesaurusAmbiguity}}).
#'
#' Each thesaurus has the following attributes:
#' \describe{
#'   \item{names}{The standard name for the categories.}
#'   \item{class}{"data.frame"}
#'   \item{row.names}{Irrelevant}
#'   \item{caseSensitive}{Logical indicating whether the names in the thesaurus
#'           should be considered case-sensitive.}
#'   \item{accentSensitive}{Logical indicating whether the names in the
#'           thesaurus should be differentiated by the presence of accent
#'           marks.}
#'   \item{punctuationSensitive}{Logical indicating whether the names in the
#'           thesaurus should be differentiated by the presence of punctuation
#'           marks.}
#' }
#'
#' The examples below show the content and characteristics of the first
#' thesaurus in \code{zoologThesaurus}.
#'
#' @section File Structure:
#' \code{zoologThesaurus} is an exported variable automatically loaded in
#' memory. In addition, the source files generating it are included in the
#' \pkg{zoolog} \code{extdata} folder. There is one file for the thesaurus set
#' main structure and one file for each included thesaurus. All of them are in
#' semicolon separated format. Thus, they can be examined in any text editor
#' or imported into any spreadsheet application. The files are:
#' \describe{
#'   \item{\code{zoologThesaurusSet.csv}}{Defines the main structure of the
#'     thesaurus set. It has a row for each thesaurus and seven columns
#'     (\emph{ThesaurusName}, \emph{FileName}, \emph{CaseSensitive},
#'     \emph{AccentSensitive}, \emph{PunctuationSensitive},
#'     \emph{ApplyToColNames}, and \emph{ApplyToColValues}).
#'     Their meaning coincides with the description above. Observe that the
#'     case, accent, and punctuation sensitiveness is stored here, instead of
#'     in each thesaurus.}
#'   \item{\code{identifierThesaurus.csv}}{Thesaurus for the identifiers used
#'     in \code{\link{LogRatios}} to identify the bone types and the measure
#'     names in the data and the references. It has four columns:
#'     \emph{Taxon}, \emph{Element}, \emph{Measure}, and \emph{Standard}.}
#'   \item{\code{taxonThesaurus.csv}}{Thesaurus for the taxa. There is one
#'     column for each category of taxon considered.}
#'   \item{\code{elementThesaurus.csv}}{Thesaurus for the skeletal elements.
#'     One column for each category.}
#'   \item{\code{measureThesaurus.csv}}{Thesaurus for the measure names.
#'     One column for each category.}
#' }
#'
#' @examples
#' ## List of thesaurus names and characteristics in the thesaurus set:
#' attributes(zoologThesaurus)
#' ## Content of the first thesaurus:
#' zoologThesaurus$identifier
#' attributes(zoologThesaurus$identifier)
#'
"zoologThesaurus"
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/R/zoologThesaurus.R 
 | 
					
	## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## -----------------------------------------------------------------------------
library(zoolog)
str(reference, max.level = 1)
## ----echo=FALSE---------------------------------------------------------------
options(knitr.kable.NA = "")
knitr::kable(referenceSets)
## ---- echo=FALSE--------------------------------------------------------------
knitr::kable(zoologTaxonomy)
## ---- echo = FALSE------------------------------------------------------------
options(stringsAsFactors = FALSE)
## -----------------------------------------------------------------------------
library(zoolog)
dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz", 
                        package = "zoolog")
data = read.csv2(dataFile,
                 quote = "\"", header = TRUE, na.strings = "",
                 encoding = "UTF-8")
knitr::kable(head(data)[, -c(6:20,32:64)])
## -----------------------------------------------------------------------------
dataWithLog <- LogRatios(data)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
## -----------------------------------------------------------------------------
AScases <- InCategory(dataWithLog$Os, "astragalus", zoologThesaurus$element)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
## ---- warning = FALSE---------------------------------------------------------
GLVariants <- list(c("GL", "GLl", "GLpe"))
dataWithLog <- LogRatios(data, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
## ---- warning = FALSE---------------------------------------------------------
caprineCategory <- list(ovar = SubtaxonomySet("caprine"))
dataWithLog <- LogRatios(data, joinCategories = caprineCategory, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
## -----------------------------------------------------------------------------
SubtaxonomySet("caprine")
## -----------------------------------------------------------------------------
dataWithLogPruned <- RemoveNACases(dataWithLog)
knitr::kable(head(dataWithLogPruned[, -c(6:20,32:64)]))
## ---- eval = FALSE, warning = FALSE-------------------------------------------
#  write.csv2(dataWithLogPruned, "myDataWithLogValues.csv",
#             quote=FALSE, row.names=FALSE, na="",
#             fileEncoding="UTF-8")
## -----------------------------------------------------------------------------
dataWithSummary <- CondenseLogs(dataWithLogPruned)
knitr::kable(head(dataWithSummary)[, -c(6:20,32:64,72:86)])
## -----------------------------------------------------------------------------
dataStandardized <- StandardizeDataSet(dataWithSummary)
knitr::kable(head(dataStandardized)[, -c(6:20,32:64,72:86)])
## -----------------------------------------------------------------------------
dataOC <- subset(dataWithSummary, InCategory(Especie, 
                                             SubtaxonomySet("caprine"),
                                             zoologThesaurus$taxon))
knitr::kable(head(dataOC)[, -c(6:20,32:64)])
## -----------------------------------------------------------------------------
dataOCStandardized <- StandardizeDataSet(dataOC)
knitr::kable(head(dataOCStandardized)[, -c(6:20,32:64)])
## -----------------------------------------------------------------------------
dataOCWithWidth <- RemoveNACases(dataOCStandardized, measureNames = "Width")
dataOCWithLength <- RemoveNACases(dataOCStandardized, measureNames = "Length")
## ---- echo = FALSE------------------------------------------------------------
library(ggplot2)
## ---- fig.asp = 0.6, fig.width = 6, fig.align="center"------------------------
ggplot(dataOCStandardized, aes(x = Site, y = Width)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine widths") +
  ylab("Width log-ratio") +
  coord_flip()
## ---- fig.asp = 0.6, fig.width = 6, fig.align="center"------------------------
ggplot(dataOCStandardized, aes(x = Site, y = Length)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine lengths") +
  ylab("Length log-ratio") +
  coord_flip()
## ---- fig.asp = 0.7, fig.width = 6, fig.align="center"------------------------
ggplot(dataOCStandardized, aes(Width)) +
  geom_histogram(bins = 30, na.rm = TRUE) +
  ggtitle("Caprine widths") +
  xlab("Width log-ratio") +
  facet_grid(Site ~.) +
  theme_bw() +
  theme(panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank()) +
  theme(plot.title = element_text(hjust = 0.5, size = 14),
        axis.title.x = element_text(size = 10),
        axis.title.y = element_text(size = 10),
        axis.text = element_text(size = 10) ) +
  scale_y_continuous(breaks = c(0, 10, 20, 30))
## -----------------------------------------------------------------------------
levels0 <- unique(dataOCStandardized$Taxon)
levels0
## -----------------------------------------------------------------------------
dataOCStandardized$Taxon <- factor(dataOCStandardized$Taxon, 
                                   levels = levels0[c(1,3,2)])
levels(dataOCStandardized$Taxon)
## ---- message = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"-------
Ocolour <- c("#A2A475", "#D8B70A", "#81A88D")
ggplot(dataOCStandardized, aes(x=Site, y=Width)) + 
  geom_boxplot(aes(fill=Taxon), 
               notch = TRUE, alpha = 0, lwd = 0.377, outlier.alpha = 0,
               width = 0.5, na.rm = TRUE,
               position = position_dodge(0.75),
               show.legend = FALSE) + 
  geom_point(aes(colour = Taxon, shape = Taxon), 
             alpha = 0.7, size = 0.8, 
             position = position_jitterdodge(jitter.width = 0.3),
             na.rm = TRUE) +
  scale_colour_manual(values=Ocolour) +
  scale_shape_manual(values=c(15, 18, 16)) +
  theme_bw(base_size = 8) +
  ylab("LSI value") +
  ggtitle("Sheep/goat LSI width values") 
## ---- warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"-------
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(Site ~ Taxon)
TaxonSiteWidthHist
## ---- warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"-------
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(~Site)
TaxonSiteWidthHist
## -----------------------------------------------------------------------------
t.test(Length ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
## -----------------------------------------------------------------------------
t.test(Width ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
## ---- message = FALSE---------------------------------------------------------
library(stats)
pairwise.t.test(dataOCStandardized$Width, dataOCStandardized$Site, 
                pool.sd = FALSE)
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/inst/doc/index.R 
 | 
					
	---
title: "***zoolog***:  \n Zooarchaeological Analysis with Log-Ratios"
author: "Jose M Pozo, Angela Trentacoste,  Ariadna Nieto-Espinet, Silvia Guimarães Chiarelli, and Silvia Valenzuela-Lamas"
email: "[email protected], [email protected]"
date: "`r format(Sys.Date())`"
bibliography: ../inst/REFERENCES.bib
## to create the vignettes 'outside' the package, with table of content (toc)
#output:
#  html_document:
#    toc: true
#    toc_float:
#      collapsed: false
#      smooth_scroll: false
## to create the vignettes 'inside' the package, without table of content (toc)
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{zoolog}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```
# Introduction
The package *zoolog* includes functions and reference data to generate and 
manipulate log-ratios (also known as log size index (LSI) values) from measurements obtained on zooarchaeological material. Log ratios are used to compare the relative (rather than the absolute) dimensions of animals from archaeological contexts [@meadow1999use]. Essentially, the method compares archaeological measurements to a standard, producing a value that indicates how much larger or smaller the archaeological specimen is compared to that standard. *zoolog* is also able to seamlessly integrate data and references with heterogeneous nomenclature, which is internally managed by a zoolog thesaurus.
The methods included in the package were first developed in the framework of the ERC-Starting Grant 716298 [ZooMWest](https://zoomwest11.wixsite.com/zoomwest) 
(PI S. Valenzuela-Lamas), and were first used 
in the paper [@trentacoste2018pre]. They are based on the techniques proposed by @simpson1941large and @simpson1960quantitative, which calculate *log size index* (LSI) values as: 
$$
\mbox{LSI} = \log_{10} x - \log_{10} x_{\text{ref}} = \log_{10}(x/x_\text{ref}),
$$
where $x$ is the considered measure value and $x_\text{ref}$ is the corresponding reference value. Observe that LSI is defined using logarithms with base 10.
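As a quick numerical illustration in base R (the values below are made up):
``` r
x    <- 62.5        # hypothetical archaeological measurement (mm)
xRef <- 59.0        # hypothetical reference value (mm)
log10(x / xRef)     # LSI, approximately 0.025
```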
Several different sets of standard reference values are included in the package. These standards include several published and widely used biometric datasets (e.g. @davis1996measurements, @albarella2005neolithic) as well as other lesser-known standards. These references, as well as the data example provided with the package, are based on the measures and measure abbreviations defined in @von1976guide and @davis1992rapid. 
In general, zooarchaeological datasets are composed of skeletal remains representing many different anatomical body parts. In the investigation of animal size, the analysis of measurements from a given anatomical element provides the best control for the variables affecting size and shape and, as such, is the preferred option. Unfortunately, this approach is not always viable due to low sample sizes in some archaeological assemblages. This problem can be mitigated by calculating the LSI values for measurements with respect to a reference, which provides a means of aggregating biometric information from different body parts. The resulting log ratios can be compared and statistically analysed under reasonable conditions [@albarella2002size]. However, length, width, and depth measurements of the anatomical elements should still not be directly aggregated for statistical analysis. 
The package includes a *zoolog thesaurus* to facilitate its usage by research teams across the globe working in different languages and with different recording traditions. The thesaurus enables the *zoolog* package to recognise many different names for taxa and skeletal elements (e.g. "Bos taurus", "cattle", "BT",  "bovino", "bota"). Consequently, there is no need to use a particular, standardised recording code for the names of different taxa or elements.
The package also includes a *zoolog taxonomy* to facilitate the management of cases recorded as identified at different taxonomic ranks (Species, Genus, Tribe) and their match with the corresponding references.
## Acknowledgements
We are particularly grateful to Sabine Deschler-Erb and Barbara Stopp, from the University of Basel (Switzerland), for making the reference values of several specimens available through the ICAZ Roman Period Working Group, which have been included here with their permission. We also thank Francesca Slim and Dimitris Filioglou from the University of Groningen, Claudia Minniti from the University of Salento, Sierra Harding and Nimrod Marom from the University of Haifa, Carly Ameen and Helene Benkert from the University of Exeter, and Mikolaj Lisowski from the University of York for providing additional reference sets. Allowen Evin (CNRS-ISEM Montpellier) saw potential pitfalls in the use of Davis' references for sheep, which have now been solved.
The thesaurus has benefited from the contributions from Moussab Albesso, Canan Çakirlar, Jwana Chahoud, Jacopo De Grossi Mazzorin, Sabine Deschler-Erb, Dimitrios Filioglou, Armelle Gardeisen, Sierra Harding, Pilar Iborra, Michael MacKinnon, Nimrod Marom, Claudia Minniti, Francesca Slim, Barbara Stopp, and Emmanuelle Vila.
We are grateful to all of them for their contributions, comments, and help. In addition, users are encouraged to contribute to the thesaurus and other references so that *zoolog* can be expanded and adapted to any database.
# Installation
You can install the released version of zoolog from [CRAN](https://CRAN.R-project.org/package=zoolog) with:
``` r
install.packages("zoolog")
```
And the development version from [GitHub](https://github.com/) with:
``` r
install.packages("devtools")
devtools::install_github("josempozo/zoolog@HEAD", build_vignettes = TRUE)
```
# Reference standards {#sec:refStandards}
The package *zoolog* includes several osteometrical references. Currently, the references include reference values for the main domesticates and their agriotypes (*Bos*, *Ovis*, *Capra*, *Sus*), red deer (*Cervus elaphus*), Persian fallow deer (*Dama mesopotamica*), mountain gazelle (*Gazella gazella*), donkey (*Equus asinus*), horse (*Equus caballus*), European rabbit (*Oryctolagus cuniculus*), and grey wolf (*Canis lupus*). These are drawn from a variety of publications and resources (see below). In addition, the user can consider other references, or the provided references can be extended and updated integrating newer research data. Submission of extended/improved references is encouraged. Please contact the maintainer through the provided email address to make the new reference fully accessible within the package.
These references, as well as the data example provided with the package, are based on the measurements and measure abbreviations defined in @von1976guide and @davis1992rapid. Please note that Davis’ standard SD, equivalent to Von den Driesch’s DD, has been denoted as Davis.SD in order to resolve its incompatibility with Von den Driesch’s SD. This affects only Davis’ sheep reference and was introduced in Release 1.0.0.
The predefined reference sets included in *zoolog* are provided in the named list `reference`, currently comprising the following `r length(zoolog::reference)` sets:
```{r}
library(zoolog)
str(reference, max.level = 1)
```
The reference set `reference$Combi` is the default reference for computing the [log ratios](../help/LogRatios), since it includes the most comprehensive reference for each species.
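A different reference set can be selected when computing the log ratios. A minimal sketch (`yourData` is a placeholder for a user dataset; the `ref` argument name is taken from the [LogRatios](../help/LogRatios) help page):
``` r
library(zoolog)
# `yourData` is a placeholder, not an object provided by the package:
yourDataWithLog <- LogRatios(yourData, ref = reference$NietoDavisAlbarella)
```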
The package also includes a `referencesDatabase` collecting the taxon-specific
reference standards from all the considered resources. Each reference set is
composed of a different combination of taxon-specific standards selected from
this `referencesDatabase`. The selection is defined by the data frame 
`referenceSets`:
 
```{r echo=FALSE}
options(knitr.kable.NA = "")
knitr::kable(referenceSets)
```
A detailed description of the reference data, including its structure, properties, and considered resources can be found in the [ReferencesDatabase](../help/referencesDatabase) help page.
# Thesaurus
A [thesaurus set](../help/zoologThesaurus) is defined in order to make the package compatible with the different recording conventions and languages used by authors of zooarchaeological datasets. This enables the function [LogRatios](../help/LogRatios) to match values in the user's dataset with the corresponding ones in the reference standard, regardless of differences in nomenclature or naming conventions, as long as both terms are included in the relevant thesaurus. The thesaurus also allows the user to [standardize the nomenclature](../help/StandardizeNomenclature) of the dataset if desired. 
The user can also use other thesaurus sets or modify the provided one. In this latter case, we encourage the user to contact the maintainer at the provided email address so that the additions can be incorporated into the new versions of the package.
Currently, the zoolog thesaurus set includes four thesauri:
identifierThesaurus
  : For the column names that identify the variables used in computing
    the log ratios. It includes the categories *Taxon*, *Element*, *Measure*, 
    and *Standard*. Each category provides a series of equivalent names. 
    For instance, *Taxon* includes the options 
    `r a <- zoologThesaurus$identifier$Taxon; 
      paste0("*", paste(a[a != ""], collapse = "*, *"), "*")`. 
    This thesaurus is case, accent, and punctuation insensitive, so that, for
    instance, "Especie" is equivalent to 
    "ESPECIE" or "Espècie".
taxonThesaurus
  : For the names of the different taxa when recording animal
    bones. The current categories are 
    *`r paste(names(zoologThesaurus$taxon), collapse = "*, *")`*,
    each with different equivalent names. This thesaurus is case, accent,
    and punctuation insensitive, so that, for instance, "Bos" is equivalent to 
    "bos", "Bos.", or "Bos\ ".
elementThesaurus
  : Names of anatomical elements when recording animal bones. It currently
    includes `r ncol(zoologThesaurus$element)` categories
    (*`r paste(names(zoologThesaurus$element)[1:3], collapse = "*, *")`*, ...),
    each with different equivalent names. 
    This thesaurus is case, accent, and punctuation insensitive.
measureThesaurus
  : Names of the measurements. While the English abbreviations from
    @von1976guide and @davis1992rapid are widely used in published
    literature, this thesaurus enables other nomenclatures (e.g. original German
    abbreviations in @von1976guide) to be included. This thesaurus is case
    sensitive.
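As a small illustration of how a thesaurus resolves heterogeneous names, the following sketch assumes that all three variants below are listed in the bundled taxon thesaurus:
``` r
library(zoolog)
# All three variants should map to the same standard name, "Ovis aries":
StandardizeNomenclature(c("sheep", "ovar", "OVIS"), zoologThesaurus$taxon)
```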
# Taxonomy
A [taxonomy](../help/zoologTaxonomy) for the most typical zooarchaeological taxa has been introduced in the *zoolog* major release 1.0.0. The taxonomic hierarchy is structured up to the family level:
``` {r, echo=FALSE}
knitr::kable(zoologTaxonomy)
```
This taxonomy is intended to facilitate the management of cases recorded as identified at different taxonomic ranks and their match with the corresponding references.
The taxonomy is integrated in the function [LogRatios](../help/LogRatios), enabling it to automatically match different species in data and reference that are under the same genus. For instance, data of *Bos taurus* can be matched with reference of *Bos primigenius*, since both are *Bos*. It also enables the function `LogRatios` to detect when a case taxon has been only partially identified and recorded at a higher taxonomic rank, such as tribe or family, and to suggest to the user the set of possible reference species.
The taxonomy is complemented by a series of functions that let the user query the [subtaxonomy](../help/Subtaxonomy) or the set of species under a queried taxon at any taxonomic rank. 
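For instance, a minimal sketch of these queries (the exact output depends on the bundled taxonomy):
``` r
library(zoolog)
SubtaxonomySet("Caprini")   # all taxa under the tribe Caprini, at any rank
GetSpeciesIn("Caprini")     # only the species under the tribe Caprini
```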
# Functions
The full list of functions is available under the zoolog *help* page. We list them here sorted by their prominence for a typical user, and grouped by functionality:
[**LogRatios**](../help/LogRatios)
  : It computes the log ratios of the measurements in a dataset relative to 
    standard reference values. By default `reference$Combi` is used. The 
    function includes the option 'joinCategories' allowing several taxa 
    (typically *Ovis*, *Capra*, and unknown *Ovis/Capra*) to be considered
    together with the same reference taxon.
  : Note that without using 'joinCategories' any taxa not part of the selected
    reference set will be excluded. For instance, if using
    `reference$NietoDavisAlbarella`, log ratios for goats will not be calculated 
    unless 'joinCategories' is set to indicate that the *Ovis aries* standard
    should also be applied to goats.
[**CondenseLogs**](../help/CondenseLogs)
  : It condenses the calculated log ratio values into a reduced number of 
    features by grouping several measure log ratios and selecting or calculating 
    a representative feature value. By default the selected groups represent a
    single dimension, i.e. *Length*, *Width*, and *Depth*. Only one feature is 
    extracted per group. Currently, two methods are possible: "priority" 
    (default) or "average". 
  : This operation is motivated by two circumstances. First, not all
    measurements are available for every bone specimen, which obstructs their
    direct comparison and statistical analysis. Second, several measurements 
    can be strongly correlated (e.g. SD and Bd both represent bone width). 
    Thus, considering them as independent would produce an over-representation 
    of bone remains with multiple measurements per axis. Condensing each group 
    of measurements into a single feature (e.g. one measure per axis) alleviates
    both problems. 
  : The default method ("priority") selects the first available log ratio in 
    each group. By default, `CondenseLogs` applies the following grouping and 
    prioritization, introduced in @trentacoste2018pre: 
    *Length* considers in order of priority *GL*, *GLl*, *GLm*, and *HTC*. 
    *Width* considers in order of priority  *BT*, *Bd*, *Bp*, *SD*, *Bfd*, 
    and *Bfp*. 
    *Depth* considers in order of priority  *Dd*, *DD*, *BG*, and *Dp*. 
    This order maximises the robustness and reliability of the measurements, 
    as priority is given to the most abundant, most replicable, and least 
    age-dependent measurements. However, users can define their own features 
    with any grouping of measures and priorities (see the sketch after this 
    function list). 
    The method "average" extracts the mean per group, ignoring the 
    non-available log ratios.
[**RemoveNACases**](../help/RemoveNACases)
  : It removes the cases (table rows) for which all measurements of interest are
    non-available (NA). A particular list of measurement names can be explicitly
    provided or selected by a common initial pattern (e.g. prefix). The default
    setting removes the rows with no available log ratios to facilitate 
    subsequent analysis of the data.
[**InCategory**](../help/InCategory)
  : It checks if an element belongs to a category according to a thesaurus. 
    It is similar to [base::is.element](../help/is.element), returning a logical
    vector indicating if each element in a given vector is included in a given
    set. 
    But `InCategory` checks for equality assuming the equivalencies defined in 
    the given thesaurus. It is intended for the user to easily select a subset
    of data without having to standardize the analysed dataset.
[**Nomenclature standardization**](../help/StandardizeNomenclature)
  : This includes two functions enabling the user to map data with heterogeneous
    nomenclature into a standard one as defined in a thesaurus:
    * `StandardizeNomenclature` standardizes a character vector according to 
      a given thesaurus.
    * `StandardizeDataSet` standardizes column names and values of a data 
      frame according to a thesaurus set.
[**Subtaxonomy**](../help/Subtaxonomy)
  : This includes three functions enabling the user to query for some 
  information on the subtaxonomy under a queried taxon at any taxonomic 
  rank:
    * `Subtaxonomy` provides the subtaxonomy dataframe collecting the 
    species (rows) included in the queried taxon, and the taxonomic ranks
    (columns) up to its level.
    * `SubtaxonomySet` provides the set (character vector without repetitions) 
    of taxa, at any taxonomic rank, under 
    the queried taxon.
    * `GetSpeciesIn` provides the set of species included in the queried taxon.
  : By default, the subtaxonomy information is extracted from the *zoolog* 
  taxonomy.
[**AssembleReference**](../help/AssembleReference)
  : It allows the user to build new references assembling the desired
    taxon-specific references included in the *zoolog* `referencesDatabase` or
    in any other provided by the user.
[**Thesaurus readers and writers**](../help/ThesaurusReaderWriter)
  : This includes functions to read and write a single thesaurus 
    (`ReadThesaurus` and `WriteThesaurus`) and a thesaurus set
    (`ReadThesaurusSet` and `WriteThesaurusSet`).
    
[**Thesaurus management**](../help/ThesaurusManagement)
  : This includes functions to modify and check thesauri (see the sketch after this list):
  
    * `NewThesaurus` generates an empty thesaurus.
    * `AddToThesaurus` adds new names and categories to an existing thesaurus.
    * `RemoveRepeatedNames` cleans a thesaurus of any repeated names in any 
      category.
    * `ThesaurusAmbiguity` checks if there are names included in more than one
      category in a thesaurus.
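As a minimal sketch of some of these utilities (the names and categories are purely illustrative, and the `grouping` argument name is an assumption to be checked against the [CondenseLogs](../help/CondenseLogs) help page):
``` r
library(zoolog)
# Create an empty thesaurus and add a category of equivalent names:
th <- NewThesaurus()
th <- AddToThesaurus(th, c("sheep", "ovar"), "Ovis aries")

# User-defined condensing groups and priorities (measure choice arbitrary):
myGrouping <- list(Length = c("GL", "GLl"), Width = c("Bd", "BT"))
# dataCondensed <- CondenseLogs(dataWithLog, grouping = myGrouping)
```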
# Examples
The following examples are designed to be read and run sequentially. They 
represent a possible pipeline, meaningful for the processing and analysis of a
dataset. Only occasionally, a small diversion is included to illustrate some alternatives.
## Reading data and calculating log ratios
This example reads a dataset from a file in csv format and computes the
log-ratios. Then, the cases with no available log-ratios are removed.
Finally, the resulting dataset is saved in a file in csv format.
The first step is to set the local path to the folder where you have the dataset to be analysed (this is typically a comma-separated value (csv) file). Here the [example dataset](../help/dataValenzuelaLamas2008) from @valenzuela2008alimentacio included in the package is used:
```{r, echo = FALSE}
options(stringsAsFactors = FALSE)
```
```{r}
library(zoolog)
dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz", 
                        package = "zoolog")
data <- read.csv2(dataFile,
                  quote = "\"", header = TRUE, na.strings = "",
                  encoding = "UTF-8")
knitr::kable(head(data)[, -c(6:20,32:64)])
```
To improve readability, we have shown only the most relevant columns.
We now calculate the log-ratios using the function `LogRatios`. Only measurements that have an associated standard will be included in this calculation. The log values will appear as new columns with the prefix 'log' following the original columns with the raw measurements:
```{r}
dataWithLog <- LogRatios(data)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
```
### A note on the warnings of LogRatios
Observe that `LogRatios` has output two warnings above. The first one informs the user that the data include some cases of recorded taxa that did not match any species in the reference but were matched by genus. In many cases this is the behaviour expected by the user. For instance, using the reference of *Sus scrofa* to compute the log ratios of cases of *Sus domesticus* can be acceptable if no better reference is available or if both (sub)species are being compared. The warning also includes cases recorded by genus, a recording practice that is understandable when working with only one species in the genus, such as *Oryctolagus* or *Ovis* above. However, the warning also informs the user that this matching by genus can be suppressed by setting the parameter `useGenusIfUnambiguous = FALSE`. This can be useful if the user wants to exclude from the analysis the cases without a matching species. This warning could also make the user realize that the wrong reference was set by mistake.
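A minimal sketch of switching off the genus-level matching mentioned in the first warning:
``` r
# Cases whose species is absent from the reference are then left unmatched:
dataWithLogStrict <- LogRatios(data, useGenusIfUnambiguous = FALSE)
```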
The second warning informs the user that the data include some cases recorded with a taxon not specifying a species or genus, but at a higher taxonomic rank (for instance undetermined *ovis/capra*, equivalent to the tribe *Caprini*). It also covers cases recorded by genus for which the reference includes more than one species (as happens with *Equus* above). In addition, the user is reminded of the option to use the parameter `joinCategories` to indicate which reference species should be used for them.
These relationships between taxonomic categories are possible thanks to the taxonomy included in the package. The user can provide their own taxonomy if desired.
In the following examples, these warnings are suppressed unless a particular message is of interest.
## Dealing with *lazy* datasets
If we observe the example dataset more carefully, we can see that the measures
recorded for the *astragali* present a deviation from the measure definitions 
in @von1976guide and @davis1992rapid. To see this, 
we can select the cases where the element is an astragalus. The function [InCategory](../help/InCategory) allows us to
select them with the help of the thesaurus, without needing to know the terms actually used:
```{r}
AScases <- InCategory(dataWithLog$Os, "astragalus", zoologThesaurus$element)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
```
For the involved taxa, according to the measure definitions, astragali should have no *GL* measurement, but *GLl*. However, in the example dataset the *GLl* measurements have been recorded merged into the *GL* column. This is a data-entry simplification used by some researchers. It is possible because *GLl* is only relevant for the astragalus, while *GL* is not applicable to it. Thus, there cannot be any ambiguity between the two measures, since they can be identified by the bone element. However, since the zoolog reference uses the proper measure name for each bone element (*GLl* for the astragalus), the reference measure has not been correctly identified. Consequently, the log ratio *logGL* has `NA` values and the column *logGLl* does not exist.
The same effect happens for the measure *GLpe*, only relevant for the phalanges. 
The optional parameter `mergedMeasures` facilitates the processing of this type of simplified dataset. For the example data, we can use
```{r, warning = FALSE}
GLVariants <- list(c("GL", "GLl", "GLpe"))
dataWithLog <- LogRatios(data, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
```
This option allows us to automatically select, for each bone element, the corresponding measure present in the reference. Observe that now the log ratios have been computed and assigned to the column *logGL*.
## Using the same ovis reference for all caprines
We could be interested in obtaining the log ratios of all caprines, including *Ovis aries*, *Capra hircus*, and undetermined *Ovis/Capra*, with respect to the reference for *Ovis aries*. This can be set using the argument `joinCategories`.
```{r, warning = FALSE}
caprineCategory <- list(ovar = SubtaxonomySet("caprine"))
dataWithLog <- LogRatios(data, joinCategories = caprineCategory, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
```
The category to join can be defined manually, but here we have conveniently used the function `SubtaxonomySet` applied to the tribe *Caprini*:
```{r}
SubtaxonomySet("caprine")
```
Note that this option does not remove the distinction in the data between the different species; it just indicates that for these taxa the log ratios must be computed from the same reference ("ovar").
## Pruning the data from cases with no available measure
The cases without log-ratios can be removed to facilitate subsequent analyses:
```{r}
dataWithLogPruned <- RemoveNACases(dataWithLog)
knitr::kable(head(dataWithLogPruned[, -c(6:20,32:64)]))
```
You may want to write the resulting data to a file in the working directory (which needs to be set first):
```{r, eval = FALSE, warning = FALSE}
write.csv2(dataWithLogPruned, "myDataWithLogValues.csv", 
           quote=FALSE, row.names=FALSE, na="", 
           fileEncoding="UTF-8")
```
## Condensing log values
After calculating log ratios with the `LogRatios` function, many rows in the resulting data frame (`dataWithLog` in the example above) may contain multiple log values, i.e. several log values associated with a particular archaeological specimen. When analysing log ratios, it is preferable to avoid over-representing bones with a greater number of measurements and to account for each specimen only once. The `CondenseLogs` function extracts one length, one width, and one depth value from each row and places these in new *Length*, *Width*, and *Depth* columns. The 'priority' method described in @trentacoste2018pre is the default. Here, the default option has been used:
```{r}
dataWithSummary <- CondenseLogs(dataWithLogPruned)
knitr::kable(head(dataWithSummary)[, -c(6:20,32:64,72:86)])
```
Nevertheless, other options (e.g. the average of all width log values for a given specimen) can be chosen if preferred, as sketched below. 
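A minimal sketch of the averaging alternative (the `method` argument name is assumed from the [CondenseLogs](../help/CondenseLogs) help page):
``` r
# Average all available log values per group instead of selecting the
# highest-priority one:
dataWithSummaryAvg <- CondenseLogs(dataWithLogPruned, method = "average")
```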
## Standardizing the dataset nomenclature
The integration of the thesaurus functionality facilitates the use of datasets with heterogeneous nomenclatures, without further preprocessing. An extensive catalogue of names for equivalent categories has been integrated in the provided thesaurus set `zoologThesaurus`. These equivalences are internally and silently managed without requiring any action from the user. However, it can also be useful to explicitly standardize the data to make figures legible to a wider audience. This is especially helpful when different nomenclatures for the same concept are found in the same dataset, for instance "sheep" and "ovis" for the same taxon or "hum" and "HU" for the bone element.
If we standardize the studied data, we can see that `zoologThesaurus` will change "ovar" to "Ovis aries", "hum" to "humerus", and "Especie" to "Taxon", for instance.
```{r}
dataStandardized <- StandardizeDataSet(dataWithSummary)
knitr::kable(head(dataStandardized)[, -c(6:20,32:64,72:86)])
```
## Selecting only caprines
We may be interested in selecting all caprine elements. This can be done even without standardizing the data using the function `InCategory`:
```{r}
dataOC <- subset(dataWithSummary, InCategory(Especie, 
                                             SubtaxonomySet("caprine"),
                                             zoologThesaurus$taxon))
knitr::kable(head(dataOC)[, -c(6:20,32:64)])
```
Observe that no standardization is performed in the output subset. To standardize the subset data, `StandardizeDataSet` can be applied either before or after the subsetting.
```{r}
dataOCStandardized <- StandardizeDataSet(dataOC)
knitr::kable(head(dataOCStandardized)[, -c(6:20,32:64)])
```
Observe also that the distinction between *Ovis aries*, *Capra hircus*, and *Ovis/Capra* has not been removed from the data.
If we were interested in only one summary measure, *Width* or *Length*, we could retain the cases including this measure:
```{r}
dataOCWithWidth <- RemoveNACases(dataOCStandardized, measureNames = "Width")
dataOCWithLength <- RemoveNACases(dataOCStandardized, measureNames = "Length")
```
which gives `r nrow(dataOCWithWidth)` and `r nrow(dataOCWithLength)` cases, respectively.
## Different plots for data visualisation
Condensed log values can be visualised as histograms and box plots using ggplot [@wickham2011ggplot2]. Here we will look at some examples of plotting values from caprines.
### Horizontal boxplot with dots grouped by site
For the example plots we will use the package **ggplot2**.
```{r, echo = FALSE}
library(ggplot2)
```
We can now create a boxplot for the widths: 
```{r, fig.asp = 0.6, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(x = Site, y = Width)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine widths") +
  ylab("Width log-ratio") +
  coord_flip()
```
And another boxplot for the lengths:
```{r, fig.asp = 0.6, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(x = Site, y = Length)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine lengths") +
  ylab("Length log-ratio") +
  coord_flip()
```
### Histograms grouped by site
We may choose to plot the width data as a histogram: 
```{r, fig.asp = 0.7, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(Width)) +
  geom_histogram(bins = 30, na.rm = TRUE) +
  ggtitle("Caprine widths") +
  xlab("Width log-ratio") +
  facet_grid(Site ~.) +
  theme_bw() +
  theme(panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank()) +
  theme(plot.title = element_text(hjust = 0.5, size = 14),
        axis.title.x = element_text(size = 10),
        axis.title.y = element_text(size = 10),
        axis.text = element_text(size = 10) ) +
  scale_y_continuous(breaks = c(0, 10, 20, 30))
```
### Vertical boxplot with dots grouped by taxon and site
Here we reorder the factor levels of `dataOCStandardized$Taxon` to make
the order of the boxplots more intuitive.
```{r}
levels0 <- unique(dataOCStandardized$Taxon)
levels0
```
```{r}
dataOCStandardized$Taxon <- factor(dataOCStandardized$Taxon, 
                                   levels = levels0[c(1,3,2)])
levels(dataOCStandardized$Taxon)
```
and assign specific colours for each category:
```{r, message = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
Ocolour <- c("#A2A475", "#D8B70A", "#81A88D")
ggplot(dataOCStandardized, aes(x=Site, y=Width)) + 
  geom_boxplot(aes(fill=Taxon), 
               notch = TRUE, alpha = 0, lwd = 0.377, outlier.alpha = 0,
               width = 0.5, na.rm = TRUE,
               position = position_dodge(0.75),
               show.legend = FALSE) + 
  geom_point(aes(colour = Taxon, shape = Taxon), 
             alpha = 0.7, size = 0.8, 
             position = position_jitterdodge(jitter.width = 0.3),
             na.rm = TRUE) +
  scale_colour_manual(values=Ocolour) +
  scale_shape_manual(values=c(15, 18, 16)) +
  theme_bw(base_size = 8) +
  ylab("LSI value") +
  ggtitle("Sheep/goat LSI width values") 
```
### Histograms grouped by taxon and site  
```{r, warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(Site ~ Taxon)
TaxonSiteWidthHist
```
```{r, warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(~Site)
TaxonSiteWidthHist
```
## Statistical test
We may run a statistical test (here, Student's t-test) to check whether the differences in log ratio length values between the sites "OLD" and "ALP" are statistically significant:
```{r}
t.test(Length ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
```
Similarly, for the differences in log ratio width values:
```{r}
t.test(Width ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
```
For testing all possible pairs of sites, the p-values must be adjusted for multiple comparisons:
```{r, message = FALSE}
library(stats)
pairwise.t.test(dataOCStandardized$Width, dataOCStandardized$Site, 
                pool.sd = FALSE)
```
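By default, `pairwise.t.test` applies Holm's correction; other corrections can be requested through its `p.adjust.method` argument, for example:
``` r
pairwise.t.test(dataOCStandardized$Width, dataOCStandardized$Site,
                pool.sd = FALSE, p.adjust.method = "bonferroni")
```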
# How to cite the package *zoolog*
We have invested a lot of time and effort in creating *zoolog*. Please cite the package if you publish an analysis or results obtained using it.
For example: "Log-ratio calculation and analysis were done using the package zoolog [@pozo2022zoolog] in R 4.0.3 [@RCoreTeam2020]."
To get the details of the most recent version of the package, you can use the R citation function: `citation("zoolog")`.
When publishing it is also recommended that the references for standards are properly cited, as well as any details on selecting feature values. The details on the source of each of the reference standards are given in the help page [referencesDatabase](../help/referencesDatabase). For instance, if using the zoolog `reference$NietoDavisAlbarella` and the default selection method of features, a fair description may be: "Published references for cattle [@nieto2018element], sheep/goat [@davis1996measurements] and pigs [@albarella2005neolithic] were used as standards. One length and one width log ratio value from each specimen were included in the analysis, with values selected following the default zoolog 'priority' method [@trentacoste2018pre]: length values - GL, GLl, GLm, HTC; width values - BT, Bd, Bp, SD, Bfd, Bfp."
# Bibliography
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/inst/doc/index.Rmd 
 | 
					
	---
title: "***zoolog***:  \n Zooarchaeological Analysis with Log-Ratios"
author: "Jose M Pozo, Angela Trentacoste,  Ariadna Nieto-Espinet, Silvia Guimarães Chiarelli, and Silvia Valenzuela-Lamas"
email: "[email protected], [email protected]"
date: "`r format(Sys.Date())`"
bibliography: ../inst/REFERENCES.bib
## to create the vignettes 'outside' the package, with table of content (toc)
#output:
#  html_document:
#    toc: true
#    toc_float:
#      collapsed: false
#      smooth_scroll: false
## to create the vignettes 'inside' the package, without table of content (toc)
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{zoolog}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```
# Introduction
The package *zoolog* includes functions and reference data to generate and 
manipulate log-ratios (also known as log size index (LSI) values) from measurements obtained on zooarchaeological material. Log ratios are used to compare the relative (rather than the absolute) dimensions of animals from archaeological contexts [@meadow1999use]. Essentially, the method compares archaeological measurements to a standard, producing a value that indicates how much larger or smaller the archaeological specimen is compared to that standard. *zoolog* is also able to seamlessly integrate data and references with heterogeneous nomenclature, which is internally managed by a zoolog thesaurus.
The methods included in the package were first developed in the framework of the ERC-Starting Grant 716298 [ZooMWest](https://zoomwest11.wixsite.com/zoomwest) 
(PI S. Valenzuela-Lamas), and were first used 
in the paper [@trentacoste2018pre]. They are based on the techniques proposed by @simpson1941large and @simpson1960quantitative, which calculates *log size index* (LSI) values as: 
$$
\mbox{LSI} = \log_{10} x - \log_{10} x_{\text{ref}} = \log_{10}(x/x_\text{ref}),
$$
where $x$ is the considered measure value and $x_\text{ref}$ is the corresponding reference value. Observe that LSI is defined using logarithms with base 10.
Several different sets of standard reference values are included in the package. These standards include several published and widely used biometric datasets (e.g. @davis1996measurements, @albarella2005neolithic) as well as other less known standards. These references, as well as the data example provided with the package, are based on the measures and measure abbreviations defined in @von1976guide and @davis1992rapid. 
In general, zooarchaeological datasets are composed of skeletal remains representing many different anatomical body parts. In investigation of animal size, the analysis of measurements from a given anatomical element provides the best control for the variables affecting size and shape and, as such, it is the preferable option. Unfortunately, this approach is not always viable due to low sample sizes in some archaeological assemblages. This problem can be mitigated by calculating the LSI values for measurements with respect to a reference, which provides a means of aggregating biometric information from different body parts. The resulting log ratios can be compared and statistically analysed under reasonable conditions [@albarella2002size]. However, length, width, and depth measurements of the anatomical elements still should not be directly aggregated for statistical analysis. 
The package includes a *zoolog thesaurus* to facilitate its usage by research teams across the globe, and working in different languages and with different recording traditions. The thesaurus enables the *zoolog* package to recognises many different names for taxa and skeletal elements (e.g. "Bos taurus", "cattle", "BT",  "bovino", "bota"). Consequently, there is no need to use a particular, standardised recording code for the names of different taxa or elements.
The package also includes a *zoolog taxonomy* to facilitate the management of cases recorded as identified at different taxonomic ranks (Species, Genus, Tribe) and their match with the corresponding references.
## Acknowledgements
We are particularly grateful to Sabine Deschler-Erb and Barbara Stopp, from the University of Basel (Switzerland) for making the reference values of several specimens available through the ICAZ Roman Period Working Group, which have been included here with their permission. We also thank Francesca Slim and Dimitris Filioglou from the University of Groningen, Claudia Minniti from University of Salento, Sierra Harding and Nimrod Marom from the University of Haifa, Carly Ameen and Helene Benkert from the University of Exeter, and Mikolaj Lisowski from the University of York for providing additional reference sets. Allowen Evin (CNRS-ISEM Montpellier) saw potential pitfalls in the use of Davis' references for sheep, which have been now solved.
The thesaurus has benefited from the contributions from Moussab Albesso, Canan Çakirlar, Jwana Chahoud, Jacopo De Grossi Mazzorin, Sabine Deschler-Erb, Dimitrios Filioglou, Armelle Gardeisen, Sierra Harding, Pilar Iborra, Michael MacKinnon, Nimrod Marom, Claudia Minniti, Francesca Slim, Barbara Stopp, and Emmanuelle Vila.
We are grateful to all of them for their contributions, comments, and help. In addition, users are encouraged to contribute to the thesaurus and other references so that *zoolog* can be expanded and adapted to any database.
# Installation
You can install the released version of zoolog from [CRAN](https://CRAN.R-project.org/package=zoolog) with:
``` r
install.packages("zoolog")
```
And the development version from [GitHub](https://github.com/) with:
``` r
install.packages("devtools")
devtools::install_github("josempozo/zoolog@HEAD", build_vignettes = TRUE)
```
# Reference standards {#sec:refStandards}
The package *zoolog* includes several osteometrical references. Currently, the references include reference values for the main domesticates and their agriotypes (*Bos*, *Ovis*, *Capra*, *Sus*), red deer (*Cervus elaphus*), Persian fallow deer (*Dama mesopotamica*), mountain gazelle (*Gazella gazella*), donkey (*Equus asinus*), horse (*Equus caballus*), European rabbit (*Oryctolagus cuniculus*), and grey wolf (*Canis lupus*). These are drawn from a variety of publications and resources (see below). In addition, the user can consider other references, or the provided references can be extended and updated integrating newer research data. Submission of extended/improved references is encouraged. Please, contact the maintainer through the provided email address to make the new reference fully accessible within the package.
These references, as well as the data example provided with the package, are based on the measurements and measure abbreviations defined in @von1976guide and @davis1992rapid. Please, note that Davis’ standard SD, equivalent to Von den Driesch’s DD, has been denoted as Davis.SD in order to resolve its incompatibility with Von den Driesch’s SD. This affects only Davis’ sheep reference and was introduced in Release 1.0.0.
The predefined reference sets included in *zoolog* are provided in the named list `reference`, currently comprising the following `r length(zoolog::reference)` sets:
```{r}
library(zoolog)
str(reference, max.level = 1)
```
The reference set `reference$Combi` is the default reference for computing the [log ratios](../help/LogRatios), since it includes the most comprehensive reference for each species.
The package also includes a `referencesDatabase` collecting the taxon-specific
reference standards from all the considered resources. Each reference set is
composed of a different combination of taxon-specific standards selected from
this `referencesDatabase`. The selection is defined by the data frame 
`referenceSets`:
 
```{r echo=FALSE}
options(knitr.kable.NA = "")
knitr::kable(referenceSets)
```
A detailed description of the reference data, including its structure, properties, and considered resources can be found in the [ReferencesDatabase](../help/referencesDatabase) help page.
# Thesaurus
A [thesaurus set](../help/zoologThesaurus) is defined in order to make the package compatible with the different recording conventions and languages used by authors of zooarchaeological datasets. This enables the function [LogRatios](../help/LogRatios) to match values in the user's dataset with the corresponding ones in the reference standard, regardless of differences in nomenclature or naming conventions, as long as both terms are included in the relevant thesaurus. The thesaurus also allows the user to [standardize the nomenclature](../help/StandardizeNomenclature) of the dataset if desired. 
The user can also use other thesaurus sets or modify the provided one. In this latter case, we encourage the user to contact the maintainer at the provided email address so that the additions can be incorporated into the new versions of the package.
Currently, the zoolog thesaurus set includes four thesauri:
identifierThesaurus
  : For the column names that identify the variables used in computing
    the log ratios. It includes the categories *Taxon*, *Element*, *Measure*, 
    and *Standard*. Each category provides a series of equivalent names. 
    For instance, *Taxon* includes the options 
    `r a <- zoologThesaurus$identifier$Taxon; 
      paste0("*", paste(a[a != ""], collapse = "*, *"), "*")`. 
    This thesaurus is case, accent, and punctuation insensitive, so that, for
    instance, "Especie" is equivalent to 
    "ESPECIE" or "Espècie".
taxonThesaurus
  : For the names of the different taxa when recording animal
    bones. The current categories are 
    *`r paste(names(zoologThesaurus$taxon), collapse = "*, *")`*,
    each with different equivalent names. This thesaurus is case, accent,
    and punctuation insensitive, so that, for instance, "Bos" is equivalent to 
    "bos", "Bos.", or "Bos\ ".
elementThesaurus
  : Names of anatomical elements when recording animal bones. It currently
    includes `r ncol(zoologThesaurus$element)` categories
    (*`r paste(names(zoologThesaurus$element)[1:3], collapse = "*, *")`*, ...),
    each with different equivalent names. 
    This thesaurus is case, accent, and punctuation insensitive.
measureThesaurus
  : Names of the measurements. While the English abbreviations from
    @von1976guide and @davis1992rapid are widely used in published
    literature, this thesaurus enables other nomenclatures (e.g. original German
    abbreviations in @von1976guide) to be included. This thesaurus is case
    sensitive.
# Taxonomy
A [taxonomy](../help/zoologTaxonomy) for the most typical zooarchaeological taxa has been introduced in the *zoolog* major release 1.0.0. The taxonomic hierarchy is structured up to the family level:
``` {r, echo=FALSE}
knitr::kable(zoologTaxonomy)
```
This taxonomy is intended to facilitate the management of cases recorded as identified at different taxonomic ranks and their match with the corresponding references.
The taxonomy is integrated in the function [LogRatios](../help/LogRatios), enabling it to automatically match different species in data and reference that are under the same genus. For instance, data of *Bos taurus* can be matched with reference of *Bos primigenius*, since both are *Bos*. It also enables the function `LogRatios` to detect when a case taxon has been only partially identified and recorded at a higher taxonomic rank, such as tribe or family, and to suggest the user the set of possible reference species.
Besides, it is complemented with a series of functions enabling the user to query for the [subtaxonomy](../help/Subtaxonomy) or the set of species under a queried taxon at any taxonomic rank. 
# Functions
The full list of functions is available under the zoolog *help* page. We list them here sorted by their prominence for a typical user, and grouped by functionality:
[**LogRatios**](../help/LogRatios)
  : It computes the log ratios of the measurements in a dataset relative to 
    standard reference values. By default `reference$Combi` is used. The 
    function includes the option 'joinCategories' allowing several taxa 
    (typically *Ovis*, *Capra*, and unknown *Ovis/Capra*) to be considered
    together with the same reference taxon.
  : Note that without using 'joinCategories' any taxa not part of the selected
    reference set will be excluded. For instance, if using
    `reference$NietoDavisAlbarella`, log ratios for goats will not be calculated 
    unless 'joinCategories' is set to indicate that the *Ovis aries* standard
    should also be applied to goats.
[**CondenseLogs**](../help/CondenseLogs)
  : It condenses the calculated log ratio values into a reduced number of 
    features by grouping several measure log ratios and selecting or calculating 
    a representative feature value. By default the selected groups represent a
    single dimension, i.e. *Length*, *Width*, and *Depth*. Only one feature is 
    extracted per group. Currently, two methods are possible: "priority" 
    (default) or "average". 
  : This operation is motivated by two circumstances. First, not all
    measurements are available for every bone specimen, which obstructs their
    direct comparison and statistical analysis. Second, several measurements 
    can be strongly correlated (e.g. SD and Bd both represent bone width). 
    Thus, considering them as independent would produce an over-representation 
    of bone remains with multiple measurements per axis. Condensing each group 
    of measurements into a single feature (e.g. one measure per axis) alleviates
    both problems. 
  : The default method ("priority"), selects the first available log ratio in 
    each group. Besides, `CondenseLogs` employs the following by-default group
    and prioritization introduced in @trentacoste2018pre: 
    *Length* considers in order of priority *GL*, *GLl*, *GLm*, and *HTC*. 
    *Width* considers in order of priority  *BT*, *Bd*, *Bp*, *SD*, *Bfd*, 
    and *Bfp*. 
    *Depth* considers in order of priority  *Dd*, *DD*, *BG*, and *Dp*. 
    This order maximises the robustness and
    reliability of the measurements, as priority is given to the most abundant,
    more replicable, and less age dependent measurements. But users can set 
    their own features with any group of measures and priorities. 
    The method "average" extracts the mean per group, ignoring the 
    non-available log ratios.
[**RemoveNACases**](../help/RemoveNACases)
  : It removes the cases (table rows) for which all measurements of interest are
    non-available (NA). A particular list of measurement names can be explicitly
    provided or selected by a common initial pattern (e.g. prefix). The default
    setting removes the rows with no available log ratios to facilitate 
    subsequent analysis of the data.
[**InCategory**](../help/InCategory)
  : It checks if an element belongs to a category according to a thesaurus. 
    It is similar to [base::is.element](../help/is.element), returning a logical
    vector indicating if each element in a given vector is included in a given
    set. 
    But `InCategory` checks for equality assuming the equivalencies defined in 
    the given thesaurus. It is intended for the user to easily select a subset
    of data without having to standardize the analysed dataset.
[**Nomenclature standardization**](../help/StandardizeNomenclature)
  : This includes two functions enabling the user to map data with heterogeneous
    nomenclature into a standard one as defined in a thesaurus:
    * `StandardizeNomenclature` standardizes a character vector according to 
      a given thesaurus.
    * `StandardizeDataSet` standardizes column names and values of a data 
      frame according to a thesaurus set.
[**Subtaxonomy**](../help/Subtaxonomy)
  : This includes three functions enabling the user to query for some 
  information on the subtaxonomy under a queried taxon at any taxonomic 
  rank:
    * `Subtaxonomy` provides the subtaxonomy dataframe collecting the 
    species (rows) included in the queried taxon, and the taxonomic ranks
    (columns) up to its level.
    * `SubtaxonomySet` provides the set (character vector without repetions) 
    of taxa, at any taxonomic rank, under 
    the queried taxon.
    * `GetSpeciesIn` provides the set of species included in the queried taxon.
  : By default, the subtaxonomy information is extracted from the *zoolog* 
  taxonomy.
[**AssembleReference**](../help/AssembleReference)
  : It allows the user to build new references assembling the desired
    taxon-specific references included in the *zoolog* `referencesDataSet` or
    in any other provided by the user.
[**Thesaurus readers and writers**](../help/ThesaurusReaderWriter)
  : This includes functions to read and write a single thesaurus 
    (`ReadThesaurus` and `WriteThesaurus`) and a thesaurus set
    (`ReadThesaurusSet` and `WriteThesaurusSet`).
    
[**Thesaurus management**](../help/ThesaurusManagement)
  : This includes functions to modify and check thesauri:
  
    * `NewThesaurus` generates an empty thesarus.
    * `AddToThesaurus` adds new names and categories to an existing thesaurus.
    * `RemoveRepeatedNames` cleans a thesarus from any repeated names on any 
      category.
    * `ThesaurusAmbiguity` checks if there are names included in more than one
      category in a thesaurus.
# Examples
The following examples are designed to be read and run sequentially. They 
represent a possible pipeline, meaningful for the processing and analysis of a
dataset. Only occasionally, a small diversion is included to illustrate some alternatives.
## Reading data and calculating log ratios
This example reads a dataset from a file in csv format and computes the
log-ratios. Then, the cases with no available log-ratios are removed.
Finally, the resulting dataset is saved in a file in csv format.
The first step is to set the local path to the folder where you have the dataset to be analysed (this is typically a comma-separated value (csv) file). Here the [example dataset](../help/dataValenzuelaLamas2008) from @valenzuela2008alimentacio included in the package is used:
```{r, echo = FALSE}
options(stringsAsFactors = FALSE)
```
```{r}
library(zoolog)
dataFile <- system.file("extdata", "dataValenzuelaLamas2008.csv.gz", 
                        package = "zoolog")
data = read.csv2(dataFile,
                 quote = "\"", header = TRUE, na.strings = "",
                 encoding = "UTF-8")
knitr::kable(head(data)[, -c(6:20,32:64)])
```
To enhance the visibility, we have shown only the most relevant columns.
We now calculate the log-ratios using the function `LogRatios`. Only measurements that have an associated standard will be included in this calculation. The log values will appear as new columns with the prefix 'log' following the original columns with the raw measurements:
```{r}
dataWithLog <- LogRatios(data)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
```
### A note on the warnings of LogRatios
Observe that `LogRatios` has output two warnings above. The first one informs the user that the data includes some cases of recorded taxa that did not match any species in the reference, but was matched by genus. In many cases this can be the behaviour expected by the user. For instance, using the reference of *Sus scrofa* for computing the log ratios of cases of *Sus domesticus*, can be acceptable if no better reference is available or if both (sub)species are being compared. The warning also includes cases recorded by genus, a recording practice that is understandable when working only with one species in the genus, such as *Oryctolagus* or *Ovis* above. However, the warning also informs the user that this matching by genus can be suppressed by setting the parameter `useGenusIfUnambiguous = FALSE`. This can be the case if the user wants to exclude from the analysis the cases with not matching species. Besides, this warning could make the user realize that the wrong reference was set by mistake.
The second warning informs the user that the data includes some cases recorded with a taxon not specifying a species or genus, but at a higher taxonomical rank (for instance undecided *ovis/capra* (equivalent to tribe *Caprini*)). It also informs of the cases recorded by the genus but for which the reference includes more than one species (as happens with *Equus* above). In addition, the user is remembered of the option to use the parameter `joinCategories` to indicate which reference species should be used for them.
These relationships between taxonomical categories is possible thanks to the taxonomy included in the package. The user can include their own taxonomy if desired.
In the following examples, these warnings are suppressed unless some particular message is of interest for them.
## Dealing with *lazy* datasets
If we observe the example dataset more carefully, we can see that the measures
recorded for the *astragali* presents a deviation from the measure definitions 
in @von1976guide and @davis1992rapid. To see this, 
we can select the cases where the element is an astragalus. The function [InCategory](../help/InCategory) allows us to
select them with the help of the thesaurus without requiring to know the terms actually used:
```{r}
AScases <- InCategory(dataWithLog$Os, "astragalus", zoologThesaurus$element)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
```
For the involved taxa, according to the measure definitions, astragali should have no *GL* measurement, but *GLl*. However, in the example dataset the *GLl* measurements have been recorded merged in the *GL* column. This is a data-entry simplification that is used by some researchers. It is possible because *GLl* is only relevant for the astragalus, while *GL* is not applicable to it. Thus, there cannot be any ambiguity between both measures since they can be identified by the bone element. However, since the zoolog reference uses the proper measure name for each bone element (*GLl* for the astragalus), the reference measure has not been correctly identified. Consequently, the log ratio *logGL* has `NA` values and the column *logGLl* does not exists.
The same effect happens for the measure *GLpe*, only relevant for the phalanges. 
The optional parameter mergedMeasures facilitates the processing of this type of simplified datasets. For the example data, we can use
```{r, warning = FALSE}
GLVariants <- list(c("GL", "GLl", "GLpe"))
dataWithLog <- LogRatios(data, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog[AScases, -c(6:20,32:64)]))
```
This option allows us to automatically select, for each bone element, the corresponding measure present in the reference. Observe that now the log ratios have been computed and assigned to the column *logGL*.
## Using the same ovis reference for all caprines
We could be interested in obtaining the log ratios of all caprines, including *Ovis aries*, *Capra hircus*, and undetermined *Ovis/Capra*, with respect to the reference for *Ovis aries*. This can be set using the argument `joinCategories`.
```{r, warning = FALSE}
caprineCategory <- list(ovar = SubtaxonomySet("caprine"))
dataWithLog <- LogRatios(data, joinCategories = caprineCategory, mergedMeasures = GLVariants)
knitr::kable(head(dataWithLog)[, -c(6:20,32:64)])
```
The category to join can be manually defined, but here we have conveniently used the function `Subtaxonomy` applied to the tribe *Caprini*:
```{r}
SubtaxonomySet("caprine")
```
Note that this option does not remove the distinction in the data between the different species, it just indicates that for these taxa the log ratios must be computed from the same reference ("ovar").
## Pruning the data from cases with no available measure
The cases without log-ratios can be removed to facilitate subsequent analyses:
```{r}
dataWithLogPruned <- RemoveNACases(dataWithLog)
knitr::kable(head(dataWithLogPruned[, -c(6:20,32:64)]))
```
You may want to write the resulting data frame to a file in the working directory (you need to set the working directory first):
```{r, eval = FALSE, warning = FALSE}
write.csv2(dataWithLogPruned, "myDataWithLogValues.csv", 
           quote=FALSE, row.names=FALSE, na="", 
           fileEncoding="UTF-8")
```
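The file can later be read back with matching settings (a sketch using base R; the file name is the one written above):
```{r, eval = FALSE}
# Read the file back, matching the write.csv2 settings:
# semicolon-separated values, UTF-8 encoding, empty strings as NA.
myData <- read.csv2("myDataWithLogValues.csv",
                    fileEncoding = "UTF-8", na.strings = "")
```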
## Condensing log values
After calculating log ratios using the `LogRatios` function, many rows in the resulting dataframe (`dataWithLog` in the example above) may contain multiple log values, i.e. several log values associated with a particular archaeological specimen. When analysing log ratios, it is preferable to avoid over-representation of bones with a greater number of measurements and to count each specimen only once. The `CondenseLogs` function extracts one length, one width, and one depth value from each row and places these in new *Length*, *Width*, and *Depth* columns. The 'priority' method described in @trentacoste2018pre has been set as default. Here, the default option has been used:
```{r}
dataWithSummary <- CondenseLogs(dataWithLogPruned)
knitr::kable(head(dataWithSummary)[, -c(6:20,32:64,72:86)])
```
Nevertheless, other options (e.g. average of all width log values for a given specimen) can be chosen if preferred. 
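As an illustration of the averaging alternative, a minimal base-R sketch (not the package's implementation) could summarize the available width log values per case; the `log`-prefixed column names below are assumptions following the naming seen for *logGL*:
```{r, eval = FALSE}
# Minimal sketch of an "average" condensation over the width measures
# listed in the 'priority' method (BT, Bd, Bp, SD, Bfd, Bfp).
widthCols <- intersect(c("logBT", "logBd", "logBp", "logSD", "logBfd", "logBfp"),
                       names(dataWithLogPruned))
dataWithLogPruned$WidthAvg <- rowMeans(dataWithLogPruned[, widthCols, drop = FALSE],
                                       na.rm = TRUE)
```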
## Standardizing the dataset nomenclature
The integration of the thesaurus functionality facilitates the use of datasets with heterogeneous nomenclatures, without further preprocessing. An extensive catalogue of names for equivalent categories has been integrated in the provided thesaurus set `zoologThesaurus`. These equivalences are internally and silently managed without requiring any action from the user. However, it can also be interesting to explicitly standardize the data to make figures legible to a wider audience. This is especially useful when different nomenclatures for the same concept are found in the same dataset, for instance "sheep" and "ovis" for the same taxon, or "hum" and "HU" for the bone element.
If we standardize the studied data, we can see that `zoologThesaurus` will change "ovar" to "Ovis aries", "hum" to "humerus", and "Especie" to "Taxon", for instance.
```{r}
dataStandardized <- StandardizeDataSet(dataWithSummary)
knitr::kable(head(dataStandardized)[, -c(6:20,32:64,72:86)])
```
## Selecting only caprines
We may be interested in selecting all caprine elements. This can be done, even without standardizing the data, using the function `InCategory`:
```{r}
dataOC <- subset(dataWithSummary, InCategory(Especie, 
                                             SubtaxonomySet("caprine"),
                                             zoologThesaurus$taxon))
knitr::kable(head(dataOC)[, -c(6:20,32:64)])
```
Observe that no standardization is performed in the output subset. To standardize the subset data, `StandardizeDataSet` can be applied either before or after the subsetting.
```{r}
dataOCStandardized <- StandardizeDataSet(dataOC)
knitr::kable(head(dataOCStandardized)[, -c(6:20,32:64)])
```
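Alternatively (a sketch relying on the standardized column name *Taxon* rather than *Especie*), the standardization can be applied first and the subsetting afterwards:
```{r, eval = FALSE}
# Standardize first, then subset; after standardization the taxon
# column is named "Taxon".
dataOCStandardized2 <- subset(StandardizeDataSet(dataWithSummary),
                              InCategory(Taxon,
                                         SubtaxonomySet("caprine"),
                                         zoologThesaurus$taxon))
```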
Observe also that the distinction between *Ovis aries*, *Capra hircus*, and *Ovis/Capra* has not been removed from the data.
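We can verify this by tabulating the taxa remaining in the subset:
```{r}
table(dataOCStandardized$Taxon)
```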
If we were interested only in one summary measure, *Width* or *Length*, we could retain the cases including this measure:
```{r}
dataOCWithWidth <- RemoveNACases(dataOCStandardized, measureNames = "Width")
dataOCWithLength <- RemoveNACases(dataOCStandardized, measureNames = "Length")
```
which gives `r nrow(dataOCWithWidth)` and `r nrow(dataOCWithLength)` cases, respectively.
## Different plots for data visualisation
Condensed log values can be visualised as histograms and box plots using **ggplot2** [@wickham2011ggplot2]. Here we will look at some examples of plotting values from caprines.
### Horizontal boxplot with dots grouped by site
For the example plots we will use the package **ggplot2**.
```{r, echo = FALSE}
library(ggplot2)
```
We can now create a boxplot for the widths: 
```{r, fig.asp = 0.6, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(x = Site, y = Width)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine widths") +
  ylab("Width log-ratio") +
  coord_flip()
```
And another boxplot for the lengths:
```{r, fig.asp = 0.6, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(x = Site, y = Length)) +
  geom_boxplot(outlier.shape = NA, na.rm = TRUE) +
  geom_jitter(width = 0.2, height = 0, alpha = 1/2, color = 4, na.rm = TRUE) +
  theme_bw() +
  ggtitle("Caprine lengths") +
  ylab("Length log-ratio") +
  coord_flip()
```
### Histograms grouped by site
We may choose to plot the width data as a histogram: 
```{r, fig.asp = 0.7, fig.width = 6, fig.align="center"}
ggplot(dataOCStandardized, aes(Width)) +
  geom_histogram(bins = 30, na.rm = TRUE) +
  ggtitle("Caprine widths") +
  xlab("Width log-ratio") +
  facet_grid(Site ~.) +
  theme_bw() +
  theme(panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank()) +
  theme(plot.title = element_text(hjust = 0.5, size = 14),
        axis.title.x = element_text(size = 10),
        axis.title.y = element_text(size = 10),
        axis.text = element_text(size = 10) ) +
  scale_y_continuous(breaks = c(0, 10, 20, 30))
```
### Vertical boxplot with dots grouped by taxon and site
Here we reorder the factor levels of `dataOCStandardized$Taxon` to make
the order of the boxplots more intuitive.
```{r}
levels0 <- unique(dataOCStandardized$Taxon)
levels0
```
```{r}
dataOCStandardized$Taxon <- factor(dataOCStandardized$Taxon, 
                                   levels = levels0[c(1,3,2)])
levels(dataOCStandardized$Taxon)
```
and assign specific colours for each category:
```{r, message = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
Ocolour <- c("#A2A475", "#D8B70A", "#81A88D")
ggplot(dataOCStandardized, aes(x=Site, y=Width)) + 
  geom_boxplot(aes(fill=Taxon), 
               notch = TRUE, alpha = 0, lwd = 0.377, outlier.alpha = 0,
               width = 0.5, na.rm = TRUE,
               position = position_dodge(0.75),
               show.legend = FALSE) + 
  geom_point(aes(colour = Taxon, shape = Taxon), 
             alpha = 0.7, size = 0.8, 
             position = position_jitterdodge(jitter.width = 0.3),
             na.rm = TRUE) +
  scale_colour_manual(values=Ocolour) +
  scale_shape_manual(values=c(15, 18, 16)) +
  theme_bw(base_size = 8) +
  ylab("LSI value") +
  ggtitle("Sheep/goat LSI width values") 
```
### Histograms grouped by taxon and site  
```{r, warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(Site ~ Taxon)
TaxonSiteWidthHist
```
```{r, warning = FALSE, fig.asp = 0.6, fig.width = 6, fig.align="center"}
TaxonSiteWidthHist <- ggplot(dataOCStandardized, aes(Width, fill = Taxon)) + 
  geom_histogram(bins = 30, alpha = 0.5, position = "identity") + 
  ggtitle("Sheep/goat Widths") + facet_grid(~Site)
TaxonSiteWidthHist
```
## Statistical test
We may run a statistical test (here Welch's two-sample t-test, the default of R's `t.test`) to check whether the differences in log-ratio length values between the sites "OLD" and "ALP" are statistically significant:
```{r}
t.test(Length ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
```
Similarly, for the differences in log ratio width values:
```{r}
t.test(Width ~ Site, dataOCStandardized, 
       subset = Site %in% c("OLD", "ALP"),
       na.action = "na.omit")
```
For testing all possible pairs of sites, the p-values must be adjusted for multiple comparisons:
```{r, message = FALSE}
library(stats)
pairwise.t.test(dataOCStandardized$Width, dataOCStandardized$Site, 
                pool.sd = FALSE)
```
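By default, `pairwise.t.test` applies Holm's correction. A different adjustment can be requested through the `p.adjust.method` argument, for example Benjamini-Hochberg:
```{r, eval = FALSE}
# Same pairwise comparisons, with Benjamini-Hochberg adjustment
pairwise.t.test(dataOCStandardized$Width, dataOCStandardized$Site,
                pool.sd = FALSE, p.adjust.method = "BH")
```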
# How to cite the package *zoolog*
We have invested a lot of time and effort in creating *zoolog*. Please cite the package if you publish an analysis or results obtained using it.
For example, "Log ratios calculation and analysis was done using the package zoolog  [@pozo2022zoolog] in R 4.0.3 [@RCoreTeam2020]."
To get the details of the most recent version of the package, you can use the R citation function: `citation("zoolog")`.
When publishing it is also recommended that the references for standards are properly cited, as well as any details on selecting feature values. The details on the source of each of the reference standards are given in the help page [referencesDatabase](../help/referencesDatabase). For instance, if using the zoolog `reference$NietoDavisAlbarella` and the default selection method of features, a fair description may be: "Published references for cattle [@nieto2018element], sheep/goat [@davis1996measurements] and pigs [@albarella2005neolithic] were used as standards. One length and one width log ratio value from each specimen were included in the analysis, with values selected following the default zoolog 'priority' method [@trentacoste2018pre]: length values - GL, GLl, GLm, HTC; width values - BT, Bd, Bp, SD, Bfd, Bfp."
# Bibliography
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoolog/vignettes/index.Rmd 
 | 
					
## test whether the return value of "try" is an error
isError <- function(x) {
  # inherits() is robust when x carries more than one class
  return(inherits(x, "try-error"))
}
## recover the data limits from par("usr"), undoing R's default 4%
## expansion on each side (xaxs = "r" makes the usr span 1.08 times
## the data span)
.lim <- function(usr) {
  d <- diff(usr)/1.08
  return(usr[1:2] + d*c(0.04, -0.04))
}
## fetch current xlim
.xlim <- function() {
  return(.lim(par("usr")[1:2]))
}
## fetch current ylim
.ylim <- function() {
  return(.lim(par("usr")[3:4]))
}
## shift limits by a fraction `width` of their span;
## `isLog` is kept for interface symmetry but unused: par("usr") is
## already in log10 units on logarithmic axes, so a linear shift applies
.movelim <- function(lim, width, isLog) {
  step <- diff(lim)*width
  out <- lim + step
  return(out)
}
## move xlim limits
##  width > 0 => to the right
##  width < 0 => to the left
.moveXlim <- function(width) {
  return(.movelim(.xlim(), width=width,isLog=par("xlog")))
}
## move ylim limits
##  width > 0 => up
##  width < 0 => down
.moveYlim <- function(width) {
  return(.movelim(.ylim(), width=width,isLog=par("ylog")))
}
## zoom limits: keep the midpoint, scale the half-span by 1/width
.zoomLim <- function(lim, width) {
  d <- diff(lim)/2/width
  m <- sum(lim)/2
  return(m + d*c(-1, 1))
}
## zoom only x axis
##  width < 1 => zoom out
##  width > 1 => zoom in
.zoomXlim <- function(width) {
  return(.zoomLim(.xlim(), width))
}
## zoom only y axis
##  width < 1 => zoom out
##  width > 1 => zoom in
.zoomYlim <- function(width) {
  return(.zoomLim(.ylim(), width))
}
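## Usage sketch (illustrative, not part of the original file): the helpers
## above return new limits that a caller can feed back into plot() to pan
## or zoom the current view, e.g.
##   plot(x, y)
##   plot(x, y, xlim = .moveXlim(0.25))                     # pan right 25%
##   plot(x, y, xlim = .zoomXlim(2), ylim = .zoomYlim(2))   # zoom in 2x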
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoom/R/utils.R 
 | 
					
	#' A spatial data visualization tool.
#'
#' \tabular{ll}{
#' Package: \tab zoom\cr
#' Type: \tab Package\cr
#' Version: \tab 2.0.6\cr
#' Date: \tab 2014-03-18\cr
#' Depends: \tab R (>= 2.10.0)\cr
#' Encoding: \tab UTF-8\cr
#' License: \tab GPL (>= 3)\cr
#' LazyLoad: \tab yes\cr
#' URL: \tab https://github.com/cbarbu/R-package-zoom\cr
#' }
#'
#' zm() enters an interactive session to zoom and navigate the active plot. The development version, as well as binary releases, can be found at https://github.com/cbarbu/R-package-zoom
#'
#' @aliases zoom-package zoom
#' @name zoom-package
#' @docType package
#' @title The zoom Package
#' @author Corentin M Barbu \email{corentin.barbu@@gmail.com}, with contributions from Sebastian Gibb \email{mail@@sebastiangibb.de}
#' @keywords package
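#' @examples
#' \dontrun{
#' ## illustrative sketch: zoom/navigate the active plot interactively
#' plot(rnorm(100), rnorm(100))
#' zm()
#' }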
NULL
 
 | 
	/scratch/gouwar.j/cran-all/cranData/zoom/R/zoom-package.R 
 | 
					