repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
annayqho/TheCannon | code/panstarrs/ps_colors.py | 1 | 1971 | import numpy as np
from matplotlib import rc
from TheCannon import model
from TheCannon import dataset
from lamost import load_spectra, load_labels
rc('text', usetex=True)
rc('font', family='serif')
tr_ID = np.loadtxt("example_PS1/ps_colors_ts_overlap.txt",
usecols=(1,), dtype='str', delimiter=',')
dir_dat = "example_LAMOST/Data_All"
tr_IDs, wl, tr_flux, tr_ivar = load_spectra(dir_dat, tr_ID)
label_file = "apogee_dr12_labels.csv"
all_labels = load_labels(label_file, tr_IDs)
teff = all_labels[:,0]
logg = all_labels[:,1]
mh = all_labels[:,2]
alpha = all_labels[:,3]
tr_label = np.vstack((teff, logg, mh, alpha)).T
data = dataset.Dataset(
wl, tr_IDs, tr_flux, tr_ivar, tr_label,
tr_IDs, tr_flux, tr_ivar)
data.set_label_names(['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]'])
data.continuum_normalize_gaussian_smoothing(L=50)
# get colors
colors = np.loadtxt("example_PS1/ps_colors_ts_overlap.txt",
usecols=(2,4,6,8), dtype='float', delimiter=',')
errors = np.loadtxt("example_PS1/ps_colors_ts_overlap.txt",
usecols=(3,5,7,9), dtype='float', delimiter=',')
ivars = 1./ errors**2
colors = colors[np.argsort(tr_ID)]
ivars = ivars[np.argsort(tr_ID)]
ivars = ivars * 1e15
# append the four PS1 colors as extra "pixels": extend the log-wavelength
# grid by four points and stack the colors/ivars onto tr_flux, tr_ivar,
# test_flux and test_ivar
logwl = np.log(data.wl)
delta = logwl[1]-logwl[0]
toadd = logwl[-1]+delta*np.arange(1,5)
new_logwl = np.hstack((logwl, toadd))
data.wl = np.exp(new_logwl)
data.tr_flux = np.hstack((data.tr_flux, colors))
data.test_flux = data.tr_flux
data.tr_ivar = np.hstack((data.tr_ivar, ivars))
data.test_ivar = data.tr_ivar
# train model
m = model.CannonModel(2) # 2 = quadratic model
m.fit(data)
m.infer_labels(data)
# data.diagnostics_1to1()
def scatter(i):
return np.std(data.tr_label[:,i]-data.test_label_vals[:,i])
def bias(i):
return np.mean(data.tr_label[:,i]-data.test_label_vals[:,i])
for i in range(0,4):
print(scatter(i), bias(i))
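# scatter(i) and bias(i) summarise label recovery: the standard deviation and
# mean of the residuals between the reference training labels and the labels
# inferred back from the same spectra (Teff, logg, [M/H], [alpha/Fe]).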
| mit |
Tong-Chen/scikit-learn | sklearn/semi_supervised/label_propagation.py | 8 | 14061 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory of why they perform so well is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). The KNN kernel generates a sparse matrix of
size O(k*N), which runs much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
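# For the 'knn' kernel, _get_kernel returns the indices of each test
# point's nearest training samples; their label distributions are summed
# here and normalised further below.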
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
if sparse.isspmatrix(X):
self.X_ = X
else:
self.X_ = np.asarray(X)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
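# Note the contrast with LabelPropagation._build_graph above: LabelPropagation
# normalises the raw affinity matrix directly, whereas LabelSpreading uses the
# negated, diagonal-zeroed normalised graph Laplacian, consistent with the
# note that LabelSpreading is more robust to noise.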
| bsd-3-clause |
SusanJL/iris | docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py | 17 | 1646 |
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
global_air_temp.coord('longitude').guess_bounds()
global_air_temp.coord('latitude').guess_bounds()
fig = plt.figure(figsize=(8, 4.5))
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title('Volcanic ash total\nconcentration not regridded',
size='medium')
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
plt.subplot(2, 2, subplot_num)
scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
global_ash = regional_ash.regrid(global_air_temp, scheme)
iplt.pcolormesh(global_ash, norm=norm)
plt.title('Volcanic ash total concentration\n'
'regridded with AreaWeighted(mdtol={})'.format(mdtol),
size='medium')
plt.subplots_adjust(hspace=0, wspace=0.05,
left=0.001, right=0.999, bottom=0, top=0.955)
# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
ax.coastlines('50m')
ax.gridlines()
ax.set_extent([-80, 40, 31, 75])
plt.show()
| gpl-3.0 |
gwtsa/gwtsa | pastas/project/maps.py | 1 | 6742 | """This module contains the mapping methods for Pastas Projects.
Raoul Collenteur, 2018 - Artesia Water
"""
import matplotlib.pyplot as plt
class Map:
def __init__(self, mls):
"""
Parameters
----------
mls: pastas.Project
Pastas project
"""
self.mls = mls
def parameter(self, parameter, models=None, param_value="optimal", s=30,
show_nan=True, label=False, **kwargs):
"""Plot the value of a parameter.
Parameters
----------
parameter: str
String with the name of the parameter to plot.
models: list, optional
List of the models to plot the parameter for. By default,
all models are plotted.
param_value: str, optional
String with the parameter kind to be plotted. Any column name
of the Model parameter DataFrame (e.g. optimal, stderr)
s: int, optional
Size of the marker.
show_nan: bool, optional
Show nan-values (default is True), which occur when the
parameter is not in the model.
label: bool, optional
Label the points by the model name, default is False.
kwargs: dict, optional
Any keyword arguments that are passed on to the "values" method.
Returns
-------
sc: matplotlib.axes
The axes are returned.
"""
values = self.mls.get_parameters(parameters=[parameter], models=models,
param_value=param_value)
sc = self.values(values, models, show_nan, label, s, **kwargs)
return sc
def statistic(self, statistic, models=None, s=30, show_nan=True,
label=False, **kwargs):
"""Plot the value of a parameter.
Parameters
----------
statistic: str
String with the name of the statistic to plot. Must exactly match
the method name in the pastas.stats module.
models: list, optional
List of the models to plot the statistic for. By default,
all models are plotted.
s: int, optional
Size of the marker.
show_nan: bool, optional
Show nan-values (default is True), which occur when the
parameter is not in the model.
label: bool, optional
Label the points by the model name, default is False.
kwargs: dict, optional
Any keyword arguments that are passed on to the "values" method.
Returns
-------
sc: matplotlib.axes
The axes are returned.
"""
values = self.mls.get_statistics(statistics=[statistic], models=models)
sc = self.values(values, models, show_nan, label, s, **kwargs)
return sc
def values(self, values, models=None, show_nan=True, label=False, s=30,
**kwargs):
"""Plot the value of a parameter.
Parameters
----------
values: pandas.Series
Series with the values to plot; the index should be the model
names as used in mls.models.keys()
models: list, optional
List of the models to plot the values for. By default,
all models are plotted.
s: int, optional
Size of the marker.
show_nan: bool, optional
Show nan-values (default is True), which occur when the
parameter is not in the model.
label: bool, optional
Label the points by the model name, default is False.
kwargs: dict, optional
Any keyword arguments that are passed on to plt.scatter.
Returns
-------
sc: matplotlib.axes
The axes are returned.
"""
if models is None:
models = values.index
models = self.mls.oseries.loc[models, "z"].sort_values(
ascending=False).index
else:
values = values.loc[models]
x = self.mls.oseries.loc[models, "x"].astype(float)
y = self.mls.oseries.loc[models, "y"].astype(float)
s = self._normalize(self.mls.oseries.loc[models, "z"].astype(float), s)
if show_nan:
nan = values[values.isnull()].fillna(-999)
plt.scatter(x[nan.index], y[nan.index], c="none", edgecolors="k",
s=s)
sc = plt.scatter(x, y, c=values, s=s, edgecolors="k", **kwargs)
if label:
not_nan = values[~values.isnull()].index
labels = values[not_nan].astype(str)
for name, xy in zip(labels, zip(x[not_nan], y[not_nan])):
plt.annotate(s=name, xy=xy,
bbox=dict(facecolor='w', edgecolor='k'),
textcoords="offset points", xytext=(10, 10))
return sc
def series(self, kind="stresses", label=False, **kwargs):
"""Plot the location of the oseries or the stresses on a map.
Parameters
----------
kind: str
kind of series to plot. Possible values are the oseries,
stresses or a specific type of stress (e.g. prec, evap or well).
label: bool, optional
Display a label next to the point with the name of the series.
kwargs: dict, optional
Any keyword arguments are passed on to plt.scatter.
Returns
-------
sc: matplotlib.axes
Return the axes.
"""
if kind == "oseries":
series = self.mls.oseries
elif kind == "stresses":
series = self.mls.stresses
else:
series = self.mls.stresses.loc[self.mls.stresses.kind == kind]
x = series.loc[:, "x"].astype(float)
y = series.loc[:, "y"].astype(float)
sc = plt.scatter(x, y, **kwargs)
if label:
for name, xy in zip(x.index, zip(x, y)):
plt.annotate(s=name, xy=xy, fontsize=10,
bbox=dict(facecolor='w', edgecolor='k'),
textcoords="offset points", xytext=(10, 10))
return sc
@staticmethod
def _normalize(series, s=30):
"""Internal method to normalize the series for the size op the scatterplot.
"""
mu = series.mean()
if mu == 0.0: # Prevent sizes of zero from being calculated
mu = 1
series = (series.subtract(series.min()) / mu + 1) * s
return series
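# Minimal usage sketch (illustrative only; assumes `project` is an existing
# pastas.Project whose models share a parameter named "recharge_A"):
#
#   m = Map(project)
#   m.parameter("recharge_A", param_value="optimal", label=True)
#   m.series(kind="oseries", label=True)
#   plt.show()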
| mit |
Reaktoro/Reaktoro | demos/python/demo-transport-solver.py | 1 | 1400 | import reaktoro as rkt
import matplotlib.pyplot as plt
import numpy as np
# Auxiliary time related constant
day = 86400 # [s]
# Parameters for the transport simulation
nsteps = 225 # the number of steps in the transport simulation
ncells = 100 # the number of cells in the discretization
xl = 0.0 # the x-coordinate of the left boundary
xr = 100.0 # the x-coordinate of the right boundary
D = 1.0e-9 # the diffusion coefficient (in units of m2/s)
v = 1.0 / day # the velocity (in units of m/s)
dt = 0.5 * day # the time step (in units of s)
ul = 1 # concentration at the left boundary (mol/m3)
mesh = rkt.Mesh(ncells, xl, xr)
x = mesh.xcells()
transport = rkt.TransportSolver()
transport.setMesh(mesh)
transport.setVelocity(v)
transport.setDiffusionCoeff(D)
transport.setBoundaryValue(ul)
transport.setTimeStep(dt)
transport.initialize()
u = np.zeros(ncells)
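# u holds the concentration in each of the ncells cells; transport.step(u)
# is assumed to advance it in place over one time step dt, which is why
# u.copy() is stored below rather than u itself.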
useries = []
tseries = []
for i in range(1, nsteps):
# Record the current time and concentration every 25 steps
if i % 25 == 0:
tseries.append(i * dt / day)
useries.append(u.copy())
# Perform one time step
transport.step(u)
fig, ax = plt.subplots(figsize=(10, 20))
ax.set(xlabel="x [m]", ylabel="u [mol/m3]")
for i in range(len(tseries)):
ax.plot(x, useries[i], label="{} days".format(tseries[i]))
legend = ax.legend(fontsize="x-large")
fig.set_size_inches((18, 7))
plt.show()
| gpl-3.0 |
meduz/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
UT-CWE/Hyospy | Hyospy_ensemble/lib/SUNTANS/DataIO/read_otps.py | 1 | 29289 | # -*- coding: utf-8 -*-
"""
Tools for handling the OSU tidal prediction software (OTPS) output data
(http://volkov.oce.orst.edu/tides/)
This software is based on the tide model driver (TMD) matlab code from here:
http://polaris.esr.org/ptm_index.html
Matt Rayson
Stanford University
March 2013
"""
import os
import numpy as np
from interpXYZ import interpXYZ
import othertime
from datetime import datetime
import pdb
otis_constits = { 'M2':{'index':1,'omega':1.405189e-04,'v0u':1.731557546},\
'S2':{'index':2,'omega':1.454441e-04,'v0u':0.000000000},\
'N2':{'index':3,'omega':0.00013787970,'v0u':6.050721243},\
'K2':{'index':4,'omega':0.0001458423,'v0u':3.487600001},\
'K1':{'index':5,'omega':7.292117e-05,'v0u':0.173003674},\
'O1':{'index':6,'omega':6.759774e-05,'v0u':1.558553872},\
'P1':{'index':7,'omega':7.252295e-05,'v0u':6.110181633},\
'Q1':{'index':8,'omega':6.495854e-05,'v0u':5.877717569}}
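# Each entry gives the constituent's position in the OTIS binary files
# ('index'), its angular frequency in rad/s ('omega', e.g. 2*pi/omega for M2
# is about 12.42 h) and 'v0u', which appears to be the astronomical
# (equilibrium) argument at the 1 Jan 1992 reference epoch used below.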
def tide_pred(modfile,lon,lat,time,z=None,conlist=None):
"""
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
"""
# Read and interpolate the constituents
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
ncon = omega.shape[0]
h_re = h_re.reshape((ncon,nx))
h_im = h_im.reshape((ncon,nx))
u_re = u_re.reshape((ncon,nx))
u_im = u_im.reshape((ncon,nx))
v_re = v_re.reshape((ncon,nx))
v_im = v_im.reshape((ncon,nx))
# Calculate nodal correction to amps and phases
#baseyear = time[0].year
#t1992 = othertime.SecondsSince(datetime(baseyear,1,1),basetime=datetime(1992,1,1))/86400.0
t1992 = othertime.SecondsSince(time[0],basetime=datetime(1992,1,1))/86400.0
pu,pf,v0u = nodal(t1992+48622.0,conlist)
# Calculate the time series
tsec = othertime.SecondsSince(time,basetime=datetime(1992,1,1)) # Needs to be referenced to 1992
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
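# Each constituent contributes pf * Re[(X_re + 1j*X_im) * exp(1j*(om*tsec + v0u + pu))],
# which is expanded into the explicit cos/sin terms below.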
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += pf[nn]*h_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
pf[nn]*h_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
u[:,ii] += pf[nn]*u_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
pf[nn]*u_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
v[:,ii] += pf[nn]*v_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
pf[nn]*v_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
szo = (nt,)+sz
#import matplotlib.pyplot as plt
#plt.plot(h[:,0])
#plt.show()
#pdb.set_trace()
return h.reshape(szo), u.reshape(szo), v.reshape(szo)
def tide_pred_correc(modfile,lon,lat,time,dbfile,ID,z=None,conlist=None):
"""
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
Applies an amplitude and phase correction based on a time series
"""
from timeseries import timeseries, loadDBstation
print 'Calculating tidal correction factors from time series...'
# Load using the timeseries module
t0 = datetime.strftime(time[0],'%Y%m%d.%H%M%S')
t1 = datetime.strftime(time[-1],'%Y%m%d.%H%M%S')
dt = time[1]-time[0]
print t0, t1, dt.total_seconds()
timeinfo = (t0,t1,dt.total_seconds())
TS,meta = loadDBstation(dbfile,ID,'waterlevel',timeinfo=timeinfo,filttype='low',cutoff=2*3600,output_meta=True)
lonpt=meta['longitude']
latpt=meta['latitude']
print lonpt,latpt
# Extract the OTIS tide prediction
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lonpt,latpt)
h_amp = np.abs(h_re+1j*h_im)[:,0]
h_phs = np.angle(h_re+1j*h_im)[:,0]
# Harmonic analysis of observation time series
amp, phs, frq, frqnames, htide = TS.tidefit(frqnames=conlist)
TS_harm = timeseries(time,htide)
residual = TS.y - htide
# Calculate the amp and phase corrections
dphs = phs - h_phs + np.pi
damp = amp/h_amp
# Extract the data along the specified points
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
h_amp = np.abs(h_re+1j*h_im)
h_phs = np.angle(h_re+1j*h_im)
u_amp = np.abs(u_re+1j*u_im)
u_phs = np.angle(u_re+1j*u_im)
v_amp = np.abs(v_re+1j*v_im)
v_phs = np.angle(v_re+1j*v_im)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
# Rebuild the time series
#tsec=TS_harm.tsec - TS_harm.tsec[0]
tsec = othertime.SecondsSince(time,basetime=time[0])
print tsec[0]
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += damp[nn]*h_amp[nn,ii] * np.cos(om*tsec - (h_phs[nn,ii] + dphs[nn]))
u[:,ii] += damp[nn]*u_amp[nn,ii] * np.cos(om*tsec - (u_phs[nn,ii] + dphs[nn]))
v[:,ii] += damp[nn]*v_amp[nn,ii] * np.cos(om*tsec - (v_phs[nn,ii] + dphs[nn]))
szo = (nt,)+sz
return h.reshape(szo), u.reshape(szo), v.reshape(szo), residual
def tide_pred_old(modfile,lon,lat,time,z=None,conlist=None):
"""
### UNUSED ###
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
"""
# Read and interpolate the constituents
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
ncon = omega.shape[0]
h_re = h_re.reshape((ncon,nx))
h_im = h_im.reshape((ncon,nx))
u_re = u_re.reshape((ncon,nx))
u_im = u_im.reshape((ncon,nx))
v_re = v_re.reshape((ncon,nx))
v_im = v_im.reshape((ncon,nx))
# Nodal correction to amps and phases here...
baseyear = time[0].year
amp, phase = cart2pol(h_re, h_im)
amp,phase = nodal_correction(baseyear,conlist, amp, phase)
h_re, h_im = pol2cart(amp, phase)
amp, phase = cart2pol(u_re, u_im)
amp, phase = nodal_correction(baseyear,conlist, amp, phase)
u_re, u_im = pol2cart(amp, phase)
amp, phase = cart2pol(v_re, v_im)
amp, phase = nodal_correction(baseyear,conlist, amp, phase)
v_re, v_im = pol2cart(amp, phase)
# Calculate the time series
tsec = othertime.SecondsSince(time,basetime=datetime(baseyear,1,1))
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += h_re[nn,ii] * np.cos(om*tsec) + h_im[nn,ii] * np.sin(om*tsec)
u[:,ii] += u_re[nn,ii] * np.cos(om*tsec) + u_im[nn,ii] * np.sin(om*tsec)
v[:,ii] += v_re[nn,ii] * np.cos(om*tsec) + v_im[nn,ii] * np.sin(om*tsec)
szo = (nt,)+sz
return h.reshape(szo), u.reshape(szo), v.reshape(szo)
def extract_HC(modfile,lon,lat,z=None,conlist=None):
"""
Extract harmonic constituents from OTIS binary output and interpolate onto points in lon,lat
set "z" to specifiy depth for transport to velocity conversion
set "constituents" in conlist
Returns:
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist
"""
###
# Make sure the longitude is between 0 and 360
lon = np.mod(lon,360.0)
###
# Read the filenames from the model file
pathfile = os.path.split(modfile)
path = pathfile[0]
f = open(modfile,'r')
hfile = path+'/' + f.readline().strip()
uvfile = path+'/' + f.readline().strip()
grdfile = path+'/' + f.readline().strip()
f.close()
###
# Read the grid file
X,Y,depth, mask = read_OTPS_grd(grdfile)
#X[X>180.0] = 180.0 - X[X>180.0]
mask = mask == 1
# Create an interpolation object
sz = lon.shape
lon = lon.ravel()
lat = lat.ravel()
nx = lon.size
F= interpXYZ(np.vstack((X[mask],Y[mask])).T,np.vstack((lon,lat)).T,method='idw',NNear=3,p=1.0)
# Interpolate the model depths onto the points if z is None
if z is None:
z = F(depth[mask])
else:
z = np.abs(z) # make sure they are positive
###
# Check that the constituents are in the file
conOTIS = get_OTPS_constits(hfile)
if conlist is None:
conlist = conOTIS
# iterate over a copy so that removing constituents does not skip elements
for vv in list(conlist):
if not vv in conOTIS:
print 'Warning: constituent name: %s not present in OTIS file.'%vv
conlist.remove(vv)
###
# Now go through and read the data for each constituent
# Initialise the arrays
ncon = len(conlist)
u_re = np.zeros((ncon,nx))
u_im = np.zeros((ncon,nx))
v_re = np.zeros((ncon,nx))
v_im = np.zeros((ncon,nx))
h_re = np.zeros((ncon,nx))
h_im = np.zeros((ncon,nx))
omega = np.zeros((ncon,))
for ii, vv in enumerate(conlist):
idx = otis_constits[vv]['index']
omega[ii] = otis_constits[vv]['omega']
print 'Interpolating constituent: %s...'%vv
# Read and interpolate h
X ,Y, tmp_h_re, tmp_h_im = read_OTPS_h(hfile,idx)
h_re[ii,:] = F(tmp_h_re[mask])
h_im[ii,:] = F(tmp_h_im[mask])
# Read and interpolate u and v - Note the conversion from transport to velocity
X ,Y, tmp_u_re, tmp_u_im, tmp_v_re, tmp_v_im = read_OTPS_UV(uvfile,idx)
u_re[ii,:] = F(tmp_u_re[mask]) / z
u_im[ii,:] = F(tmp_u_im[mask]) / z
v_re[ii,:] = F(tmp_v_re[mask]) / z
v_im[ii,:] = F(tmp_v_im[mask]) / z
# Return the arrays in their original shape
szout = (ncon,) + sz
return u_re.reshape(szout), u_im.reshape(szout), v_re.reshape(szout), \
v_im.reshape(szout), h_re.reshape(szout), h_im.reshape(szout), omega, conlist
def nodal_correction(year,conlist,amp, phase):
"""
### UNUSED ###
Applies a lunar nodal correction to the amplitude and phase
Code modified from Rusty Holleman's GET_COMPONENTS code below...
#
# GET_COMPONENTS
# [UAMP,UPHASE,VAMP,VPHASE,HAMP,HPHASE]=GET_COMPONENTS(YEAR,OMEGAT,LUN_NODE,V0U,AG)
# calculates the tidal amplitudes and phases from the interpolated OTIS
# data in the AG matrix.
#
# This code has been adapted from Brian Dushaw's matlab scripts
# obtained from http://909ers.apl.washington.edu/~dushaw/tidegui/tidegui.html
#
#function [uamp,uphase,vamp,vphase,hamp,hphase]=get_components(YEAR,omegat,lun_node,v0u,AG)
"""
import tide_consts as tc
#oneday=np.array( [335.62, 0, 322.55, 1.97, 334.63, 0.99, -0.99, 321.57])
oneday = {'M2':335.62, 'S2':0, 'N2':322.55, 'K2':1.97, 'O1':334.63, 'K1':0.99, 'P1':-0.99, 'Q1':321.57}
#if year < 1970 or year > 2037:
# print 'Constants for prediction year are not available'
#return None
# Find the index
JJ=[]
od=np.zeros((len(conlist),1))
for ii,vv in enumerate(conlist):
jj=[item for item in range(len(tc.const_names)) if tc.const_names[item] == vv]
if len(jj) > 0:
JJ.append(jj)
if oneday.has_key(vv):
od[ii]=(np.pi/180)*oneday[vv]
I = int( np.where(year==tc.years)[0] )
vou=tc.v0u[JJ,I]
lunnod=tc.lun_nodes[JJ,I]
vou=(np.pi/180)*vou
#oneday=(np.pi/180)*oneday
hamp = amp*lunnod
#hphase = - oneday[JJ] + vou[JJ] - G
hphase = -od + vou - phase
return hamp, hphase
def read_OTPS_UV(uvfile,ic):
"""
Reads the tidal transport constituent data from an otis binary file
ic = constituent number
Returns: X, Y, h_re and h_im (Real and imaginary components)
See this post on byte ordering
http://stackoverflow.com/questions/1632673/python-file-slurp-w-endian-conversion
"""
f = open(uvfile,'rb')
#f = hfile
# Try numpy
ll = np.fromfile(f,dtype=np.int32,count=1)
nm = np.fromfile(f,dtype=np.int32,count=3)
th_lim = np.fromfile(f,dtype=np.float32,count=2)
ph_lim = np.fromfile(f,dtype=np.float32,count=2)
# Need to go from little endian to big endian
ll.byteswap(True)
nm.byteswap(True)
th_lim.byteswap(True)
ph_lim.byteswap(True)
n = nm[0]
m = nm[1]
nc = nm[2]
if ic < 1 or ic > nc:
raise Exception('ic must be between 1 and %d' % nc)
# Read the actual data
nskip = (ic-1)*(nm[0]*nm[1]*16+8) + 8 + ll - 28
f.seek(nskip,1)
htemp = np.fromfile(f,dtype=np.float32,count=4*n*m)
htemp.byteswap(True)
f.close()
htemp = np.reshape(htemp,(m,4*n))
U_re = htemp[:,0:4*n-3:4]
U_im = htemp[:,1:4*n-2:4]
V_re = htemp[:,2:4*n-1:4]
V_im = htemp[:,3:4*n:4]
X,Y = np.meshgrid(np.linspace(th_lim[0],th_lim[1],n),np.linspace(ph_lim[0],ph_lim[1],m))
return X, Y, U_re, U_im, V_re, V_im
def read_OTPS_grd(grdfile):
"""
Reads the grid data from an otis binary file
Returns: X, Y, hz, mask
See this post on byte ordering
http://stackoverflow.com/questions/1632673/python-file-slurp-w-endian-conversion
"""
f = open(grdfile,'rb')
#
## Try numpy
f.seek(4,0)
n = np.fromfile(f,dtype=np.int32,count=1)
m = np.fromfile(f,dtype=np.int32,count=1)
lats = np.fromfile(f,dtype=np.float32,count=2)
lons = np.fromfile(f,dtype=np.float32,count=2)
dt = np.fromfile(f,dtype=np.float32,count=1)
n.byteswap(True)
m.byteswap(True)
lats.byteswap(True)
lons.byteswap(True)
dt.byteswap(True)
nob = np.fromfile(f,dtype=np.int32,count=1)
nob.byteswap(True)
if nob == 0:
f.seek(20,1)
iob = []
else:
f.seek(8,1)
iob = np.fromfile(f,dtype=np.int32,count=2*nob)
iob.byteswap(True)
iob = np.reshape(iob,(2,nob))
f.seek(8,1)
hz = np.fromfile(f,dtype=np.float32,count=n*m)
f.seek(8,1)
mask = np.fromfile(f,dtype=np.int32,count=n*m)
hz.byteswap(True)
mask.byteswap(True)
hz = np.reshape(hz,(m,n))
mask = np.reshape(mask,(m,n))
f.close()
X,Y = np.meshgrid(np.linspace(lons[0],lons[1],n),np.linspace(lats[0],lats[1],m))
return X, Y ,hz, mask
def read_OTPS_h(hfile,ic):
"""
Reads the elevation constituent data from an otis binary file
ic = constituent number
Returns: X, Y, h_re and h_im (Real and imaginary components)
See this post on byte ordering
http://stackoverflow.com/questions/1632673/python-file-slurp-w-endian-conversion
"""
f = open(hfile,'rb')
#f = hfile
# Try numpy
ll = np.fromfile(f,dtype=np.int32,count=1)
nm = np.fromfile(f,dtype=np.int32,count=3)
th_lim = np.fromfile(f,dtype=np.float32,count=2)
ph_lim = np.fromfile(f,dtype=np.float32,count=2)
# Need to go from little endian to big endian
ll.byteswap(True)
nm.byteswap(True)
th_lim.byteswap(True)
ph_lim.byteswap(True)
n = nm[0]
m = nm[1]
nc = nm[2]
if ic < 1 or ic > nc:
raise Exception('ic must be between 1 and %d' % nc)
#return -1
# Read the actual data
nskip = (ic-1)*(nm[0]*nm[1]*8+8) + 8 + ll - 28
f.seek(nskip,1)
htemp = np.fromfile(f,dtype=np.float32,count=2*n*m)
htemp.byteswap(True)
#
f.close()
htemp = np.reshape(htemp,(m,2*n))
h_re = htemp[:,0:2*n-1:2]
h_im = htemp[:,1:2*n:2]
X,Y = np.meshgrid(np.linspace(th_lim[0],th_lim[1],n),np.linspace(ph_lim[0],ph_lim[1],m))
return X ,Y, h_re, h_im
def get_OTPS_constits(hfile):
"""
Returns the list of constituents in the file
"""
f = open(hfile,'rb')
ll = np.fromfile(f,dtype=np.int32,count=1)
nm = np.fromfile(f,dtype=np.int32,count=3)
ll.byteswap(True)
nm.byteswap(True)
f.close()
ncon = nm[2]
conList = []
for ii in range(1,ncon+1):
for vv in otis_constits:
if otis_constits[vv]['index']==ii:
conList.append(vv)
return conList
def cart2pol(re,im):
amp = np.abs(re + 1j*im)
phs = np.angle(re + 1j*im)
return amp, phs
def pol2cart(amp,phs):
re = amp * np.cos(phs)
im = amp * np.sin(phs)
return re, im
def astrol(time):
"""
%function [s,h,p,N]=astrol(time);
% Computes the basic astronomical mean longitudes s, h, p, N.
% Note N is not N', i.e. N is decreasing with time.
% These formulae are for the period 1990 - 2010, and were derived
% by David Cartwright (personal comm., Nov. 1990).
% time is UTC in decimal MJD.
% All longitudes returned in degrees.
% R. D. Ray Dec. 1990
% Non-vectorized version. Re-make for matlab by Lana Erofeeva, 2003
% usage: [s,h,p,N]=astrol(time)
% time, MJD
circle=360;
T = time - 51544.4993;
% mean longitude of moon
% ----------------------
s = 218.3164 + 13.17639648 * T;
% mean longitude of sun
% ---------------------
h = 280.4661 + 0.98564736 * T;
% mean longitude of lunar perigee
% -------------------------------
p = 83.3535 + 0.11140353 * T;
% mean longitude of ascending lunar node
% --------------------------------------
N = 125.0445D0 - 0.05295377D0 * T;
%
s = mod(s,circle);
h = mod(h,circle);
p = mod(p,circle);
N = mod(N,circle);
"""
circle=360;
T = time - 51544.4993;
# mean longitude of moon
# ----------------------
s = 218.3164 + 13.17639648 * T;
# mean longitude of sun
# ---------------------
h = 280.4661 + 0.98564736 * T;
# mean longitude of lunar perigee
# -------------------------------
p = 83.3535 + 0.11140353 * T;
# mean longitude of ascending lunar node
# --------------------------------------
N = 125.0445 - 0.05295377 * T;
#
s = np.mod(s,circle);
h = np.mod(h,circle);
p = np.mod(p,circle);
N = np.mod(N,circle);
return s,h,p,N
def nodal(time,con):
"""
Nodal correction
Derived from the tide model driver matlab script: nodal.m
"""
rad = np.pi/180.0
s,h,p,omega=astrol(time)
#
# omega =
#
# determine nodal corrections f and u
# -----------------------------------
sinn = np.sin(omega*rad);
cosn = np.cos(omega*rad);
sin2n = np.sin(2*omega*rad);
cos2n = np.cos(2*omega*rad);
sin3n = np.sin(3*omega*rad);
ndict={'M2':{'f':np.sqrt((1.-.03731*cosn+.00052*cos2n)**2 + (.03731*sinn-.00052*sin2n)**2),\
'u':np.arctan((-.03731*sinn+.00052*sin2n)/(1.-.03731*cosn+.00052*cos2n))/rad},\
'S2':{'f':1.0, 'u':0.0},\
'K1':{'f':np.sqrt((1.+.1158*cosn-.0029*cos2n)**2 + (.1554*sinn-.0029*sin2n)**2),\
'u':np.arctan((-.1554*sinn+.0029*sin2n)/(1.+.1158*cosn-.0029*cos2n))/rad},\
'O1':{'f':np.sqrt((1.0+0.189*cosn-0.0058*cos2n)**2 + (0.189*sinn-0.0058*sin2n)**2),\
'u':10.8*sinn - 1.3*sin2n + 0.2*sin3n},\
'N2':{'f':np.sqrt((1.-.03731*cosn+.00052*cos2n)**2 + (.03731*sinn-.00052*sin2n)**2),\
'u':np.arctan((-.03731*sinn+.00052*sin2n)/(1.-.03731*cosn+.00052*cos2n))/rad},\
'P1':{'f':1.0, 'u':0.0},\
'K2':{'f':np.sqrt((1.+.2852*cosn+.0324*cos2n)**2 + (.3108*sinn+.0324*sin2n)**2),\
'u':np.arctan(-(.3108*sinn+.0324*sin2n) /(1.+.2852*cosn+.0324*cos2n))/rad},\
'Q1':{'f':np.sqrt((1.+.188*cosn)**2+(.188*sinn)**2),\
'u':np.arctan(.189*sinn / (1.+.189*cosn))/rad} }
# Prepare the output data
ncon = len(con)
pu = np.zeros((ncon,1))
pf = np.ones((ncon,1))
v0u = np.zeros((ncon,1))
for ii,vv in enumerate(con):
if ndict.has_key(vv):
pu[ii,:] = ndict[vv]['u']*rad
pf[ii,:] = ndict[vv]['f']
if otis_constits.has_key(vv):
v0u[ii,:] = otis_constits[vv]['v0u']
return pu, pf, v0u
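# pu holds the nodal phase corrections (radians), pf the nodal amplitude
# factors and v0u the equilibrium arguments for the constituents in `con`;
# tide_pred adds pu and v0u to each constituent's phase and scales its
# amplitude by pf.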
#%%
#f=zeros(nT,53);
#f(:,1) = 1; % Sa
#f(:,2) = 1; % Ssa
#f(:,3) = 1 - 0.130*cosn; % Mm
#f(:,4) = 1; % MSf
#f(:,5) = 1.043 + 0.414*cosn; % Mf
#f(:,6) = sqrt((1+.203*cosn+.040*cos2n).^2 + ...
# (.203*sinn+.040*sin2n).^2); % Mt
#
#f(:,7) = 1; % alpha1
#f(:,8) = sqrt((1.+.188*cosn).^2+(.188*sinn).^2);% 2Q1
#f(:,9) = f(:,8); % sigma1
#f(:,10) = f(:,8); % q1
#f(:,11) = f(:,8); % rho1
#f(:,12) = sqrt((1.0+0.189*cosn-0.0058*cos2n).^2 + ...
# (0.189*sinn-0.0058*sin2n).^2);% O1
#f(:, 13) = 1; % tau1
#% tmp1 = 2.*cos(p*rad)+.4*cos((p-omega)*rad);
#% tmp2 = sin(p*rad)+.2*sin((p-omega)*rad);% Doodson's
#tmp1 = 1.36*cos(p*rad)+.267*cos((p-omega)*rad);% Ray's
#tmp2 = 0.64*sin(p*rad)+.135*sin((p-omega)*rad);
#f(:,14) = sqrt(tmp1.^2 + tmp2.^2); % M1
#f(:,15) = sqrt((1.+.221*cosn).^2+(.221*sinn).^2);% chi1
#f(:,16) = 1; % pi1
#f(:,17) = 1; % P1
#f(:,18) = 1; % S1
#f(:,19) = sqrt((1.+.1158*cosn-.0029*cos2n).^2 + ...
# (.1554*sinn-.0029*sin2n).^2); % K1
#f(:,20) = 1; % psi1
#f(:,21) = 1; % phi1
#f(:,22) = 1; % theta1
#f(:,23) = sqrt((1.+.169*cosn).^2+(.227*sinn).^2); % J1
#f(:,24) = sqrt((1.0+0.640*cosn+0.134*cos2n).^2 + ...
# (0.640*sinn+0.134*sin2n).^2 ); % OO1
#f(:,25) = sqrt((1.-.03731*cosn+.00052*cos2n).^2 + ...
# (.03731*sinn-.00052*sin2n).^2);% 2N2
#f(:,26) = f(:,25); % mu2
#f(:,27) = f(:,25); % N2
#f(:,28) = f(:,25); % nu2
#f(:,29) = 1; % M2a
#f(:,30) = f(:,25); % M2
#f(:,31) = 1; % M2b
#f(:,32) = 1; % lambda2
#temp1 = 1.-0.25*cos(2*p*rad)-0.11*cos((2*p-omega)*rad)-0.04*cosn;
#temp2 = 0.25*sin(2*p*rad)+0.11*sin((2*p-omega)*rad)+ 0.04*sinn;
#f(:,33) = sqrt(temp1.^2 + temp2.^2); % L2
#f(:,34) = 1; % T2
#f(:,35) = 1; % S2
#f(:,36) = 1; % R2
#f(:,37) = sqrt((1.+.2852*cosn+.0324*cos2n).^2 + ...
# (.3108*sinn+.0324*sin2n).^2); % K2
#f(:,38) = sqrt((1.+.436*cosn).^2+(.436*sinn).^2); % eta2
#f(:,39) = f(:,30).^2; % MNS2
#f(:,40) = f(:,30); % 2SM2
#f(:,41) = 1; % wrong % M3
#f(:,42) = f(:,19).*f(:,30); % MK3
#f(:,43) = 1; % S3
#f(:,44) = f(:,30).^2; % MN4
#f(:,45) = f(:,44); % M4
#f(:,46) = f(:,44); % MS4
#f(:,47) = f(:,30).*f(:,37); % MK4
#f(:,48) = 1; % S4
#f(:,49) = 1; % S5
#f(:,50) = f(:,30).^3; % M6
#f(:,51) = 1; % S6
#f(:,52) = 1; % S7
#f(:,53) = 1; % S8
#%
#u=zeros(nT,53);
#u(:, 1) = 0; % Sa
#u(:, 2) = 0; % Ssa
#u(:, 3) = 0; % Mm
#u(:, 4) = 0; % MSf
#u(:, 5) = -23.7*sinn + 2.7*sin2n - 0.4*sin3n; % Mf
#u(:, 6) = atan(-(.203*sinn+.040*sin2n)./...
# (1+.203*cosn+.040*cos2n))/rad; % Mt
#u(:, 7) = 0; % alpha1
#u(:, 8) = atan(.189*sinn./(1.+.189*cosn))/rad; % 2Q1
#u(:, 9) = u(:,8); % sigma1
#u(:,10) = u(:,8); % q1
#u(:,11) = u(:,8); % rho1
#u(:,12) = 10.8*sinn - 1.3*sin2n + 0.2*sin3n; % O1
#u(:,13) = 0; % tau1
#u(:,14) = atan2(tmp2,tmp1)/rad; % M1
#u(:,15) = atan(-.221*sinn./(1.+.221*cosn))/rad; % chi1
#u(:,16) = 0; % pi1
#u(:,17) = 0; % P1
#u(:,18) = 0; % S1
#u(:,19) = atan((-.1554*sinn+.0029*sin2n)./...
# (1.+.1158*cosn-.0029*cos2n))/rad; % K1
#u(:,20) = 0; % psi1
#u(:,21) = 0; % phi1
#u(:,22) = 0; % theta1
#u(:,23) = atan(-.227*sinn./(1.+.169*cosn))/rad; % J1
#u(:,24) = atan(-(.640*sinn+.134*sin2n)./...
# (1.+.640*cosn+.134*cos2n))/rad; % OO1
#u(:,25) = atan((-.03731*sinn+.00052*sin2n)./ ...
# (1.-.03731*cosn+.00052*cos2n))/rad; % 2N2
#u(:,26) = u(:,25); % mu2
#u(:,27) = u(:,25); % N2
#u(:,28) = u(:,25); % nu2
#u(:,29) = 0; % M2a
#u(:,30) = u(:,25); % M2
#u(:,31) = 0; % M2b
#u(:,32) = 0; % lambda2
#u(:,33) = atan(-temp2./temp1)/rad ; % L2
#u(:,34) = 0; % T2
#u(:,35) = 0; % S2
#u(:,36) = 0; % R2
#u(:,37) = atan(-(.3108*sinn+.0324*sin2n)./ ...
# (1.+.2852*cosn+.0324*cos2n))/rad; % K2
#u(:,38) = atan(-.436*sinn./(1.+.436*cosn))/rad; % eta2
#u(:,39) = u(:,30)*2; % MNS2
#u(:,40) = u(:,30); % 2SM2
#u(:,41) = 1.5d0*u(:,30); % M3
#u(:,42) = u(:,30) + u(:,19); % MK3
#u(:,43) = 0; % S3
#u(:,44) = u(:,30)*2; % MN4
#u(:,45) = u(:,44); % M4
#u(:,46) = u(:,30); % MS4
#u(:,47) = u(:,30)+u(:,37); % MK4
#u(:,48) = 0; % S4
#u(:,49) = 0; % S5
#u(:,50) = u(:,30)*3; % M6
#u(:,51) = 0; % S6
#u(:,52) = 0; % S7
#u(:,53) = 0; % S8
###
# Testing
###
#grdfile = 'C:/Projects/GOMGalveston/DATA/Tides/DATA/grid_Mex'
#hfile = 'C:/Projects/GOMGalveston/DATA/Tides/DATA/h_Mex2010'
#uvfile = 'C:/Projects/GOMGalveston/DATA/Tides/DATA/UV_Mex2010'
#ic=6
#X, Y ,hz, mask = read_OTPS_grd(grdfile)
#plt.figure()
#plt.contourf(X,Y,hz,30)
#plt.colorbar()
#X,Y,h_re, h_im = read_OTPS_h(hfile,ic)
#plt.figure()
#plt.contourf(X,Y,h_re,30)
##plt.imshow(h_re)
#plt.colorbar()
#plt.show()
#X,Y,U_re, U_im, V_re, V_im = read_OTPS_UV(uvfile,ic)
#plt.figure()
#plt.contourf(X,Y,U_re,30)
##plt.imshow(h_re)
#plt.colorbar()
#plt.show()
# Inputs:
#modfile ='C:/Projects/GOMGalveston/DATA/Tides/Model_Mex'
#lon = np.array([-90.0,-91.0,-91.0])
#lat = np.array([27.0,28.0,28.5])
#z = None
#conlist = ['M2','S2','K1','O1','r2d2']
#time = othertime.TimeVector('20000101.0000','20000201.0000',3600.)
##
##tides = extract_HC(modfile,lon,lat)
#
#huv = tide_pred(modfile,lon,lat,time,z=None,conlist=None)
| mit |
r-mart/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
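# G = -0.5 * D**2 is the classical-MDS kernel; KernelPCA with a precomputed
# kernel centres it and keeps the leading n_components eigenvectors as the
# embedding.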
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
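# Minimal usage sketch (illustrative only, not part of the original module):
#
#   import numpy as np
#   from sklearn.manifold import Isomap
#   X = np.random.RandomState(0).rand(100, 10)
#   embedding = Isomap(n_neighbors=5, n_components=2).fit_transform(X)
#   # embedding.shape == (100, 2)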
| bsd-3-clause |
antgonza/qiime | scripts/identify_paired_differences.py | 15 | 9191 | #!/usr/bin/env python
# File created on 19 Jun 2013
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2013, The QIIME project"
__credits__ = ["Greg Caporaso", "Jose Carlos Clemente Litran"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from biom import load_table
from qiime.group import (
extract_per_individual_state_metadata_from_sample_metadata,
extract_per_individual_state_metadata_from_sample_metadata_and_biom)
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import (parse_command_line_parameters,
make_option)
from qiime.filter import sample_ids_from_metadata_description
from qiime.stats import paired_difference_analyses
script_info = {}
script_info[
'brief_description'] = "Generate plots and stats to test for change in some data point(s) with a state change on a per-individual basis."
script_info[
'script_description'] = "This script provides a framework for paired-difference testing (i.e., analysis of data generated under a pre/post experimental design). In a pre/post experimental design, individuals are sampled before and after some 'treatment'. This code plots differences in values in the sample metadata (i.e., the mapping file) or observation counts in a BIOM table, and runs a (Bonferroni-corrected) one sample t-test on each sample metadata category or BIOM observation to determine if the mean of each distribution of pre/post differences differs from zero. If 'None' appears for the t score and p-values, this often means that the distribution of differences contained no variance, so the t-test could not be run. This can happen, for example, if the value passed for --valid_states is so restrictive that only a single sample is retained for analysis."
script_info['script_usage'] = []
script_info['script_usage'].append(
("Generate plots and stats for one category from the mapping file where the y-axis should be consistent across plots and the lines in the plots should be light blue.",
"",
"%prog -m map.txt --metadata_categories 'Streptococcus Abundance' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_results --ymin 0 --ymax 60 --line_color '#eeefff'"))
script_info['script_usage'].append(
("Generate plots and stats for three categories from the mapping file.",
"",
"%prog -m map.txt --metadata_categories 'Streptococcus Abundance,Phylogenetic Diversity,Observed OTUs' --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o taxa_and_alpha_results"))
script_info['script_usage'].append(
("Generate plots for all observations in a biom file",
"",
"%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results"))
script_info['script_usage'].append(
("Generate plots for all observations in a biom file, but only including samples from individuals whose 'TreatmentResponse' was 'Improved' (as defined in the mapping file).",
"",
"%prog -m map.txt -b otu_table.biom --state_category TreatmentState --state_values Pre,Post --individual_id_category PersonalID -o otu_results_improved_only --valid_states TreatmentResponse:Improved"))
script_info[
'output_description'] = "The output of this script is plots of pre/post differences and associated statistics."
script_info['required_options'] = [
make_option(
'-m',
'--mapping_fp',
type="existing_filepath",
help='the input metadata map filepath'),
make_option(
'-o',
'--output_dir',
type="new_filepath",
help='directory where output files should be saved'),
make_option(
'-t',
'--state_category',
help='the mapping file column name to plot change over (usually has values like "pre-treatment" and "post-treatment")'),
make_option(
'-x',
'--state_values',
help='ordered list of state values to test change over (defines direction of graphs, generally something like "pre-treatment,post-treatment"). currently limited to two states.'),
make_option(
'-c',
'--individual_id_category',
help='the mapping file column name containing each individual\'s identifier (usually something like "personal_identifier")'),
]
script_info['optional_options'] = [
make_option(
'--ymin',
default=None,
type='float',
help='set the minimum y-value across plots [default: determined on a per-plot basis]'),
make_option(
'--ymax',
default=None,
type='float',
help='set the maximum y-value across plots [default: determined on a per-plot basis]'),
make_option(
'--metadata_categories',
help='ordered list of the mapping file column names to test for paired differences (usually something like "StreptococcusAbundance,Phylogenetic Diversity") [default: %default]',
default=None),
make_option(
'--observation_ids',
help='ordered list of the observation ids to test for paired differences if a biom table is provided (usually something like "otu1,otu2") [default: compute paired differences for all observation ids]',
default=None),
make_option(
'-b',
'--biom_table_fp',
help='path to biom table to use for computing paired differences [default: %default]',
type='existing_filepath',
default=None),
make_option(
'-s',
'--valid_states',
help="string describing samples that should be included based on their metadata (e.g. 'TreatmentResponse:Improved') [default: all samples are included in analysis]",
default=None),
make_option(
'--line_color',
help="color of lines in plots, useful if generating multiple plots in different runs of this script to overlay on top of one another. these can be specified as matplotlib color names, or as html hex strings [default: %default]",
default="black"),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
mapping_fp = opts.mapping_fp
state_values = opts.state_values.split(',')
metadata_categories = opts.metadata_categories
state_category = opts.state_category
individual_id_category = opts.individual_id_category
output_dir = opts.output_dir
biom_table_fp = opts.biom_table_fp
observation_ids = opts.observation_ids
    if observation_ids is not None:
observation_ids = observation_ids.split(',')
valid_states = opts.valid_states
ymin = opts.ymin
ymax = opts.ymax
line_color = opts.line_color
# validate the input - currently only supports either biom data
# or mapping file data. if useful in the future it shouldn't be too
# hard to allow the user to provide both.
if metadata_categories and biom_table_fp:
option_parser.error(
"Can only pass --metadata_categories or --biom_table_fp, not both.")
elif not (metadata_categories or biom_table_fp):
option_parser.error(
"Must pass either --metadata_categories or --biom_table_fp.")
else:
pass
# parse the mapping file to a dict
mapping_data = parse_mapping_file_to_dict(open(mapping_fp, 'U'))[0]
# currently only support for pre/post (ie, two-state) tests
if len(state_values) != 2:
option_parser.error(
"Exactly two state_values must be passed separated by a comma.")
# filter mapping_data, if requested
if valid_states:
sample_ids_to_keep = sample_ids_from_metadata_description(
open(mapping_fp, 'U'), valid_states)
        for sid in list(mapping_data):  # iterate over a copy so keys can be deleted
if sid not in sample_ids_to_keep:
del mapping_data[sid]
if biom_table_fp:
biom_table = load_table(biom_table_fp)
analysis_categories = observation_ids or biom_table.ids(axis='observation')
personal_ids_to_state_values = \
extract_per_individual_state_metadata_from_sample_metadata_and_biom(
mapping_data,
biom_table,
state_category,
state_values,
individual_id_category,
observation_ids=analysis_categories)
else:
analysis_categories = metadata_categories.split(',')
personal_ids_to_state_values = \
extract_per_individual_state_metadata_from_sample_metadata(
mapping_data,
state_category,
state_values,
individual_id_category,
analysis_categories)
paired_difference_analyses(personal_ids_to_state_values,
analysis_categories,
state_values,
output_dir,
line_color=line_color,
ymin=ymin,
ymax=ymax)
if __name__ == "__main__":
main()
| gpl-2.0 |
EtienneCmb/brainpipe | brainpipe/preprocessing/reference.py | 1 | 9040 | """iEEG referencing."""
import logging
import numpy as np
import pandas as pd
from re import findall
from brainpipe.system.logging import set_log_level
logger = logging.getLogger('brainpipe')
def ieeg_referencing(data, channels, xyz=None, method='bipolar', sep='.',
ignore=None, verbose=None):
"""Rereferencing intracranial data.
Parameters
----------
data : array_like
Array of data of shape (n_channels, n_pts, n_trials)
channels : list
List of channels with a length of (n_channels).
xyz : array_like | None
Array of MNI/Talairach coordinates of shape (n_channels, 3)
method : {'bipolar', 'laplacian'}
Referencing method.
sep : string | '.'
Channel name separator (e.g "v1.124" will be considered as "v1" with
`sep='.'`)
ignore : list | None
List of channel index to ignore.
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either
PROFILER, DEBUG, INFO, WARNING, ERROR, or CRITICAL.
Returns
-------
data_b : array_like
Bipolarized data.
chan_b : array_like
Name of the bipolarized channels.
xyz_b : array_like
Bipolarized coordinates.
"""
methods = dict(bipolar=_ref_bipolar, laplacian=_ref_laplacian)
n_chan_data, n_pts, n_trials = data.shape
n_chan = len(channels)
set_log_level(verbose)
channels = np.asarray(channels)
# Checking :
assert isinstance(data, np.ndarray), "data should be an array"
assert data.ndim == 3, "data should be (n_channels, n_pts, n_trials)"
assert n_chan_data == n_chan, ("The number of channels along dimension 0 "
"should be %i" % (n_chan))
if ignore is not None:
msg = "ignore should either be a list, a tuple or an array of integers"
assert isinstance(ignore, (list, tuple, np.ndarray)), msg
assert len(ignore), msg
assert all([isinstance(k, int) for k in ignore]), msg
ignore = np.asarray(ignore)
consider = np.ones((n_chan,), dtype=bool)
assert method in methods, "method should be %s" % ', '.join(methods)
logger.info("Referencing %i channels using %s method" % (n_chan, method))
if not isinstance(xyz, np.ndarray) or (xyz.shape[0] != n_chan):
xyz = np.zeros((n_chan, 3))
logger.info(" No coordinates detected")
# Preprocess channel names by separating channel names / number:
chnames, chnums = [], []
for num, k in enumerate(channels):
# Remove spaces and separation :
channels[num] = k.strip().replace(' ', '').split(sep)[0]
# Get only the name / number :
if findall(r'\d+', k):
number = findall(r'\d+', k)[0]
chnums.append(int(number))
chnames.append(k.split(number)[0])
else:
chnums.append(-1)
chnames.append(k)
chnums, chnames = np.asarray(chnums), np.asarray(chnames)
# Find if some channels have to be ignored :
if isinstance(ignore, (tuple, list, np.ndarray)):
ignore = np.asarray(ignore)
consider[ignore] = False
consider[chnums == -1] = False
logger.info(' %i channels are going to be ignored (%s)' % (
(~consider).sum(), ', '.join(channels[~consider].tolist())))
# Get index to bipolarize :
_fcn = methods[method]
return _fcn(data, xyz, channels, chnames, chnums, consider)
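# Illustrative sketch (not part of the original module): minimal bipolar
# referencing of a synthetic recording. The channel names, array shapes and
# random data below are assumptions chosen only for demonstration.
def _example_ieeg_referencing():
    rng = np.random.RandomState(0)
    data = rng.randn(4, 100, 5)          # (n_channels, n_pts, n_trials)
    channels = ['A1', 'A2', 'A3', 'B1']  # hypothetical SEEG contacts
    data_b, chan_b, xyz_b = ieeg_referencing(data, channels, method='bipolar')
    # Expected pairs: 'A2 - A1' and 'A3 - A2'; 'B1' has no neighbor to pair.
    return data_b, chan_b, xyz_b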
def _ref_bipolar(data, xyz, channels, chnames, chnums, consider):
"""Referencing using bipolarization."""
idx = []
for num in range(len(channels)):
if not consider[num]:
continue
# Get the name of the current electrode and the needed one :
need_elec = str(chnames[num]) + str(int(chnums[num]) - 1)
is_present = channels == need_elec
if not any(is_present):
continue
# Find where is located the electrode :
idx_need = np.where(is_present)[0]
assert len(idx_need) == 1, ("Multiple channels have the same name "
"%s" % ', '.join(['%s (%i)' % (channels[k],
k) for k in idx_need])) # noqa
idx += [[num, idx_need[0]]]
logger.info(" Reference iEEG data using bipolarization")
chan_b = []
n_pts, n_trials = data.shape[1], data.shape[2]
data_b = np.zeros((len(idx), n_pts, n_trials), dtype=data.dtype)
xyz_b = np.zeros((len(idx), 3))
for k, i in enumerate(idx):
chan_b += ['%s - %s' % (channels[i[0]], channels[i[1]])]
data_b[k, ...] = data[i[0], ...] - data[i[1], ...]
xyz_b[k, ...] = np.c_[xyz[i[0], :], xyz[i[1], :]].mean(1)
return data_b, chan_b, xyz_b
def _ref_laplacian(data, xyz, channels, chnames, chnums, consider):
"""Referencing using laplacian."""
idx = []
for num in range(len(channels)):
if not consider[num]:
continue
# Get the name of the current electrode and the needed one :
need_elec_left = str(chnames[num]) + str(int(chnums[num]) - 1)
need_elec_right = str(chnames[num]) + str(int(chnums[num]) + 1)
is_present_left = channels == need_elec_left
is_present_right = channels == need_elec_right
if not any(is_present_left) and not any(is_present_right):
continue
# Find where are located left / right electrodes :
idx_need_left = np.where(is_present_left)[0]
idx_need_right = np.where(is_present_right)[0]
assert (len(idx_need_left) <= 1) and (len(idx_need_right) <= 1)
idx += [[num, np.r_[idx_need_left, idx_need_right].tolist()]]
logger.info(" Reference iEEG data using laplacian")
chan_b = []
n_pts, n_trials = data.shape[1], data.shape[2]
data_b = np.zeros((len(idx), n_pts, n_trials), dtype=data.dtype)
xyz_b = np.zeros((len(idx), 3))
for k, i in enumerate(idx):
chan_b += ['%s - m(%s)' % (channels[i[0]], ', '.join(channels[i[1]]))]
data_b[k, ...] = data[i[0], ...] - data[i[1], ...].mean(axis=0)
xyz_b[k, ...] = np.c_[xyz[i[0], :], xyz[i[1], :].mean(axis=0)].mean(1)
return data_b, chan_b, xyz_b
def contact_bipo_to_mono(contact):
"""Convert a list of bipolar contacts into unique monopolar sites.
Parameters
----------
contact : list
List of bipolar contact.
Returns
-------
contact_r : list
List of unsorted monopolar contacts.
"""
from textwrap import wrap
contact = [k.strip().replace(' ', '').replace('-', '') for k in contact]
_split = []
for k in contact:
_k = wrap(k, int(np.ceil(len(k) / 2)))
assert len(_k) == 2, "Wrong channel conversion %s" % str(_k)
_split += list(_k)
_split = np.ravel(_split)
c_unique = []
_ = [c_unique.append(k) for k in _split if k not in c_unique] # noqa
return c_unique
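# Illustrative sketch (not part of the original module): hypothetical bipolar
# contact names reduced to their unique monopolar sites.
def _example_contact_bipo_to_mono():
    # ['A2-A1', 'A3-A2'] -> ['A2', 'A1', 'A3'] (first-appearance order)
    return contact_bipo_to_mono(['A2-A1', 'A3-A2'])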
def contact_mono_to_bipo(contact, sep='-'):
"""Convert a list of monopolar contacts into bipolar contacts.
Parameters
----------
contact : list
List of monopolar contact.
sep : string | '-'
String separator between bipolar contact.
Returns
-------
contact_r : list
List of bipolar contacts.
"""
bip = []
for k in contact:
try:
letter = ''.join([i for i in k if not i.isdigit()])
number = int(findall(r'\d+', k)[0])
previous_contact = '%s%i' % (letter, number - 1)
if previous_contact in contact:
bip += ['%s%s%s' % (k, sep, previous_contact)]
        except Exception:
logger.info('%s is not an SEEG channel' % k)
return bip
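# Illustrative sketch (not part of the original module): hypothetical
# monopolar contacts paired into bipolar derivations.
def _example_contact_mono_to_bipo():
    # ['A1', 'A2', 'A3'] -> ['A2-A1', 'A3-A2'] ('A1' has no lower neighbor)
    return contact_mono_to_bipo(['A1', 'A2', 'A3'])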
def flat_bipolar_contact(contact):
"""Get a flatten version of bipolar contacts.
For example, "A'12 - A'11" -> "A'12A'11"
Parameters
----------
contact : list
List of contacts.
Returns
-------
contact_c : list
List of flattened contacts.
"""
repl = {' ': '', '-': ''}
for i, k in enumerate(contact):
for o, n in repl.items():
contact[i] = k.replace(o, n)
return contact
def clean_contact(contact):
"""Clean contact's name.
For example "A02 - A01" -> "A2-A1"
Parameters
----------
contact : list
List of contacts.
Returns
-------
contact_c : list
List of cleaned contacts.
"""
chan_repl = {'01': '1', '02': '2', '03': '3', '04': '4', '05': '5',
'06': '6', '07': '7', '08': '8', '09': '9', ' ': ''}
if not isinstance(contact, pd.Series):
contact = pd.Series(data=contact, name='contact')
contact.replace(chan_repl, regex=True, inplace=True)
# replace p -> ' (only if single letters)
for n_c, c in enumerate(contact):
if ("p" in c) and len(findall(r'[A-Za-z]+', c)[0]) > 1:
contact[n_c] = contact[n_c].replace('p', "'")
contact = contact.str.upper()
contact = contact.str.strip()
return list(contact)
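# Illustrative sketch (not part of the original module): cleaning hypothetical
# contact names; zero-padding is dropped and a 'p' inside a multi-letter
# prefix becomes a prime mark.
def _example_clean_contact():
    # ["A02 - A01", "Ap2"] -> ["A2-A1", "A'2"]
    return clean_contact(["A02 - A01", "Ap2"])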
| gpl-3.0 |
Statoil/sunbeam | examples/swofplt.py | 1 | 1614 | #!/usr/bin/env python
import sys
from os.path import isdir, join
import sunbeam
from datetime import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
def plotswof(ecl):
assert('SWOF' in ecl.table)
krw = ecl.table['SWOF', 'KRW']
krow = ecl.table['SWOF', 'KROW']
pcow = ecl.table['SWOF', 'PCOW']
swofl = [x/20.0 for x in range(21)]
krwl = [krw(x/20.0) for x in range(21)]
krowl = [krow(x/20.0) for x in range(21)]
pcowl = [pcow(x/20.0) for x in range(21)]
plt.figure(1)
plt.plot(swofl, krwl, label = 'KRW')
plt.plot(swofl, krowl, label = 'KROW')
plt.legend()
plt.show()
plt.figure(2)
plt.plot(swofl, pcowl, label = 'Water-oil capillary pressure')
plt.legend()
plt.show()
def opmdatadir():
global OPMDATA_DIR
if isdir(OPMDATA_DIR):
return OPMDATA_DIR
if len(sys.argv) < 2:
return None
d = sys.argv[1]
if isdir(d) and isdir(join(d, 'norne')):
return d
return None
def haveopmdata():
return opmdatadir() is not None
def parse(fname):
s = dt.now()
es = sunbeam.parse(fname, ('PARSE_RANDOM_SLASH', sunbeam.action.ignore))
e = dt.now()
print('Parsing took %s sec' % (e - s).seconds)
return es
def main():
es = parse(join(opmdatadir(), 'norne/NORNE_ATW2013.DATA'))
plotswof(es)
if __name__ == '__main__':
global OPMDATA_DIR
OPMDATA_DIR = '../../opm-data'
if haveopmdata():
print('Found norne, parsing ...')
main()
else:
print('Need to have path "%s" or give opm-data as argument' % OPMDATA_DIR)
| gpl-3.0 |
huzq/scikit-learn | sklearn/cluster/_birch.py | 3 | 24358 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics import pairwise_distances_argmin
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..exceptions import ConvergenceWarning
from . import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
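# Illustrative sketch (not part of scikit-learn itself): iterating over a tiny
# CSR matrix with ``_iterate_sparse_X``; each yielded row is a dense 1-D array
# of length n_features. The toy matrix is an assumption for demonstration.
def _example_iterate_sparse_X():
    X = sparse.csr_matrix(np.array([[0., 2., 0.],
                                    [1., 0., 3.]]))
    return [row for row in _iterate_sparse_X(X)]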
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
    2. The two most distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to which of these two subclusters each existing
       subcluster is closest to.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold=threshold, branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold=threshold, branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[(farthest_idx,)]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode:
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : list
List of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``
        since the centroids are just a view of the ``init_centroids_``.
    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout, similar to
        ``init_centroids_``.
centroids_ : ndarray of shape (branching_factor + 1, n_features)
View of ``init_centroids_``.
squared_norm_ : ndarray of shape (branching_factor + 1,)
View of ``init_sq_norm_``.
"""
def __init__(self, *, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norms as views, so that updating
        # init_centroids_ and init_sq_norm_ is sufficient.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
            # The child node had to be split: redistribute its subclusters
            # and add a new subcluster in the parent node to accommodate
            # the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster:
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray of shape (n_features,), default=None
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray of shape (branching_factor + 1, n_features)
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child node of the subcluster. Once a given _CFNode is set as the
        child of this _CFSubcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray of shape (branching_factor + 1,)
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, *, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.centroid_ = self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(ClusterMixin, TransformerMixin, BaseEstimator):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
.. versionadded:: 0.16
Parameters
----------
threshold : float, default=0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default=50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default=3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- :mod:`sklearn.cluster` Estimator : If a model is provided, the model
is fit treating the subclusters as new samples and the initial data
is mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default=True
Whether or not to compute labels for each fit.
copy : bool, default=True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray of shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
See Also
--------
MiniBatchKMeans
Alternative implementation that does incremental updates
of the centers' positions using mini-batches.
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(n_clusters=None)
>>> brc.fit(X)
Birch(n_clusters=None)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
"""
@_deprecate_positional_args
def __init__(self, *, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = self._validate_data(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold=threshold,
branching_factor=branching_factor,
is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold=threshold,
branching_factor=branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; a good candidate for Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold=threshold,
branching_factor=branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), \
default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
check_is_fitted(self)
if (hasattr(self, 'subcluster_centers_') and
X.shape[1] != self.subcluster_centers_.shape[1]):
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape(n_samples,)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
kwargs = {'Y_norm_squared': self._subcluster_norms}
return self.subcluster_labels_[
pairwise_distances_argmin(X,
self.subcluster_centers_,
metric_kwargs=kwargs)
]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self)
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, numbers.Integral):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters), ConvergenceWarning)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
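# Illustrative sketch (not part of scikit-learn itself): incremental use of
# Birch via ``partial_fit`` on two hypothetical batches of 2-D points, then a
# final call with no data to re-run only the global clustering step. The data
# and parameter values are assumptions chosen purely for demonstration.
def _example_birch_partial_fit():
    rng = np.random.RandomState(42)
    brc = Birch(n_clusters=3, threshold=0.5)
    brc.partial_fit(rng.randn(50, 2))        # first batch builds the CF tree
    brc.partial_fit(rng.randn(50, 2) + 5.0)  # second batch updates it online
    brc.partial_fit()                        # global clustering step only
    return brc.subcluster_centers_, brc.subcluster_labels_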
| bsd-3-clause |
python-acoustics/python-acoustics | tests/test_imaging.py | 2 | 1114 | import numpy as np
import pytest
has_matplotlib = pytest.importorskip("matplotlib")
if has_matplotlib:
from acoustics.bands import octave, third
from acoustics.imaging import plot_octave, plot_third, plot_bands
def setup_module(imaging):
imaging.octaves = octave(16, 16000)
imaging.thirds = third(63, 8000)
imaging.tl_oct = np.array([3, 4, 5, 12, 15, 24, 28, 23, 35, 45, 55])
imaging.tl_third = np.array([0, 0, 0, 1, 1, 2, 3, 5, 8, 13, 21, 32, 41, 47, 46, 44, 58, 77, 61, 75, 56, 54])
imaging.title = 'Title'
imaging.label = 'Label'
def test_plot_octave():
plot_octave(tl_oct, octaves)
def test_plot_octave_kHz():
plot_octave(tl_oct, octaves, kHz=True, xlabel=label, ylabel=label, title=title, separator='.')
def test_plot_third_octave():
plot_third(tl_third, thirds, marker='s', separator=',')
def test_plot_third_octave_kHz():
plot_third(tl_third, thirds, marker='s', kHz=True, xlabel=label, ylabel=label, title=title)
def test_plot_band_oct():
plot_bands(tl_oct, octaves, axes=None, band_type='octave')
def teardown_module(imaging):
pass
| bsd-3-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/series/methods/test_convert_dtypes.py | 2 | 5852 | from itertools import product
import numpy as np
import pytest
from pandas.core.dtypes.common import is_interval_dtype
import pandas as pd
import pandas._testing as tm
# Each test case consists of a tuple with the data and dtype to create the
# test Series, the default dtype for the expected result (which is valid
# for most cases), and the specific cases where the result deviates from
# this default. Those overrides are defined as a dict with (keyword, val) as
# dictionary key. In case of multiple items, the last override takes precedence.
test_cases = [
(
# data
[1, 2, 3],
# original dtype
np.dtype("int32"),
# default expected dtype
"Int32",
# exceptions on expected dtype
{("convert_integer", False): np.dtype("int32")},
),
(
[1, 2, 3],
np.dtype("int64"),
"Int64",
{("convert_integer", False): np.dtype("int64")},
),
(
["x", "y", "z"],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
(
[True, False, np.nan],
np.dtype("O"),
pd.BooleanDtype(),
{("convert_boolean", False): np.dtype("O")},
),
(
["h", "i", np.nan],
np.dtype("O"),
pd.StringDtype(),
{("convert_string", False): np.dtype("O")},
),
( # GH32117
["h", "i", 1],
np.dtype("O"),
np.dtype("O"),
{},
),
(
[10, np.nan, 20],
np.dtype("float"),
"Int64",
{
("convert_integer", False, "convert_floating", True): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype("float"),
},
),
(
[np.nan, 100.5, 200],
np.dtype("float"),
"Float64",
{("convert_floating", False): np.dtype("float")},
),
(
[3, 4, 5],
"Int8",
"Int8",
{},
),
(
[[1, 2], [3, 4], [5]],
None,
np.dtype("O"),
{},
),
(
[4, 5, 6],
np.dtype("uint32"),
"UInt32",
{("convert_integer", False): np.dtype("uint32")},
),
(
[-10, 12, 13],
np.dtype("i1"),
"Int8",
{("convert_integer", False): np.dtype("i1")},
),
(
[1.2, 1.3],
np.dtype("float32"),
"Float32",
{("convert_floating", False): np.dtype("float32")},
),
(
[1, 2.0],
object,
"Int64",
{
("convert_integer", False): "Float64",
("convert_integer", False, "convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(
[1, 2.5],
object,
"Float64",
{
("convert_floating", False): np.dtype("float"),
("infer_objects", False): np.dtype("object"),
},
),
(["a", "b"], pd.CategoricalDtype(), pd.CategoricalDtype(), {}),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
pd.DatetimeTZDtype(tz="UTC"),
pd.DatetimeTZDtype(tz="UTC"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
"datetime64[ns]",
np.dtype("datetime64[ns]"),
{},
),
(
pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
object,
np.dtype("datetime64[ns]"),
{("infer_objects", False): np.dtype("object")},
),
(pd.period_range("1/1/2011", freq="M", periods=3), None, pd.PeriodDtype("M"), {}),
(
pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]),
None,
pd.IntervalDtype("int64"),
{},
),
]
class TestSeriesConvertDtypes:
@pytest.mark.parametrize(
"data, maindtype, expected_default, expected_other",
test_cases,
)
@pytest.mark.parametrize("params", product(*[(True, False)] * 5))
def test_convert_dtypes(
self, data, maindtype, params, expected_default, expected_other
):
if maindtype is not None:
series = pd.Series(data, dtype=maindtype)
else:
series = pd.Series(data)
result = series.convert_dtypes(*params)
param_names = [
"infer_objects",
"convert_string",
"convert_integer",
"convert_boolean",
"convert_floating",
]
params_dict = dict(zip(param_names, params))
expected_dtype = expected_default
for spec, dtype in expected_other.items():
if all(params_dict[key] is val for key, val in zip(spec[::2], spec[1::2])):
expected_dtype = dtype
expected = pd.Series(data, dtype=expected_dtype)
tm.assert_series_equal(result, expected)
# Test that it is a copy
copy = series.copy(deep=True)
if is_interval_dtype(result.dtype) and result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
with pytest.raises(ValueError, match=msg):
result[result.notna()] = np.nan
else:
result[result.notna()] = np.nan
# Make sure original not changed
tm.assert_series_equal(series, copy)
def test_convert_string_dtype(self):
# https://github.com/pandas-dev/pandas/issues/31731 -> converting columns
# that are already string dtype
df = pd.DataFrame(
{"A": ["a", "b", pd.NA], "B": ["ä", "ö", "ü"]}, dtype="string"
)
result = df.convert_dtypes()
tm.assert_frame_equal(df, result)
def test_convert_bool_dtype(self):
# GH32287
df = pd.DataFrame({"A": pd.array([True])})
tm.assert_frame_equal(df, df.convert_dtypes())
| gpl-2.0 |
ltiao/scikit-learn | sklearn/datasets/species_distributions.py | 64 | 7917 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
    header = dict((line.split()[0], float(line.split()[1]))
                  for line in header)
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
        # Remap the cells flagged as missing, not the row at index `nodata`.
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
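# Illustrative sketch (not part of scikit-learn itself): building the
# longitude/latitude grids from a hand-made Bunch carrying the same fields as
# the object returned by ``fetch_species_distributions``. The numeric values
# mirror the ``extra_params`` defined further below.
def _example_construct_grids():
    batch = Bunch(x_left_lower_corner=-94.8, Nx=1212,
                  y_left_lower_corner=-56.05, Ny=1592,
                  grid_size=0.05)
    xgrid, ygrid = construct_grids(batch)
    # xgrid spans the longitudes (~1212 values), ygrid the latitudes (~1592).
    return xgrid, ygrid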
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| bsd-3-clause |
louispotok/pandas | pandas/tests/plotting/test_datetimelike.py | 3 | 55675 | """ Test cases for time series specific (freq conversion, etc) """
from datetime import datetime, timedelta, date, time
import pickle
import pytest
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame, NaT
from pandas.compat import PY3
from pandas.core.indexes.datetimes import date_range, bdate_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tseries.offsets import DateOffset
from pandas.core.indexes.period import period_range, Period, PeriodIndex
from pandas.core.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.plotting.common import (TestPlotBase,
_skip_if_no_scipy_gaussian_kde)
@td.skip_if_no_mpl
class TestTSPlot(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def teardown_method(self, method):
tm.close()
@pytest.mark.slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
df = DataFrame(np.random.randn(10, 9), index=range(10))
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
assert label.get_fontsize() == 2
@pytest.mark.slow
def test_frame_inferred(self):
# inferred freq
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
fig, ax = self.plt.subplots()
df.plot(ax=ax) # it works
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
pytest.raises(TypeError, df['A'].plot)
def test_tsplot_deprecated(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
with tm.assert_produces_warning(FutureWarning):
tsplot(ts, self.plt.Axes.plot, ax=ax)
@pytest.mark.slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
def f(*args, **kwds):
with tm.assert_produces_warning(FutureWarning):
return tsplot(s, self.plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
_, ax = self.plt.subplots()
ts.plot(style='k', ax=ax)
color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
pytest.raises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
pytest.raises(ValueError, s.plot, style='b-', color='#000099')
@pytest.mark.slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
_, ax = self.plt.subplots()
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot, ax=ax)
def test_get_datevalue(self):
from pandas.plotting._converter import get_datevalue
assert get_datevalue(None, 'D') is None
assert get_datevalue(1987, 'A') == 1987
assert (get_datevalue(Period(1987, 'A'), 'M') ==
Period('1987-12', 'M').ordinal)
assert (get_datevalue('1/1/1987', 'D') ==
Period('1987-1-1', 'D').ordinal)
@pytest.mark.slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
assert expected_string == ax.format_coord(first_x, first_y)
except (ValueError):
pytest.skip("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
_, ax = self.plt.subplots()
annual.plot(ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
daily.plot(ax=ax)
check_format_of_first_point(ax,
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
with tm.assert_produces_warning(FutureWarning):
tsplot(annual, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
with tm.assert_produces_warning(FutureWarning):
tsplot(daily, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014-01-01 y = 1.000000')
@pytest.mark.slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@pytest.mark.slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@pytest.mark.slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@pytest.mark.slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@pytest.mark.slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ts.plot(ax=ax)
assert not hasattr(ax, 'freq')
@pytest.mark.slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@pytest.mark.slow
def test_uhf(self):
import pandas.plotting._converter as conv
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
assert xp == rs
@pytest.mark.slow
def test_irreg_hf(self):
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.iloc[[0, 1, 3, 4]]
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.astype(object)
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
_, ax = self.plt.subplots()
ret = ser.plot(ax=ax)
assert ret is not None
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
assert rs == xp
def test_business_freq(self):
bts = tm.makePeriodSeries()
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'B'
@pytest.mark.slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'M'
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
rs = ax.get_lines()[0].get_xdata()
assert not Index(rs).is_normalized
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@pytest.mark.slow
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert result[0] == xlim[0] - 5
assert result[1] == xlim[1] + 10
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
# datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
fig = ax.get_figure()
self.plt.close(fig)
ser = tm.makeTimeSeries()
_, ax = self.plt.subplots()
ser.plot(ax=ax)
_test(ax)
_, ax = self.plt.subplots()
df = DataFrame({'a': ser, 'b': ser + 1})
df.plot(ax=ax)
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.plotting._converter as conv
assert conv.get_finder('B') == conv._daily_finder
assert conv.get_finder('D') == conv._daily_finder
assert conv.get_finder('M') == conv._monthly_finder
assert conv.get_finder('Q') == conv._quarterly_finder
assert conv.get_finder('A') == conv._annual_finder
assert conv.get_finder('W') == conv._daily_finder
@pytest.mark.slow
def test_finder_daily(self):
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
if self.mpl_ge_2_0_0:
xpl1 = [7565, 7564, 7553, 7546, 7518, 7428, 7066]
xpl2 = [7566, 7564, 7554, 7546, 7519, 7429, 7066]
else:
xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)
for i, n in enumerate(day_lst):
xp = xpl1[i]
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
xp = xpl2[i]
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_quarterly(self):
yrs = [3.5, 11]
if self.mpl_ge_2_0_0:
xpl1 = [68, 68]
xpl2 = [72, 68]
else:
xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)
for i, n in enumerate(yrs):
xp = xpl1[i]
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
xp = xpl2[i]
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_monthly(self):
yrs = [1.15, 2.5, 4, 11]
if self.mpl_ge_2_0_0:
xpl1 = [216, 216, 204, 204]
xpl2 = [216, 216, 216, 204]
else:
xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)
for i, n in enumerate(yrs):
xp = xpl1[i]
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
xp = xpl2[i]
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
assert rs == xp
@pytest.mark.slow
def test_finder_annual(self):
if self.mpl_ge_2_0_0:
xp = [1986, 1986, 1990, 1990, 1995, 2020, 1970, 1970]
else:
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == Period(xp[i], freq='A').ordinal
self.plt.close(ax.get_figure())
@pytest.mark.slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
if self.mpl_ge_2_0_0:
xp = Period('1998-12-29 12:00', freq='Min').ordinal
else:
xp = Period('1/1/1999', freq='Min').ordinal
assert rs == xp
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
if self.mpl_ge_2_0_0:
xp = Period('1998-12-31 22:00', freq='H').ordinal
else:
xp = Period('1/1/1999', freq='H').ordinal
assert rs == xp
@td.skip_if_mpl_1_5
@pytest.mark.slow
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
_, ax = self.plt.subplots()
ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
self.plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
self.plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
_, ax = self.plt.subplots()
ser.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
@td.skip_if_mpl_1_5
@pytest.mark.slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
_, ax = self.plt.subplots()
low.plot(ax=ax)
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
@pytest.mark.slow
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
assert not hasattr(ax, 'left_ax')
assert hasattr(ax, 'right_ax')
assert hasattr(ax2, 'left_ax')
assert not hasattr(ax2, 'right_ax')
@pytest.mark.slow
def test_secondary_y_ts(self):
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_secondary_kde(self):
if not self.mpl_ge_1_5_0:
            pytest.skip("mpl >= 1.5 is required")
_skip_if_no_scipy_gaussian_kde()
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ax = ser.plot(secondary_y=True, kind='density', ax=ax)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ser.plot(secondary_y=True, kind='bar', ax=ax)
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
@pytest.mark.slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
def test_mixed_freq_regular_first(self):
# TODO
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
tm.assert_index_equal(idx1, s1.index.to_period('B'))
tm.assert_index_equal(idx2, s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left <= pidx[0].ordinal
assert right >= pidx[-1].ordinal
@pytest.mark.slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
ts2.plot(style='r', ax=ax)
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
@pytest.mark.slow
def test_mixed_freq_lf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(legend=True, ax=ax)
high.plot(legend=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
leg = ax.get_legend()
assert len(leg.texts) == 2
self.plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'T'
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
ps.plot(ax=ax)
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.freq == 'M'
assert ax2.freq == 'M'
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# assert (ax1.lines[0].get_xydata()[0, 0] ==
# ax2.lines[0].get_xydata()[0, 0])
def test_nat_handling(self):
_, ax = self.plt.subplots()
dti = DatetimeIndex(['2015-01-01', NaT, '2015-01-03'])
s = Series(range(len(dti)), dti)
s.plot(ax=ax)
xdata = ax.get_lines()[0].get_xdata()
# plot x data is bounded by index values
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
@pytest.mark.slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
with tm.assert_produces_warning(FutureWarning):
tsplot(high, self.plt.Axes.plot, ax=ax)
with tm.assert_produces_warning(FutureWarning):
lines = tsplot(low, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
@pytest.mark.slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
with tm.assert_produces_warning(FutureWarning):
tsplot(low, self.plt.Axes.plot, ax=ax)
with tm.assert_produces_warning(FutureWarning):
lines = tsplot(high, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
@pytest.mark.slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
low.plot(kind=kind1, stacked=True, ax=ax)
high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
high.plot(kind=kind1, stacked=True, ax=ax)
low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
@pytest.mark.slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
tm.close()
# low to high
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
@pytest.mark.slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].astype(object)
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
@pytest.mark.xfail(not PY3, reason="failing on mpl 1.4.3 on PY2")
@pytest.mark.slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
fig, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
fig.canvas.draw()
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime('%H:%M:%S')
else:
xp = time(h, m, s).strftime('%H:%M')
assert xp == rs
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
fig.canvas.draw()
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if s != 0:
xp = time(h, m, s).strftime('%H:%M:%S')
else:
xp = time(h, m, s).strftime('%H:%M')
assert xp == rs
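        # (Illustrative note, editor addition) The loop above treats each tick
        # value t as seconds since midnight: a tick landing at t = 12600 unpacks
        # as divmod(12600, 60) -> (210, 0) and divmod(210, 60) -> (3, 30), i.e.
        # the label '03:30', which is the base time this frame was built from.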
@pytest.mark.slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
fig, ax = self.plt.subplots()
ax = df.plot(ax=ax)
# verify tick labels
fig.canvas.draw()
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
us = int(round((t - int(t)) * 1e6))
h, m = divmod(m, 60)
rs = l.get_text()
if len(rs) > 0:
if (us % 1000) != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S.%f')
elif (us // 1000) != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S.%f')[:-3]
elif s != 0:
xp = time(h, m, s, us).strftime('%H:%M:%S')
else:
xp = time(h, m, s, us).strftime('%H:%M')
assert xp == rs
@pytest.mark.slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
ax = high.plot(secondary_y=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
for l in ax.left_ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
@pytest.mark.slow
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B (right)'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=['A', 'C'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
# non-ts
df = tm.makeDataFrame()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
assert l.get_rotation() == 30
@pytest.mark.slow
def test_ax_plot(self):
x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
y = lrange(len(x))
_, ax = self.plt.subplots()
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@pytest.mark.slow
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@pytest.mark.slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
assert left <= ts_irregular.index.min().toordinal()
assert right >= ts_irregular.index.max().toordinal()
@pytest.mark.slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before >= left_after
assert right_before < right_after
@pytest.mark.slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
_, ax = self.plt.subplots()
ts.plot(ax=ax)
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
assert left_before == left_after
assert right_before == right_after
@pytest.mark.slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
assert left <= ts_irregular.index.min().toordinal()
assert right >= ts_irregular.index.max().toordinal()
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
_, ax = self.plt.subplots()
ax.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
ax.plot(values)
def test_format_timedelta_ticks_narrow(self):
if self.mpl_ge_2_2_0:
expected_labels = (['-1 days 23:59:59.999999998'] +
['00:00:00.0000000{:0>2d}'.format(2 * i)
for i in range(6)])
elif self.mpl_ge_2_0_0:
expected_labels = [''] + [
'00:00:00.00000000{:d}'.format(2 * i)
for i in range(5)] + ['']
else:
expected_labels = [
'00:00:00.00000000{:d}'.format(i)
for i in range(10)]
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
def test_format_timedelta_ticks_wide(self):
if self.mpl_ge_2_0_0:
expected_labels = [
'',
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
'3 days 11:20:00',
'4 days 15:06:40',
'5 days 18:53:20',
'6 days 22:40:00',
'8 days 02:26:40',
'9 days 06:13:20',
''
]
if self.mpl_ge_2_2_0:
expected_labels[0] = '-2 days 20:13:20'
expected_labels[-1] = '10 days 10:00:00'
else:
expected_labels = [
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
'3 days 11:20:00',
'4 days 15:06:40',
'5 days 18:53:20',
'6 days 22:40:00',
'8 days 02:26:40',
''
]
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
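        # (Illustrative note, editor addition) The expected labels above are
        # evenly spaced: each step of '1 days 03:46:40' equals
        # 86400 + 3*3600 + 46*60 + 40 = 100000 seconds, consistent with the
        # locator choosing a round step in seconds over the 9-day range.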
def test_timedelta_plot(self):
# test issue #8711
s = Series(range(5), timedelta_range('1day', periods=5))
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test long period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 d')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test short period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 ns')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
def test_hist(self):
# https://github.com/matplotlib/matplotlib/issues/8459
rng = date_range('1/1/2011', periods=10, freq='H')
x = rng
w1 = np.arange(0, 1, .1)
w2 = np.arange(0, 1, .1)[::-1]
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
@pytest.mark.slow
def test_overlapping_datetime(self):
        # GH 6608
s1 = Series([1, 2, 3], index=[datetime(1995, 12, 31),
datetime(2000, 12, 31),
datetime(2005, 12, 31)])
s2 = Series([1, 2, 3], index=[datetime(1997, 12, 31),
datetime(2003, 12, 31),
datetime(2008, 12, 31)])
# plot first series, then add the second series to those axes,
# then try adding the first series again
_, ax = self.plt.subplots()
s1.plot(ax=ax)
s2.plot(ax=ax)
s1.plot(ax=ax)
@pytest.mark.xfail(reason="GH9053 matplotlib does not use"
" ax.xaxis.converter")
def test_add_matplotlib_datetime64(self):
# GH9053 - ensure that a plot with PeriodConverter still understands
# datetime64 data. This still fails because matplotlib overrides the
# ax.xaxis.converter with a DatetimeConverter
s = Series(np.random.randn(10),
index=date_range('1970-01-02', periods=10))
ax = s.plot()
ax.plot(s.index, s.values, color='g')
l1, l2 = ax.lines
tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
# GH18439
# this is supported only in Python 3 pickle since
# pickle in Python2 doesn't support instancemethod pickling
if PY3:
with ensure_clean(return_filelike=True) as path:
pickle.dump(fig, path)
finally:
plt.close(fig)
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 13 | 25584 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
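# Illustrative sketch (editor addition, not part of the upstream module):
# _min_int walks up the signed integer dtypes until [low, high] fits.
#
#     >>> _min_int(0, 100)        # -> numpy.int8  (range -128..127)
#     >>> _min_int(0, 40000)      # -> numpy.int32 (too large for int16)
#     >>> _min_int(-1, 2 ** 40)   # -> numpy.int64 (beyond the int32 range)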
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
    array([[ 1.,  0.,  0.],
           [ 0.,  2.,  0.],
           [ 0.,  0.,  3.]])
>>> np.fliplr(A)
    array([[ 0.,  0.,  1.],
           [ 0.,  2.,  0.],
           [ 3.,  0.,  0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
    array([[ 1.,  0.,  0.],
           [ 0.,  2.,  0.],
           [ 0.,  0.,  3.]])
>>> np.flipud(A)
    array([[ 0.,  0.,  3.],
           [ 0.,  2.,  0.],
           [ 1.,  0.,  0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]])
>>> np.eye(3, k=1)
    array([[ 0.,  1.,  0.],
           [ 0.,  0.,  1.],
           [ 0.,  0.,  0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
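# (Editor note, illustrative) The ``m[:M-k].flat[i::M+1] = 1`` assignment above
# works because stepping by M + 1 through the flattened (N, M) array moves one
# row down and one column right, i.e. it walks along a diagonal; for a 4x4
# array with k=0 the flat indices hit are 0, 5, 10, 15. The ``m[:M-k]`` slice
# stops the walk before it wraps around for positive k.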
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
    array([[0, 1, 2],
           [3, 4, 5],
           [6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
    array([[0, 0, 0],
           [0, 4, 0],
           [0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
    array([[1, 0, 0, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 0],
           [0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
    array([[0, 1, 0],
           [0, 0, 2],
           [0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
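# (Editor note, illustrative) The flat-index arithmetic in diagflat: for k >= 0
# the diagonal element sits at (row i, column i + k), whose flat index in an
# n x n array is i*n + (i + k) -- the ``fi = i+k+i*n`` above; for k < 0 it sits
# at (row i - k, column i), giving ``fi = i+(i-k)*n``. For example,
# np.diagflat([1, 2], 1) places 1 at flat index 1 and 2 at flat index 5 of a
# 3x3 array, matching the docstring example.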
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
    array([[ 0.,  0.,  0.,  0.,  0.],
           [ 1.,  0.,  0.,  0.,  0.],
           [ 1.,  1.,  0.,  0.,  0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
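# (Editor note, illustrative) The outer comparison above evaluates to
# ``i >= j - k`` at position (i, j), which is the same condition as
# ``j <= i + k`` stated in the docstring; e.g. for np.tri(3, 5, 2), T[1, 4]
# is 0 because 4 > 1 + 2, matching the second row of the docstring example.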
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 0,  0,  0],
           [ 4,  0,  0],
           [ 7,  8,  0],
           [10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
    array([[ 1,  1,  1],
           [ 4,  2,  1],
           [ 9,  3,  1],
           [25,  5,  1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
    array([[  1,   1,   1,   1],
           [  8,   4,   2,   1],
           [ 27,   9,   3,   1],
           [125,  25,   5,   1]])
>>> np.vander(x, increasing=True)
    array([[  1,   1,   1,   1],
           [  1,   2,   4,   8],
           [  1,   3,   9,  27],
           [  1,   5,  25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
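# (Editor note, illustrative) vander builds the powers with a cumulative
# product: every column of ``tmp`` from index 1 onwards is first filled with x,
# and multiply.accumulate along each row then turns column j into x**j; the
# reversed view ``v[:, ::-1]`` is what yields decreasing powers when
# ``increasing`` is False.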
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
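# (Editor note, illustrative) The ``N != 1 and N != 2`` branch above covers a
# single sequence of more than two edges: the same edge array is then reused
# for both dimensions before delegating to histogramdd, e.g.
#
#     >>> x = np.random.randn(100); y = np.random.randn(100)
#     >>> H, xe, ye = np.histogram2d(x, y, bins=[-3, -1, 0, 1, 3])
#     >>> # xe and ye both hold these same five edges, so H has shape (4, 4)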
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
        The two arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
    array([[0, 1, 2],
           [3, 4, 5],
           [6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
    array([[-1,  1,  2,  3],
           [-1, -1,  6,  7],
           [-1, -1, -1, 11],
           [-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
    array([[-10, -10, -10,   3],
           [-10, -10, -10, -10],
           [-10, -10, -10, -10],
           [-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
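# Illustrative usage sketch (editor addition; mirrors the tril_indices example
# above rather than the upstream docstring):
#
#     >>> a = np.arange(16).reshape(4, 4)
#     >>> il = np.tril_indices_from(a, k=-1)   # strictly-lower triangle of `a`
#     >>> a[il]
#     array([ 4,  8,  9, 12, 13, 14])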
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
    array([[-1, -1, -1, -1],
           [ 4, -1, -1, -1],
           [ 8,  9, -1, -1],
           [12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
    array([[ -1,  -1, -10, -10],
           [  4,  -1,  -1, -10],
           [  8,   9,  -1,  -1],
           [ 12,  13,  14,  -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
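# Illustrative usage sketch (editor addition, not part of the upstream
# docstring):
#
#     >>> a = np.arange(16).reshape(4, 4)
#     >>> iu = np.triu_indices_from(a, k=1)    # strictly-upper triangle of `a`
#     >>> a[iu]
#     array([ 1,  2,  3,  6,  7, 11])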
| apache-2.0 |
tobyriddell/self-driving-car | building_block_drive_car.py | 1 | 2566 | #!/usr/bin/python
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import explorerhat
import datetime
import sys, tty, termios
from sklearn.externals import joblib
import numpy as np
import image_operations
# Load the model
clf = joblib.load('model.pkl')
#scaler = joblib.load('scaler.pkl')
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
camera.vflip = True
#camera.color_effects = (128,128) # turn camera to black and white
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(0.1)
def turn_left():
explorerhat.motor.two.forward(100)
time.sleep(0.5)
explorerhat.motor.two.stop()
def turn_right():
explorerhat.motor.one.forward(100)
time.sleep(0.5)
explorerhat.motor.one.stop()
def move_forward():
explorerhat.motor.forwards()
time.sleep(0.5)
explorerhat.motor.stop()
def do_driving(result):
if result == "left":
turn_left()
elif result == "right":
turn_right()
elif result == "forward":
move_forward()
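# The real preprocessing lives in image_operations.process_image (imported
# above). The sketch below is an editor-added reconstruction of that pipeline,
# inferred from the commented-out steps further down (crop to the lower half of
# the frame, blur, keep the blue channel, binarise); it is illustrative only,
# may differ from the actual module, and is never called.
def _process_image_sketch(image):
    cropped = image[240:480, 0:640]        # keep the lower half of the 640x480 frame
    blurred = cv2.blur(cropped, (5, 5))    # smooth out sensor noise
    b, g, r = cv2.split(blurred)           # the track line stands out on the blue channel
    _, binary = cv2.threshold(b, 140, 255, cv2.THRESH_BINARY)
    return binary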
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# grab the raw NumPy array representing the image, then initialize the timestamp
# and occupied/unoccupied text
image = frame.array
# Crop, blur, select blue channel, and binarise the image
processed_image = image_operations.process_image(image)
processed_image = cv2.resize(processed_image, (24, 24))
#processed_image = image
#processed_image = processed_image[240:480, 0:640]
#processed_image = cv2.blur(processed_image, (5, 5))
#b,g,r = cv2.split(processed_image)
#processed_image = b
#retval, processed_image = cv2.threshold(processed_image, 140, 255, cv2.THRESH_BINARY)
image_as_array = np.ndarray.flatten(np.array(processed_image))
result = clf.predict([image_as_array])[0]
do_driving(result)
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# fd = sys.stdin.fileno()
# old_settings = termios.tcgetattr(fd)
# try:
# tty.setraw(sys.stdin.fileno())
# key = sys.stdin.read(1)
# finally:
# termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
#
# print("Read key %s" % key)
#
# if key == "q": # if the `q` key was pressed, break from the loop
# break
#
# time.sleep(0.5) # Let the car settle to prevent blurring
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| mit |
Achuth17/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
gtesei/fast-furious | competitions/microsoft-malware-prediction/base_lightGBM2.py | 1 | 22043 | # -*- Coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import re
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
import lightgbm as lgb
#import xgboost as xgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
#from sklearn.metrics import roc_auc_score
import gc
import sys
gc.enable()
### FUNC ########################################################################
def get_xgb_imp(xgb, feat_names):
from numpy import array
imp_vals = xgb.booster().get_fscore()
imp_dict = {feat_names[i]:float(imp_vals.get('f'+str(i),0.)) for i in range(len(feat_names))}
total = array(imp_dict.values()).sum()
return {k:v/total for k,v in imp_dict.items()}
def xgb_feat_importance(model,cols,file_name):
print('-----> Feature importance ... ')
feature_importance_dict = model.get_fscore()
fs = ['f%i' % i for i in range(len(cols))]
f1 = pd.DataFrame({'f': list(feature_importance_dict.keys()), 'importance': list(feature_importance_dict.values())})
f2 = pd.DataFrame({'f': fs, 'feature_name': cols})
feature_importance = pd.merge(f1, f2, how='right', on='f')
feature_importance = feature_importance.fillna(0)
feature_importance.sort_values(by='importance', ascending=False)
print(feature_importance.sort_values)
feature_importance.to_csv(file_name, index=False)
def add_avg_per(df,what_to_avg,on,new_name,include_delta=True,include_perc=True):
if type(on) == str:
_full = [on,what_to_avg]
_fulla = [on,new_name]
elif type(on) == list:
_full = on.copy()
_full.append(what_to_avg)
_fulla = on.copy()
_fulla.append(new_name)
else:
        raise Exception("'on' must be a str or a list of column names")
_avg = df.groupby(on)[_full].mean()
_avg.columns = _fulla
prev_len = len(df)
df = df.merge(_avg,how='inner' , on=on)
assert len(df) == prev_len
if include_delta:
df[str(new_name+'_delta')] = df[what_to_avg] - df[new_name]
if include_perc:
df[str(new_name+'_perc')] = (df[what_to_avg] - df[new_name])/df[new_name]
return df
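# Illustrative usage sketch for add_avg_per (comment only; the DataFrame and
# column names below are hypothetical and not part of this script's data):
#   df = pd.DataFrame({'shop': ['a', 'a', 'b'], 'price': [1.0, 3.0, 5.0]})
#   df = add_avg_per(df, what_to_avg='price', on='shop', new_name='avg_price')
# This merges the per-'shop' mean back onto every row and, with the default
# flags, also adds 'avg_price_delta' (price - avg_price) and
# 'avg_price_perc' ((price - avg_price) / avg_price).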
def encode_dataset(train,test,meta,target_model='lgb'):
y_train = train[meta['target']]
train = train.drop([meta['target']],axis=1)
assert train.shape[1] == test.shape[1]
for i in range(train.shape[1]):
assert train.columns[i] == test.columns[i]
train_obs = len(train)
#
for i,usecol in enumerate(train.columns.tolist()[1:-1]):
if usecol in ['Census_PrimaryDiskTotalCapacity','Census_SystemVolumeTotalCapacity',
'Census_InternalPrimaryDiagonalDisplaySizeInInches',
'Census_InternalBatteryNumberOfCharges','Census_OSBuildNumber']:
print(i,usecol,"NUM")
            train[usecol] = train[usecol].fillna(-1)
            test[usecol] = test[usecol].fillna(-1)
else:
print(i,usecol,"CAT")
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
#Fit LabelEncoder
le = LabelEncoder().fit(np.unique(train[usecol].unique().tolist()+test[usecol].unique().tolist()))
#At the end 0 will be used for dropped values
train[usecol] = le.transform(train[usecol])+1
test[usecol] = le.transform(test[usecol])+1
agg_tr = (train.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Train'}, axis=1))
agg_te = (test.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Test'}, axis=1))
agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
#Select values with more than 1000 observations
agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
agg['Total'] = agg['Train'] + agg['Test']
#Drop unbalanced values
agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
agg[usecol+'Copy'] = agg[usecol]
train[usecol] = (pd.merge(train[[usecol]],agg[[usecol, usecol+'Copy']],on=usecol, how='left')[usecol+'Copy'].replace(np.nan, 0).astype('int').astype('category'))
test[usecol] = (pd.merge(test[[usecol]],agg[[usecol, usecol+'Copy']],on=usecol, how='left')[usecol+'Copy'].replace(np.nan, 0).astype('int').astype('category'))
assert train_obs == len(y_train)
return train , test, y_train
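# Worked example of the category filter above (the counts are hypothetical):
# a label-encoded value seen 5000 times in train and 4000 times in test has
# Train/Total = 5000/9000 ~= 0.56 and is kept; a value seen 3000 times in
# train but only 200 times in test has Train/Total ~= 0.94 and is dropped,
# i.e. remapped to the catch-all code 0, as is any value with 1000 or fewer
# train observations.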
#################################################################################
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
print('Download Train and Test Data.\n')
DEBUG = False
if DEBUG:
print(">>> Debug mode ... ")
train = pd.read_csv('data/train_sample.csv', dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv('data/test_sample.csv', dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
gc.collect()
else:
print(">>> Production mode ... ")
train = pd.read_csv('data/train.csv', dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv('data/test.csv', dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
gc.collect()
print('Transform almost all features to category.\n')
meta = {'target': 'HasDetections',
'test_id': 'MachineIdentifier',
'cols': {
'MachineIdentifier': 'CAT',
'ProductName': 'CAT',
'EngineVersion': 'CAT',
'AppVersion': 'CAT',
'AvSigVersion': 'CAT',
'IsBeta': 'CAT',
'RtpStateBitfield': 'CAT',
'IsSxsPassiveMode': 'CAT',
'DefaultBrowsersIdentifier': 'CAT',
'AVProductStatesIdentifier': 'CAT',
'AVProductsInstalled': 'CAT',
'AVProductsEnabled': 'CAT',
'HasTpm': 'CAT',
'CountryIdentifier': 'CAT',
'CityIdentifier': 'CAT',
'OrganizationIdentifier': 'CAT',
'GeoNameIdentifier': 'CAT',
'LocaleEnglishNameIdentifier': 'CAT',
'Platform': 'CAT',
'Processor': 'CAT',
'OsVer': 'CAT',
'OsBuild': 'CAT',
'OsSuite': 'CAT',
'OsPlatformSubRelease': 'CAT',
'OsBuildLab': 'CAT',
'SkuEdition': 'CAT',
'IsProtected': 'CAT',
'AutoSampleOptIn': 'CAT',
'PuaMode': 'CAT',
'SMode': 'CAT',
'IeVerIdentifier': 'CAT',
'SmartScreen': 'CAT',
'Firewall': 'CAT',
'UacLuaenable': 'CAT',
'Census_MDC2FormFactor': 'CAT',
'Census_DeviceFamily': 'CAT',
'Census_OEMNameIdentifier': 'CAT',
'Census_OEMModelIdentifier': 'CAT',
'Census_ProcessorCoreCount': 'CAT',
'Census_ProcessorManufacturerIdentifier': 'CAT',
'Census_ProcessorModelIdentifier': 'CAT',
'Census_ProcessorClass': 'CAT',
'Census_PrimaryDiskTotalCapacity': 'NUM',
'Census_PrimaryDiskTypeName': 'CAT',
'Census_SystemVolumeTotalCapacity': 'NUM',
'Census_HasOpticalDiskDrive': 'CAT',
'Census_TotalPhysicalRAM': 'CAT',
'Census_ChassisTypeName': 'CAT',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'NUM',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'CAT',
'Census_InternalPrimaryDisplayResolutionVertical': 'CAT',
'Census_PowerPlatformRoleName': 'CAT',
'Census_InternalBatteryType': 'CAT',
'Census_InternalBatteryNumberOfCharges': 'NUM',
'Census_OSVersion': 'CAT',
'Census_OSArchitecture': 'CAT',
'Census_OSBranch': 'CAT',
'Census_OSBuildNumber': 'NUM',
'Census_OSBuildRevision': 'CAT',
'Census_OSEdition': 'CAT',
'Census_OSSkuName': 'CAT',
'Census_OSInstallTypeName': 'CAT',
'Census_OSInstallLanguageIdentifier': 'CAT',
'Census_OSUILocaleIdentifier': 'CAT',
'Census_OSWUAutoUpdateOptionsName': 'CAT',
'Census_IsPortableOperatingSystem': 'CAT',
'Census_GenuineStateName': 'CAT',
'Census_ActivationChannel': 'CAT',
'Census_IsFlightingInternal': 'CAT',
'Census_IsFlightsDisabled': 'CAT',
'Census_FlightRing': 'CAT',
'Census_ThresholdOptIn': 'CAT',
'Census_FirmwareManufacturerIdentifier': 'CAT',
'Census_FirmwareVersionIdentifier': 'CAT',
'Census_IsSecureBootEnabled': 'CAT',
'Census_IsWIMBootEnabled': 'CAT',
'Census_IsVirtualDevice': 'CAT',
'Census_IsTouchEnabled': 'CAT',
'Census_IsPenCapable': 'CAT',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'CAT',
'Wdft_IsGamer': 'CAT',
'Wdft_RegionIdentifier': 'CAT'
}}
train, test , y_train = encode_dataset(train=train,test=test,meta=meta,target_model='lightgbm')
print(train.head())
train_obs = len(y_train)
train_ids = train.index
test_ids = test.index
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
skf.get_n_splits(train_ids, y_train)
lgb_test_result = np.zeros(test_ids.shape[0])
#lgb_train_result = np.zeros(train_ids.shape[0])
#xgb_test_result = np.zeros(test_ids.shape[0])
#xgb_train_result = np.zeros(train_ids.shape[0])
counter = 0
#Transform data using small groups to reduce memory usage
m = 100000
print('\nLightGBM\n')
for train_index, test_index in skf.split(train_ids, y_train):
print('Fold {}\n'.format(counter + 1))
print("**************************")
print("train_index:",train_index)
print("**************************")
print("test_index:",test_index)
sys.stdout.flush()
##train = load_npz('train.npz')
#X_fit = vstack([train.iloc[train_index[i*m:(i+1)*m]] for i in range(train_index.shape[0] // m + 1)])
#X_val = vstack([train.iloc[test_index[i*m:(i+1)*m]] for i in range(test_index.shape[0] // m + 1)])
X_fit = train.iloc[train_index]
X_val = train.iloc[test_index]
X_fit, X_val = csr_matrix(X_fit, dtype='float32'), csr_matrix(X_val, dtype='float32')
y_fit, y_val = y_train[train_index], y_train[test_index]
lgb_model = lgb.LGBMClassifier(max_depth=-1,
n_estimators=30000,
learning_rate=0.01,
num_leaves=2**12-1,
colsample_bytree=0.28,
objective='binary',
n_jobs=30)
lgb_model.fit(X_fit, y_fit, eval_metric='auc',
eval_set=[(X_val, y_val)],
verbose=100, early_stopping_rounds=100)
##test = load_npz('test.npz')
test = csr_matrix(test, dtype='float32')
lgb_test_result += lgb_model.predict_proba(test)[:,1]
#xgb_test_result += xgb_model.predict_proba(test)[:,1]
counter += 1
feature_imp = pd.DataFrame(sorted(zip(lgb_model.feature_importances_,train.columns)), columns=['Value','Feature'])
feature_imp.to_csv('base_lightgbm_feat_imp2.csv', index=False)
submission = pd.read_csv('data/sample_submission.csv')
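# The loop above trained one model per StratifiedKFold split and summed their
# predicted probabilities on the test set; dividing by `counter` below turns
# that sum into a simple average over the fold models (a soft-voting blend).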
submission['HasDetections'] = lgb_test_result / counter
submission.to_csv('lgb_submission2.csv', index=False)
print('\nDone.')
| mit |
potash/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
verloop/rasa_nlu | rasa_nlu/classifiers/sklearn_intent_classifier.py | 1 | 6712 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import typing
from builtins import zip
import os
import io
from future.utils import PY3
from typing import Any
from typing import Dict
from typing import List
from typing import Text
from typing import Tuple
from rasa_nlu.components import Component
from rasa_nlu.training_data import TrainingData
# How many intents are at max put into the output intent ranking, everything else will be cut off
INTENT_RANKING_LENGTH = 10
# We try to find a good number of cross folds to use during intent training, this specifies the max number of folds
MAX_CV_FOLDS = 5
if typing.TYPE_CHECKING:
import sklearn
import numpy as np
class SklearnIntentClassifier(Component):
"""Intent classifier using the sklearn framework"""
name = "intent_classifier_sklearn"
context_provides = {
"process": ["intent", "intent_ranking"],
}
output_provides = ["intent", "intent_ranking"]
def __init__(self, clf=None, le=None):
# type: (sklearn.model_selection.GridSearchCV, sklearn.preprocessing.LabelEncoder) -> None
"""Construct a new intent classifier using the sklearn framework."""
from sklearn.preprocessing import LabelEncoder
if le is not None:
self.le = le
else:
self.le = LabelEncoder()
self.clf = clf
@classmethod
def required_packages(cls):
# type: () -> List[Text]
return ["numpy", "sklearn"]
def transform_labels_str2num(self, labels):
# type: (List[Text]) -> np.ndarray
"""Transforms a list of strings into numeric label representation.
:param labels: List of labels to convert to numeric representation"""
return self.le.fit_transform(labels)
def transform_labels_num2str(self, y):
# type: (np.ndarray) -> np.ndarray
"""Transforms a list of strings into numeric label representation.
:param y: List of labels to convert to numeric representation"""
return self.le.inverse_transform(y)
def train(self, training_data, intent_features, num_threads):
# type: (TrainingData, np.ndarray, int) -> None
"""Train the intent classifier on a data set.
:param num_threads: number of threads used during training time"""
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
import numpy as np
labels = [e["intent"] for e in training_data.intent_examples]
if len(set(labels)) < 2:
logging.warn("Can not train an intent classifier. Need at least 2 different classes. " +
"Skipping training of intent classifier.")
else:
y = self.transform_labels_str2num(labels)
X = intent_features
            # dirty str fix because sklearn is expecting str not an instance of basestring...
tuned_parameters = [{'C': [1, 2, 5, 10, 20, 100], 'kernel': [str('linear')]}]
cv_splits = max(2, min(MAX_CV_FOLDS, np.min(np.bincount(y)) // 5)) # aim for 5 examples in each fold
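            # Worked example (hypothetical counts): if the rarest intent has
            # 23 training examples, 23 // 5 = 4, so cv_splits =
            # max(2, min(5, 4)) = 4 folds, keeping roughly 5-6 examples of
            # that intent per fold.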
self.clf = GridSearchCV(SVC(C=1, probability=True),
param_grid=tuned_parameters, n_jobs=num_threads,
cv=cv_splits, scoring='f1_weighted', verbose=1)
self.clf.fit(X, y)
def process(self, intent_features):
# type: (np.ndarray) -> Dict[Text, Any]
"""Returns the most likely intent and its probability for the input text."""
if not self.clf:
# component is either not trained or didn't receive enough training data
return {"intent": None, "intent_ranking": []}
X = intent_features.reshape(1, -1)
intent_ids, probabilities = self.predict(X)
intents = self.transform_labels_num2str(intent_ids)
# `predict` returns a matrix as it is supposed to work for multiple examples as well, hence we need to flatten
intents, probabilities = intents.flatten(), probabilities.flatten()
if intents.size > 0 and probabilities.size > 0:
ranking = list(zip(list(intents), list(probabilities)))[:INTENT_RANKING_LENGTH]
return {
"intent": {
"name": intents[0],
"confidence": probabilities[0],
},
"intent_ranking": [{"name": intent, "confidence": score} for intent, score in ranking]
}
else:
return {"intent": {"name": None, "confidence": 0.0}, "intent_ranking": []}
def predict_prob(self, X):
# type: (np.ndarray) -> np.ndarray
"""Given a bow vector of an input text, predict the intent label. Returns probabilities for all labels.
:param X: bow of input text
:return: vector of probabilities containing one entry for each label"""
return self.clf.predict_proba(X)
def predict(self, X):
# type: (np.ndarray) -> Tuple[np.ndarray, np.ndarray]
"""Given a bow vector of an input text, predict most probable label. Returns only the most likely label.
:param X: bow of input text
:return: tuple of first, the most probable label and second, its probability"""
import numpy as np
pred_result = self.predict_prob(X)
# sort the probabilities retrieving the indices of the elements in sorted order
sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))
return sorted_indices, pred_result[:, sorted_indices]
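        # Toy illustration (hypothetical numbers): for a single example with
        # three intents and predict_proba == [[0.1, 0.7, 0.2]], argsort gives
        # [[0, 2, 1]] and fliplr turns it into [[1, 2, 0]], i.e. label ids
        # ordered from most to least probable; the matching probabilities are
        # then [0.7, 0.2, 0.1] (process() flattens both arrays).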
@classmethod
def load(cls, model_dir, intent_classifier_sklearn):
# type: (Text, Text) -> SklearnIntentClassifier
import cloudpickle
if model_dir and intent_classifier_sklearn:
classifier_file = os.path.join(model_dir, intent_classifier_sklearn)
with io.open(classifier_file, 'rb') as f: # pragma: no test
if PY3:
return cloudpickle.load(f, encoding="latin-1")
else:
return cloudpickle.load(f)
else:
return SklearnIntentClassifier()
def persist(self, model_dir):
# type: (Text) -> Dict[Text, Any]
"""Persist this model into the passed directory. Returns the metadata necessary to load the model again."""
import cloudpickle
classifier_file = os.path.join(model_dir, "intent_classifier.pkl")
with io.open(classifier_file, 'wb') as f:
cloudpickle.dump(self, f)
return {
"intent_classifier_sklearn": "intent_classifier.pkl"
}
| apache-2.0 |
walterreade/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
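# (The grid search above picks the shrinkage value that maximizes the
# cross-validated log-likelihood; the returned score is then the mean CV
# log-likelihood of that best ShrunkCovariance estimator.)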
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
MathieuLeocmach/colloids | python/colloids/h52traj.py | 1 | 1614 | import sys, os, os.path, argparse, shutil
import numpy as np
from pandas import DataFrame
import trackpy as tp
from colloids import experiment as xp
from colloids.progressbar import ProgressBar
from colloids.particles import Linker
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert a HDF5 format compatible with trackpy.PandasHDFStoreSingleNode to coordinates and trajectory data.')
parser.add_argument('h5name', help='HDF5 file name')
parser.add_argument('--out', help='The folder where to save the data. By default the same name as HDF5 file, without extension.')
args = parser.parse_args()
h5name = args.h5name
print(h5name)
if args.out is None:
args.out = os.path.splitext(h5name)[0]
print('to %s'%args.out)
os.mkdir(args.out)
    basename = os.path.join(args.out, os.path.splitext(os.path.basename(h5name))[0])
with tp.PandasHDFStoreSingleNode(h5name) as s:
nbzeros = len('%d'%(len(s)-1))
        datfile = basename + '_t%0{:d}d.dat'.format(nbzeros)
        p2trfile = basename + '_t%0{:d}d.p2tr'.format(nbzeros)
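        # Illustrative note (hypothetical sizes): with 120 frames nbzeros == 3,
        # so datfile % 7 expands to '<out>/<basename>_t007.dat', and the .p2tr
        # files follow the same zero-padded numbering.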
pro = ProgressBar(len(s))
for t, frame in s:
            pos = np.vstack((np.zeros((2,3)), frame.as_matrix(['x','y','z'])))
pos[0] = 1, len(pos)-2, 1
np.savetxt(datfile%t, pos, fmt='%g')
            np.savetxt(p2trfile%t, frame.as_matrix(['particle']), fmt='%d')
#if t==0:
# linker = Linker(len(frame))
#else:
            #    linker.loadFrame(frame.as_matrix(['particle']))
pro.animate(t)
#TODO save the linker to a traj file
| gpl-3.0 |
rcrowder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
            # counterpart is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
# if mode == fill, set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of text instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (eg, Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
# attribute. But other handle types does not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
        because the paddings, the handle size etc. depend on the dpi
of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
        Returns a two-element list.
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
'return a extent of the the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
        - bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
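        # For example, loc=1 ('upper right') selects anchor code "NE", so the
        # legend bbox is pinned to the north-east corner of the parent bbox
        # shrunk by borderaxespad * fontsize.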
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
| agpl-3.0 |
cudmore/treadmill | treadmillAnalysis.py | 1 | 6121 | #20160307
import plotly
print 'plotly.__version__=', plotly.__version__ # version >1.9.4 required
import plotly.graph_objs as go
import pandas
import numpy as np
import os.path
import glob
import ntpath # this should get paths on Windows?
class treadmillAnalysis():
def __init__(self):
print 'construct treadmillAnalysis'
self.folder = ''
self.list = ''
self.dbfile = ''
def assignfolder(self, folder):
'''folder ends in "/"'''
print 'treadmillAnalysis.assignfolder', folder
if os.path.exists(folder) :
self.folder = folder
else:
print 'ERROR: treadmillAnalysis.assignFolder() got bad path:', folder
def builddb(self, sessionStr):
'''build a db'''
'''this is assuming the data format for ALL files in folder are same'''
'''todo: add a file version to header and check if this is the case'''
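		# The output CSV has one row per trial .txt file found under self.folder:
		# an Idx column, the Session (name of the enclosing folder), one column per
		# key=value pair parsed from the trial file's first header line, and
		# finally the file_path.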
#open db file
writefile = 'treadmilldb.csv'
if sessionStr:
writefile = 'treadmilldb_' + sessionStr + '.csv'
dbFile = self.folder + writefile
print 'writing dbfile:', dbFile, 'sessionStr:', sessionStr
dbID = open(dbFile, 'w')
numFiles = 0
firstfile = 1
rowIdx = 0
for root, subdirs, files in os.walk(self.folder):
#print 'root:', root
#print '\tsubdirs:', subdirs
#print '\t\tfiles:', files
#print 'root:', root
for filename in files:
if filename.endswith('.txt'):
sessionName = ntpath.basename(root) # session name is name of enclosing folder
if sessionStr and not (sessionName.find(sessionStr) >= 0):
#print ' rejecting:', sessionName
continue
#else:
# print ' accepting', sessionName
file_path = os.path.join(root, filename)
with open(file_path, 'r') as f:
numFiles += 1
header = f.readline()
#
#print header
if firstfile:
dbID.write('Idx' + ',')
dbID.write('Session' + ',')
for nv in header.split(';'):
#print nv
if nv.find('=')>=0:
k, v = nv.split('=')
dbID.write(k + ',')
dbID.write('file_path' + ',')
dbID.write('\n')
firstfile = 0
#
#write values
dbID.write(str(rowIdx) + ',') #Idx
dbID.write(sessionName + ',') #sessionName
for nv in header.split(';'):
#print nv
if nv.find('=')>=0:
k, v = nv.split('=')
#print k, v
dbID.write(v + ',')
dbID.write(file_path + ',')
dbID.write('\n')
rowIdx += 1
dbID.close()
print 'db has', numFiles
return writefile
def getlist(self):
'''return a list of files'''
print 'treadmillAnalysis.getlist', self.folder
dPathList = glob.glob(self.folder + '*.txt')
dFileList = []
for path in dPathList:
dFileList.append(os.path.basename(path))
#print 'treadmillAnalysis.getlist() dFileList=', dFileList
theRet = ''
for file in dFileList:
#theRet += '"' + file + '"' + ","
theRet += file + ","
self.list = theRet
return theRet
def loadheader(self, file):
print 'treadmillAnalysis.loadheader()', self.folder + file
fileID = open(self.folder + file, 'r')
header = fileID.readline() # ';' seperated list of k=v
return header
def plottrialparams(self, trialDict):
''' - plot trial parameters. use this as we are building a trial with main interface {pre, post, ....}
- i will have another version of this to do exactly the same from a trial file
'''
print 'plottrialparams()'
print 'trialDict:', trialDict
preDur = long(trialDict['preDur'])
postDur = long(trialDict['postDur'])
numPulse = long(trialDict['numPulse'])
pulseDur = long(trialDict['pulseDur'])
#
useMotor = trialDict['useMotor'] #{motorOn, motorLocked, motorFree}
motorDel = long(trialDict['motorDel'])
motorDur = long(trialDict['motorDur'])
print 'plottrialparams() useMotor:', useMotor
totalDur = preDur + (numPulse*pulseDur) + postDur
totalDurSec = totalDur / 1000
#
# build a square for each {pre, post, motor i}
myShapes = []
myShapes.append(
{
'type': 'rect',
'xref': 'x',
'yref': 'paper',
'x0': 0,
'y0': 0,
'x1': preDur,
'y1': 1,
'fillcolor': '#888888',
'opacity': 0.6,
'line': {
'width': 0,
},
}
)
myShapes.append(
{
'type': 'rect',
'xref': 'x',
'yref': 'paper',
'x0': preDur + (numPulse*pulseDur),
'y0': 0,
'x1': totalDur,
'y1': 1,
'fillcolor': '#888888',
'opacity': 0.6,
'line': {
'width': 0,
},
}
)
if useMotor=='motorOn':
for i in np.arange(numPulse):
motorStart = preDur + (i*pulseDur) + motorDel
motorStop = motorStart + motorDur
motorDict = {
'type': 'rect',
'xref': 'x',
'yref': 'paper',
'x0': motorStart,
'y0': 0,
'x1': motorStop,
'y1': 1,
'fillcolor': '#FF8888',
'opacity': 0.6,
'line': {
'width': 0,
}
}
myShapes.append(motorDict)
#print 'myShapes:', myShapes
#
layout = {
'title': '',
'xaxis': {
#'range': [0, totalDur],
'range': [0, totalDur],
'autotick':True,
},
'yaxis': {
#'range': [0, 5],
'showgrid': False,
'ticks': '',
'showticklabels': False
},
'width': 500,
'height': 250,
'shapes': myShapes,
}
trace0 = go.Scatter(
x = totalDurSec,
y = 222,
mode = 'lines+markers',
name = 'trial'
)
#
data = [trace0]
#data = [trace0, trace1, trace2, trace3]
fig = {
'data': data,
'layout': layout,
#'config': {'displayModeBar': False}
}
output_type = 'div'
#{displaylogo: false}
#{displayModeBar: false}
#fileordiv = plotly.offline.plot(fig, filename='templates/yyy.html', output_type=output_type, auto_open=False)
#fileordiv = plotly.offline.plot(fig, filename='templates/yyy.html', output_type=output_type, auto_open=False)
fileordiv = plotly.offline.plot(fig, show_link=False, filename='templates/yyy.html', output_type=output_type, auto_open=False)
return fileordiv
if __name__ == '__main__':
folder = '/home/cudmore/Sites/treadmill/v1/data/'
t = treadmillAnalysis()
t.assignfolder(folder)
print t.getlist()
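	# Hedged usage sketch (commented out; assumes the data folder above exists
	# and contains trial .txt files). 'session1' is a hypothetical session name
	# used to filter which files end up in the database:
	#print t.builddb('session1') # writes treadmilldb_session1.csv, returns its name
	#trial = {'preDur': 1000, 'postDur': 1000, 'numPulse': 3, 'pulseDur': 500,
	#	'useMotor': 'motorOn', 'motorDel': 100, 'motorDur': 200}
	#print len(t.plottrialparams(trial)) # returns an HTML <div> string of the plot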
| gpl-3.0 |
neilhan/tensorflow | tensorflow/examples/skflow/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
# estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
mas-dse-greina/neon | autoencoder_example_neon.py | 1 | 4692 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2017 Intel Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Auto-Encoder implementation in neon.
We'll use a simple auto-encoder topology to create a model
that removes noise from images.
To do this, we'll load the MNIST digits dataset. We then add noise
to the images. The input of the model is the image with noise.
The output of the model is the image without noise.
"""
import numpy as np
from neon import logger as neon_logger
from neon.data import ArrayIterator, MNIST
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared, Logistic, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
width = 28 # Image width
height = 28 # Image height
amount_of_noise = 1.0 # Positive number (usually between 0-1)
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
dataset = MNIST(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = dataset.load_data()
y_train = X_train.copy() # In an autoencoder the output predicts the (clean) input
# Add some random noise to the input images; the copy above keeps the training
# target noise-free, since += modifies X_train in place.
X_train += np.random.uniform(low=0.0, high=amount_of_noise, size=np.shape(X_train))
y_test = X_test.copy() # In an autoencoder the output predicts the (clean) input
# Add some random noise to the input images; the copy above keeps the test
# target noise-free, since += modifies X_test in place.
X_test += np.random.uniform(low=0.0, high=amount_of_noise, size=np.shape(X_test))
# Create iterators for the training and testing sets
train_set = ArrayIterator(X=X_train, y=y_train, lshape=(1, height, width), make_onehot=False)
test_set = ArrayIterator(X=X_test, y=y_test, lshape=(1, height, width), make_onehot=False)
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Strided conv autoencoder
bn = False
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin(), batch_norm=bn),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni,
activation=Rectlin(), batch_norm=bn),
Deconv(fshape=(3, 3, 8), init=init_uni,
activation=Rectlin(), strides=2, batch_norm=bn),
Deconv(fshape=(2, 2, 1), init=init_uni, activation=Logistic(), strides=2, padding=1)]
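# Rough shape flow (assuming 28x28x1 inputs): the two Conv+Pooling stages
# downsample the spatial dimensions, and the stride-2 Deconv stages upsample
# back to 28x28x1 with a Logistic output, so each pixel can be compared
# against the clean target image by the cost defined below.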
# Define the cost
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train_set, optimizer=opt_gdm, num_epochs=args.epochs,
cost=cost, callbacks=callbacks)
# Let's predict the test set with the current model
results = model.get_outputs(test_set)
# Plot the predicted images
try:
import matplotlib.pyplot as plt
fi = 0
# Plot a 10x12 set of predictions and originals
nrows = 10
ncols = 12
preds = np.zeros((height * nrows, width * ncols))
origs = np.zeros((height * nrows, width * ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = results[fi,:].reshape((height, width))
preds[height * row:height * (row + 1):, width * col:width * (col + 1)] = im
        im = y_test[fi,:].reshape(height, width)
origs[height * row:height * (row + 1):, width * col:width * (col + 1)] = im
fi = fi + 1
plt.subplot(1,2,1)
plt.imshow(preds, cmap='gray')
plt.title('Predicted masks')
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(origs, cmap='gray')
plt.title('Original images')
plt.axis('off')
plt.savefig('Reconstructed.png')
except ImportError:
neon_logger.display(
'matplotlib needs to be manually installed to generate plots\npip install matplotlib') | apache-2.0 |
soulmachine/scikit-learn | sklearn/decomposition/__init__.py | 3 | 1258 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
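# A minimal usage sketch (illustrative only; `X` here is a hypothetical
# random data matrix, not part of this module):
#
# >>> import numpy as np
# >>> from sklearn.decomposition import PCA
# >>> X = np.random.RandomState(0).rand(20, 5)
# >>> PCA(n_components=2).fit_transform(X).shape
# (20, 2)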
| bsd-3-clause |
bikong2/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_poluation < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
    # a large number of trials prevents false negatives without slowing the
    # normal case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
xwolf12/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using 330 of these points, of which only 30 are labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
sandeepdsouza93/TensorFlow-15712 | tensorflow/examples/learn/boston.py | 25 | 1932 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset
boston = learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
regressor = learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Predict and score
y_predicted = list(
regressor.predict(scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
rsivapr/scikit-learn | sklearn/feature_extraction/image.py | 7 | 16121 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils.fixes import in1d
from ..utils import array2d, check_random_state
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(in1d(edges[0], inds),
in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.todense()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Parameters
===========
img: ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
===========
n_x: int
Dimension in x axis
n_y: int
Dimension in y axis
n_z: int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as: np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype: dtype, optional, default int
The data of the returned sparse matrix. By default it is int
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Parameters
===========
i_h: int
The image height
i_w: int
        The image width
p_h: int
The height of a patch
p_w: int
The width of a patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Parameters
----------
arr: ndarray
n-dimensional array of which patches are to be extracted
patch_shape: integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step: integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches: strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
    patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
                           np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Parameters
----------
image: array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
image = array2d(image)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Parameters
----------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size: tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image: array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Parameters
----------
patch_size: tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches: integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h / 10, i_w / 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
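if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the library API):
    # build the pixel-gradient graph of a toy image, cut it into 2x2 patches
    # and reconstruct the image by averaging the overlapping patches.
    demo_image = np.arange(16, dtype=np.float64).reshape((4, 4))
    demo_graph = img_to_graph(demo_image)
    print('img_to_graph adjacency matrix shape: %s' % (demo_graph.shape,))
    demo_patches = extract_patches_2d(demo_image, (2, 2))
    print('extracted %d patches of shape %s'
          % (len(demo_patches), demo_patches.shape[1:]))
    demo_reconstruction = reconstruct_from_patches_2d(demo_patches, (4, 4))
    print('reconstruction matches original: %s'
          % np.allclose(demo_image, demo_reconstruction))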
| bsd-3-clause |
codrut3/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 9 | 53510 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
training_util.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, training_util.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = training_util.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
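  """Builds ModelFnOps whose ops first assert that the features/labels the
  model_fn received match the expected ones, then bump the global step."""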
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
training_util.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = training_util.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=training_util.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={'MSE': _streaming_mean_squared_error_histogram})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(
['bogus_lookup', 'feature'],
[compat.as_str_any(x) for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
marcocaccin/scikit-learn | sklearn/feature_extraction/hashing.py | 41 | 6175 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional, default np.float64
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional, default "dict"
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
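# A minimal usage sketch of the three ``input_type`` modes described in the
# class docstring above. The feature names ('dog', 'cat', 'elephant') and
# n_features=8 are arbitrary demo values, not anything mandated by the API.
if __name__ == "__main__":
    # "dict": each sample is a mapping of feature name -> numeric value.
    h_dict = FeatureHasher(n_features=8, input_type="dict")
    print(h_dict.transform([{"dog": 1, "cat": 2, "elephant": 4}]).toarray())
    # "pair": each sample is an iterable of (feature_name, value) pairs.
    h_pair = FeatureHasher(n_features=8, input_type="pair")
    print(h_pair.transform([[("dog", 1), ("cat", 2), ("elephant", 4)]]).toarray())
    # "string": each sample is an iterable of tokens; a value of 1 is implied
    # for every token, and repeated tokens accumulate in the same column.
    h_str = FeatureHasher(n_features=8, input_type="string")
    print(h_str.transform([["dog", "cat", "cat", "elephant"]]).toarray())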
| bsd-3-clause |
rhiever/bokeh | bokeh/charts/builder/line_builder.py | 43 | 5360 | """This is the Bokeh charts interface. It gives you a high-level API to build
complex plots in a simple way.
This is the Line class, which lets you build your Line charts just by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
import numpy as np
from ..utils import cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line as LineGlyph
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Line(values, index=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
render the geometry from values and index.
Args:
        values (iterable): 2d iterable representing the data series
            values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be
            used as the common index of the series, or a **string** that corresponds
            to the key of the mapping to be used as index (and not as a data series)
            if values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)
    In addition to the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
return create_and_build(LineBuilder, values, index=index, **kws)
class LineBuilder(Builder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
    - A 1d iterable of any sort that will be used as
        the common index of the series
    - A string that corresponds to the key of the
        mapping to be used as index (and not as a data
        series) if values is a mapping (like a dict,
        an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Calculate the chart properties accordingly from line.values.
Then build a dict containing references to all the points to be
used by the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
xs = self._values_index
self.set_and_get("x", "", np.array(xs))
for col, values in self._values.items():
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""
Push the Line data into the ColumnDataSource and calculate the
proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1:]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the Line.
        Takes reference points from the data loaded into the ColumnDataSource.
"""
colors = cycle_colors(self._attr, self.palette)
for i, duplet in enumerate(self._attr[1:], start=1):
glyph = LineGlyph(x='x', y=duplet, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
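# A minimal usage sketch of the ``index`` argument documented above, assuming
# a plain dict of series where the 'x' key plays the role of the common index.
# The key names ('x', 'python', 'pypy') and the output filename are arbitrary
# demo values, not part of this module's API.
if __name__ == "__main__":
    from bokeh.charts import Line, output_file, show
    # The 'x' column is consumed as the shared index and is not plotted as a
    # data series; the remaining keys each become one line glyph.
    xyvalues = {'x': [1, 2, 3, 4, 5],
                'python': [2, 3, 7, 5, 26],
                'pypy': [12, 33, 47, 15, 126]}
    chart = Line(xyvalues, index='x', title="line", legend="top_left",
                 ylabel='Languages')
    output_file('line_from_dict.html')
    show(chart)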
| bsd-3-clause |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tseries/tests/test_period.py | 7 | 205020 | """Test suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from pandas import Timestamp, _period
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip, text_type, PY3, iteritems
from pandas.compat.numpy import np_datetime64_compat
from pandas import (Series, DataFrame,
_np_version_under1p9, _np_version_under1p10,
_np_version_under1p12)
from pandas import tslib
import pandas.util.testing as tm
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
self.assertIsInstance(p, Period)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
self.assertIsInstance(p, Period)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
self.assertIsInstance(p, Period)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
stamp = exp.to_timestamp('3D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
self.assertIsInstance(p, Period)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
self.assertIsInstance(result, Period)
def test_period_from_ordinal(self):
p = pd.Period('2011-01', freq='M')
res = pd.Period._from_ordinal(p.ordinal, freq='M')
self.assertEqual(p, res)
self.assertIsInstance(res, Period)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertIs(p, pd.NaT)
p = Period('nat', freq='W-SUN')
self.assertIs(p, pd.NaT)
p = Period(tslib.iNaT, freq='D')
self.assertIs(p, pd.NaT)
p = Period(tslib.iNaT, freq='3D')
self.assertIs(p, pd.NaT)
p = Period(tslib.iNaT, freq='1D1H')
self.assertIs(p, pd.NaT)
p = Period('NaT')
self.assertIs(p, pd.NaT)
p = Period(tslib.iNaT)
self.assertIs(p, pd.NaT)
def test_cons_null_like(self):
# check Timestamp compat
self.assertIs(Timestamp('NaT'), pd.NaT)
self.assertIs(Period('NaT'), pd.NaT)
self.assertIs(Timestamp(None), pd.NaT)
self.assertIs(Period(None), pd.NaT)
self.assertIs(Timestamp(float('nan')), pd.NaT)
self.assertIs(Period(float('nan')), pd.NaT)
self.assertIs(Timestamp(np.nan), pd.NaT)
self.assertIs(Period(np.nan), pd.NaT)
def test_period_cons_mult(self):
p1 = Period('2011-01', freq='3M')
p2 = Period('2011-01', freq='M')
self.assertEqual(p1.ordinal, p2.ordinal)
self.assertEqual(p1.freq, offsets.MonthEnd(3))
self.assertEqual(p1.freqstr, '3M')
self.assertEqual(p2.freq, offsets.MonthEnd())
self.assertEqual(p2.freqstr, 'M')
result = p1 + 1
self.assertEqual(result.ordinal, (p2 + 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
result = p1 - 1
self.assertEqual(result.ordinal, (p2 - 3).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -3M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-3M')
msg = ('Frequency must be positive, because it' ' represents span: 0M')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='0M')
def test_period_cons_combined(self):
p = [(Period('2011-01', freq='1D1H'),
Period('2011-01', freq='1H1D'),
Period('2011-01', freq='H')),
(Period(ordinal=1, freq='1D1H'),
Period(ordinal=1, freq='1H1D'),
Period(ordinal=1, freq='H'))]
for p1, p2, p3 in p:
self.assertEqual(p1.ordinal, p3.ordinal)
self.assertEqual(p2.ordinal, p3.ordinal)
self.assertEqual(p1.freq, offsets.Hour(25))
self.assertEqual(p1.freqstr, '25H')
self.assertEqual(p2.freq, offsets.Hour(25))
self.assertEqual(p2.freqstr, '25H')
self.assertEqual(p3.freq, offsets.Hour())
self.assertEqual(p3.freqstr, 'H')
result = p1 + 1
self.assertEqual(result.ordinal, (p3 + 25).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '25H')
result = p2 + 1
self.assertEqual(result.ordinal, (p3 + 25).ordinal)
self.assertEqual(result.freq, p2.freq)
self.assertEqual(result.freqstr, '25H')
result = p1 - 1
self.assertEqual(result.ordinal, (p3 - 25).ordinal)
self.assertEqual(result.freq, p1.freq)
self.assertEqual(result.freqstr, '25H')
result = p2 - 1
self.assertEqual(result.ordinal, (p3 - 25).ordinal)
self.assertEqual(result.freq, p2.freq)
self.assertEqual(result.freqstr, '25H')
msg = ('Frequency must be positive, because it'
' represents span: -25H')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-1D1H')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='-1H1D')
with tm.assertRaisesRegexp(ValueError, msg):
Period(ordinal=1, freq='-1D1H')
with tm.assertRaisesRegexp(ValueError, msg):
Period(ordinal=1, freq='-1H1D')
msg = ('Frequency must be positive, because it'
' represents span: 0D')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='0D0H')
with tm.assertRaisesRegexp(ValueError, msg):
Period(ordinal=1, freq='0D0H')
# You can only combine together day and intraday offsets
msg = ('Invalid frequency: 1W1D')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='1W1D')
msg = ('Invalid frequency: 1D1W')
with tm.assertRaisesRegexp(ValueError, msg):
Period('2011-01', freq='1D1W')
def test_timestamp_tz_arg(self):
tm._skip_if_no_pytz()
import pytz
for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='3H').to_timestamp(tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case)
exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case)
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
exp_zone = pytz.timezone(case).normalize(p)
self.assertEqual(p, exp)
self.assertEqual(p.tz, exp_zone.tzinfo)
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil(self):
from pandas.tslib import _dateutil_gettz as gettz
from pandas.tslib import maybe_get_tz
for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo',
'dateutil/US/Pacific']:
p = Period('1/1/2005', freq='M').to_timestamp(
tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
p = Period('1/1/2005',
freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case))
exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case)
self.assertEqual(p, exp)
self.assertEqual(p.tz, gettz(case.split('/', 1)[1]))
self.assertEqual(p.tz, exp.tz)
def test_timestamp_tz_arg_dateutil_from_string(self):
from pandas.tslib import _dateutil_gettz as gettz
p = Period('1/1/2005',
freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, gettz('Europe/Brussels'))
def test_timestamp_mult(self):
p = pd.Period('2011-01', freq='M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-01-31'))
p = pd.Period('2011-01', freq='3M')
self.assertEqual(p.to_timestamp(how='S'), pd.Timestamp('2011-01-01'))
self.assertEqual(p.to_timestamp(how='E'), pd.Timestamp('2011-03-31'))
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_period_constructor_offsets(self):
self.assertEqual(Period('1/1/2005', freq=offsets.MonthEnd()),
Period('1/1/2005', freq='M'))
self.assertEqual(Period('2005', freq=offsets.YearEnd()),
Period('2005', freq='A'))
self.assertEqual(Period('2005', freq=offsets.MonthEnd()),
Period('2005', freq='M'))
self.assertEqual(Period('3/10/12', freq=offsets.BusinessDay()),
Period('3/10/12', freq='B'))
self.assertEqual(Period('3/10/12', freq=offsets.Day()),
Period('3/10/12', freq='D'))
self.assertEqual(Period(year=2005, quarter=1,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=1, freq='Q'))
self.assertEqual(Period(year=2005, quarter=2,
freq=offsets.QuarterEnd(startingMonth=12)),
Period(year=2005, quarter=2, freq='Q'))
self.assertEqual(Period(year=2005, month=3, day=1, freq=offsets.Day()),
Period(year=2005, month=3, day=1, freq='D'))
self.assertEqual(Period(year=2012, month=3, day=10,
freq=offsets.BDay()),
Period(year=2012, month=3, day=10, freq='B'))
expected = Period('2005-03-01', freq='3D')
self.assertEqual(Period(year=2005, month=3, day=1,
freq=offsets.Day(3)), expected)
self.assertEqual(Period(year=2005, month=3, day=1, freq='3D'),
expected)
self.assertEqual(Period(year=2012, month=3, day=10,
freq=offsets.BDay(3)),
Period(year=2012, month=3, day=10, freq='3B'))
self.assertEqual(Period(200701, freq=offsets.MonthEnd()),
Period(200701, freq='M'))
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1.year, 18695)
self.assertEqual(i2.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
i3 = Period(np.datetime64('2007-01-01'), freq='M')
i4 = Period(np_datetime64_compat('2007-01-01 00:00:00Z'), freq='M')
i5 = Period(np_datetime64_compat('2007-01-01 00:00:00.000Z'), freq='M')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
self.assertEqual(i1, i4)
self.assertEqual(i1, i5)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
expected = Period(np_datetime64_compat(
'2007-01-01 09:00:00.001Z'), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
expected = Period(np_datetime64_compat('2007-01-01 09:00:00.00101Z'),
freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertEqual(i1.freq, offsets.Minute())
self.assertEqual(i1.freqstr, 'T')
def test_period_deprecated_freq(self):
cases = {"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"]}
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
for exp, freqs in iteritems(cases):
for freq in freqs:
with self.assertRaisesRegexp(ValueError, msg):
Period('2016-03-01 09:00', freq=freq)
with self.assertRaisesRegexp(ValueError, msg):
Period(ordinal=1, freq=freq)
# check supported freq-aliases still works
p1 = Period('2016-03-01 09:00', freq=exp)
p2 = Period(ordinal=1, freq=exp)
tm.assertIsInstance(p1, Period)
tm.assertIsInstance(p2, Period)
def test_hash(self):
self.assertEqual(hash(Period('2011-01', freq='M')),
hash(Period('2011-01', freq='M')))
self.assertNotEqual(hash(Period('2011-01-01', freq='D')),
hash(Period('2011-01', freq='M')))
self.assertNotEqual(hash(Period('2011-01', freq='3M')),
hash(Period('2011-01', freq='2M')))
self.assertNotEqual(hash(Period('2011-01', freq='M')),
hash(Period('2011-02', freq='M')))
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assertIsInstance(res, text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
with self.assertRaises(period.IncompatibleFrequency):
left - Period('2007-01', freq='M')
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
            # freq with mult should not affect the result
self.assertEqual(start_ts, p.to_timestamp('3D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
self.assertEqual(end_ts, p.to_timestamp('3D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B', 'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp('2T', how='end')
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('3H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('5S', how='start')
self.assertEqual(result, expected)
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='D')
xp = _ex(2012, 1, 2)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='H')
xp = _ex(2012, 1, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='B')
xp = _ex(2012, 1, 3)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='W')
xp = _ex(2012, 1, 2)
self.assertEqual(xp, p.end_time)
# Test for GH 11738
p = Period('2012', freq='15D')
xp = _ex(2012, 1, 16)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='1D1H')
xp = _ex(2012, 1, 2, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='1H1D')
xp = _ex(2012, 1, 2, 1)
self.assertEqual(xp, p.end_time)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
        # Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
self.assertEqual(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
self.assertEqual((qd + x).qyear, 2007)
self.assertEqual((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
self.assertEqual(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
self.assertEqual(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
self.assertEqual(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
self.assertEqual(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
self.assertEqual(m_ival_x.quarter, 4)
self.assertEqual(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
#
self.assertEqual(w_date.year, 2007)
self.assertEqual(w_date.quarter, 1)
self.assertEqual(w_date.month, 1)
self.assertEqual(w_date.week, 1)
self.assertEqual((w_date - 1).week, 52)
self.assertEqual(w_date.days_in_month, 31)
self.assertEqual(Period(freq='W', year=2012,
month=2, day=1).days_in_month, 29)
def test_properties_weekly_legacy(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='W', year=2007, month=1, day=7)
self.assertEqual(w_date.year, 2007)
self.assertEqual(w_date.quarter, 1)
self.assertEqual(w_date.month, 1)
self.assertEqual(w_date.week, 1)
self.assertEqual((w_date - 1).week, 52)
self.assertEqual(w_date.days_in_month, 31)
exp = Period(freq='W', year=2012, month=2, day=1)
self.assertEqual(exp.days_in_month, 29)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
self.assertEqual(b_date.year, 2007)
self.assertEqual(b_date.quarter, 1)
self.assertEqual(b_date.month, 1)
self.assertEqual(b_date.day, 1)
self.assertEqual(b_date.weekday, 0)
self.assertEqual(b_date.dayofyear, 1)
self.assertEqual(b_date.days_in_month, 31)
self.assertEqual(Period(freq='B', year=2012,
month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
self.assertEqual(d_date.year, 2007)
self.assertEqual(d_date.quarter, 1)
self.assertEqual(d_date.month, 1)
self.assertEqual(d_date.day, 1)
self.assertEqual(d_date.weekday, 0)
self.assertEqual(d_date.dayofyear, 1)
self.assertEqual(d_date.days_in_month, 31)
self.assertEqual(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq='H', year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq='2H', year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
self.assertEqual(h_date.year, 2007)
self.assertEqual(h_date.quarter, 1)
self.assertEqual(h_date.month, 1)
self.assertEqual(h_date.day, 1)
self.assertEqual(h_date.weekday, 0)
self.assertEqual(h_date.dayofyear, 1)
self.assertEqual(h_date.hour, 0)
self.assertEqual(h_date.days_in_month, 31)
self.assertEqual(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
self.assertEqual(t_date.quarter, 1)
self.assertEqual(t_date.month, 1)
self.assertEqual(t_date.day, 1)
self.assertEqual(t_date.weekday, 0)
self.assertEqual(t_date.dayofyear, 1)
self.assertEqual(t_date.hour, 0)
self.assertEqual(t_date.minute, 0)
self.assertEqual(t_date.days_in_month, 31)
        self.assertEqual(Period(freq='Min', year=2012, month=2, day=1,
                                hour=0, minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1, hour=0,
                        minute=0, second=0)
#
self.assertEqual(s_date.year, 2007)
self.assertEqual(s_date.quarter, 1)
self.assertEqual(s_date.month, 1)
self.assertEqual(s_date.day, 1)
self.assertEqual(s_date.weekday, 0)
self.assertEqual(s_date.dayofyear, 1)
self.assertEqual(s_date.hour, 0)
self.assertEqual(s_date.minute, 0)
self.assertEqual(s_date.second, 0)
self.assertEqual(s_date.days_in_month, 31)
        self.assertEqual(Period(freq='S', year=2012, month=2, day=1, hour=0,
                                minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
self.assertIs(p_nat, t_nat)
        # confirm Period('NaT') behaves identically to Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second', 'week',
'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
val2 = period.pnow('2D')
exp2 = Period(dt, freq='2D')
self.assertEqual(val2, exp2)
self.assertEqual(val.ordinal, val2.ordinal)
self.assertEqual(val.ordinal, exp2.ordinal)
def test_constructor_corner(self):
expected = Period('2007-01', freq='2M')
self.assertEqual(Period(year=2007, month=1, freq='2M'), expected)
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertIs(Period(None), pd.NaT)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"),
Period('2013-01', 'M'))
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
initial.asfreq(freq="MS", how="S")
with tm.assertRaisesRegexp(ValueError, msg):
pd.Period('2013-01', 'MS')
        self.assertIsNone(_period_code_map.get("MS"))
def noWrap(item):
return item
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
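        # a weekly period converts to the annual/quarterly/monthly period
        # containing its last day, so the expected value depends on whether
        # the boundary date falls on the week's anchor day (Sunday)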
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
self.assertEqual(ival_W.asfreq('A'), ival_W_to_A)
self.assertEqual(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
self.assertEqual(ival_W.asfreq('Q'), ival_W_to_Q)
self.assertEqual(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
self.assertEqual(ival_W.asfreq('M'), ival_W_to_M)
self.assertEqual(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
self.assertEqual(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
self.assertEqual(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
self.assertEqual(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
self.assertEqual(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
self.assertEqual(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
self.assertEqual(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
self.assertEqual(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
self.assertEqual(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
self.assertEqual(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
self.assertEqual(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
self.assertEqual(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
self.assertEqual(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
self.assertEqual(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
self.assertEqual(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
self.assertEqual(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
self.assertEqual(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
self.assertEqual(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
self.assertEqual(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
self.assertEqual(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
self.assertEqual(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
self.assertEqual(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
self.assertEqual(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
self.assertEqual(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
self.assertEqual(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
self.assertEqual(ival_W.asfreq('W'), ival_W)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_B.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B.asfreq('D'), ival_B_to_D)
self.assertEqual(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
self.assertEqual(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
self.assertEqual(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
self.assertEqual(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
self.assertEqual(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
self.assertEqual(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
self.assertEqual(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_D.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
self.assertEqual(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
self.assertEqual(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
self.assertEqual(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
self.assertEqual(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
self.assertEqual(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
self.assertEqual(ival_D.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
self.assertEqual(ival_D.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_end_of_week.asfreq('W'), ival_D_to_W)
self.assertEqual(ival_D_friday.asfreq('B'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
self.assertEqual(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
self.assertEqual(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
self.assertEqual(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
self.assertEqual(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
self.assertEqual(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
self.assertEqual(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
self.assertEqual(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
self.assertEqual(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
        # frequency conversion tests: from Hourly Frequency
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=59, second=59)
self.assertEqual(ival_H.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
self.assertEqual(ival_H.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
self.assertEqual(ival_H.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
self.assertEqual(ival_H.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H_end_of_week.asfreq('W'), ival_H_to_W)
self.assertEqual(ival_H.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
self.assertEqual(ival_H.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
self.assertEqual(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
self.assertEqual(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
self.assertEqual(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
self.assertEqual(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
self.assertEqual(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=59)
self.assertEqual(ival_T.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
self.assertEqual(ival_T.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
self.assertEqual(ival_T.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
self.assertEqual(ival_T.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T_end_of_week.asfreq('W'), ival_T_to_W)
self.assertEqual(ival_T.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
self.assertEqual(ival_T.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
self.assertEqual(ival_T.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
self.assertEqual(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
self.assertEqual(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
self.assertEqual(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1, hour=0, minute=0,
second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
self.assertEqual(ival_S.asfreq('A'), ival_S_to_A)
self.assertEqual(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
self.assertEqual(ival_S.asfreq('Q'), ival_S_to_Q)
self.assertEqual(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
self.assertEqual(ival_S.asfreq('M'), ival_S_to_M)
self.assertEqual(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
self.assertEqual(ival_S.asfreq('W'), ival_S_to_W)
self.assertEqual(ival_S_end_of_week.asfreq('W'), ival_S_to_W)
self.assertEqual(ival_S.asfreq('D'), ival_S_to_D)
self.assertEqual(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
self.assertEqual(ival_S.asfreq('B'), ival_S_to_B)
self.assertEqual(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
self.assertEqual(ival_S.asfreq('H'), ival_S_to_H)
self.assertEqual(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
self.assertEqual(ival_S.asfreq('Min'), ival_S_to_T)
self.assertEqual(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
self.assertEqual(ival_S.asfreq('S'), ival_S)
def test_asfreq_mult(self):
# normal freq to mult freq
p = Period(freq='A', year=2007)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq)
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['3A', offsets.YearEnd(3)]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='3A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# mult freq to normal freq
p = Period(freq='3A', year=2007)
# ordinal will change because how=E is the default
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq)
expected = Period('2009', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# ordinal will not change
for freq in ['A', offsets.YearEnd()]:
result = p.asfreq(freq, how='S')
expected = Period('2007', freq='A')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2007-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
p = Period(freq='3A', year=2007)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq)
expected = Period('2009-12', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
for freq in ['2M', offsets.MonthEnd(2)]:
result = p.asfreq(freq, how='S')
expected = Period('2007-01', freq='2M')
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
def test_asfreq_combined(self):
# normal freq to combined freq
p = Period('2007', freq='H')
# ordinal will not change
expected = Period('2007', freq='25H')
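        # both '1D1H' and '1H1D' resolve to the same 25-hour span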
for freq, how in zip(['1D1H', '1H1D'], ['E', 'S']):
result = p.asfreq(freq, how=how)
self.assertEqual(result, expected)
self.assertEqual(result.ordinal, expected.ordinal)
self.assertEqual(result.freq, expected.freq)
# combined freq to normal freq
p1 = Period(freq='1D1H', year=2007)
p2 = Period(freq='1H1D', year=2007)
# ordinal will change because how=E is the default
result1 = p1.asfreq('H')
result2 = p2.asfreq('H')
expected = Period('2007-01-02', freq='H')
self.assertEqual(result1, expected)
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freq, expected.freq)
self.assertEqual(result2, expected)
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freq, expected.freq)
# ordinal will not change
result1 = p1.asfreq('H', how='S')
result2 = p2.asfreq('H', how='S')
expected = Period('2007-01-01', freq='H')
self.assertEqual(result1, expected)
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freq, expected.freq)
self.assertEqual(result2, expected)
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freq, expected.freq)
def test_is_leap_year(self):
# GH 13727
for freq in ['A', 'M', 'D', 'H']:
p = Period('2000-01-01 00:00:00', freq=freq)
self.assertTrue(p.is_leap_year)
self.assertIsInstance(p.is_leap_year, bool)
p = Period('1999-01-01 00:00:00', freq=freq)
self.assertFalse(p.is_leap_year)
p = Period('2004-01-01 00:00:00', freq=freq)
self.assertTrue(p.is_leap_year)
p = Period('2100-01-01 00:00:00', freq=freq)
self.assertFalse(p.is_leap_year)
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assertIsInstance(series, Series)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
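        # trim two entries from each end so the range runs 1990Q3 to 2009Q2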
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
        # 'U' was formerly used to denote an undefined period
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
        # values is an array of Period objects, so the freq can be retrieved
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
self.assertRaises(ValueError, PeriodIndex, idx._values)
self.assertRaises(ValueError, PeriodIndex, list(idx._values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
        self.assertEqual(result.freq, 'M')
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
        self.assertEqual(result.freq, '2M')
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
        self.assertEqual(result.freq, '2M')
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_dtype(self):
        # passing a period dtype should set the corresponding freq
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
self.assertEqual(idx.dtype, 'period[M]')
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
self.assertEqual(idx.dtype, 'period[3D]')
        # if the index already has a freq and a different one is requested,
        # the result is converted via asfreq (the original is not modified)
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
self.assertEqual(res.dtype, 'period[M]')
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
self.assertEqual(res.dtype, 'period[M]')
msg = 'specified freq and dtype are different'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
tm.assertIsInstance(idx, PeriodIndex)
self.assertEqual(len(idx), 0)
self.assertEqual(idx.freq, 'M')
with tm.assertRaisesRegexp(ValueError, 'freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with tm.assertRaisesRegexp(ValueError, 'freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with tm.assertRaisesRegexp(ValueError, 'freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with tm.assertRaisesRegexp(ValueError, 'freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with tm.assertRaisesRegexp(ValueError, 'freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
# first element is pd.NaT
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')],
'p', freq=idx.freq)
self.assert_index_equal(result, idx)
result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')]),
'p', freq=idx.freq)
self.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_floats(self):
# GH13079
for floats in [[1.1], np.array([1.1])]:
with self.assertRaises(TypeError):
pd.PeriodIndex._simple_new(floats, freq='M')
def test_shallow_copy_empty(self):
# GH13067
idx = PeriodIndex([], freq='M')
result = idx._shallow_copy()
expected = idx
tm.assert_index_equal(result, expected)
def test_constructor_nat(self):
self.assertRaises(ValueError, period_range, start='NaT',
end='2011-01-01', freq='M')
self.assertRaises(ValueError, period_range, start='2011-01-01',
end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
            # PeriodIndex and period_range should produce the same result
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assertRaisesRegexp(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assertRaisesRegexp(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
            pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
            expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
                                   freq='25H')
            tm.assert_index_equal(pidx, expected)
for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]):
pidx = func(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_dtype_str(self):
pi = pd.PeriodIndex([], freq='M')
self.assertEqual(pi.dtype_str, 'period[M]')
self.assertEqual(pi.dtype_str, str(pi.dtype))
pi = pd.PeriodIndex([], freq='3M')
self.assertEqual(pi.dtype_str, 'period[3M]')
self.assertEqual(pi.dtype_str, str(pi.dtype))
def test_view_asi8(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
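        # 492 months separate 1970-01 from 2011-01 (41 * 12); NaT is stored
        # as the minimum int64 value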
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.view('i8'), exp)
tm.assert_numpy_array_equal(idx.asi8, exp)
def test_values(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=np.object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([492, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
tm.assert_numpy_array_equal(idx.values, exp)
tm.assert_numpy_array_equal(idx.get_values(), exp)
exp = np.array([14975, -9223372036854775808], dtype=np.int64)
tm.assert_numpy_array_equal(idx._values, exp)
def test_asobject_like(self):
idx = pd.PeriodIndex([], freq='M')
exp = np.array([], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
idx = pd.PeriodIndex(['2011-01', pd.NaT], freq='M')
exp = np.array([pd.Period('2011-01', freq='M'), pd.NaT], dtype=object)
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
exp = np.array([pd.Period('2011-01-01', freq='D'), pd.NaT],
dtype=object)
idx = pd.PeriodIndex(['2011-01-01', pd.NaT], freq='D')
tm.assert_numpy_array_equal(idx.asobject.values, exp)
tm.assert_numpy_array_equal(idx._mpl_repr(), exp)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(
index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False,
True, True, False, False, False]]
exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(KeyError,
"left slice bound for non-unique "
"label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
self.assertEqual(idx[0], pd.Period('2011-01', freq='M'))
self.assertIs(idx[1], tslib.NaT)
s = pd.Series([0, 1, 2], index=idx)
self.assertEqual(s[pd.NaT], 1)
s = pd.Series(idx, index=idx)
self.assertEqual(s[pd.Period('2011-01', freq='M')],
pd.Period('2011-01', freq='M'))
self.assertIs(s[pd.NaT], tslib.NaT)
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start='2012-01-01', periods=10, freq='D')
ts = Series(lrange(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
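        # helper: a label-based slice should select the same rows as the
        # corresponding positional slice via iloc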
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_contains(self):
rng = period_range('2007-01', freq='M', periods=10)
self.assertTrue(Period('2007-01', freq='M') in rng)
self.assertFalse(Period('2007-01', freq='D') in rng)
self.assertFalse(Period('2007-01', freq='2M') in rng)
def test_contains_nat(self):
# GH13582
idx = period_range('2007-01', freq='M', periods=10)
self.assertFalse(pd.NaT in idx)
self.assertFalse(None in idx)
self.assertFalse(float('nan') in idx)
self.assertFalse(np.nan in idx)
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
self.assertTrue(pd.NaT in idx)
self.assertTrue(None in idx)
self.assertTrue(float('nan') in idx)
self.assertTrue(np.nan in idx)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_periods_number_check(self):
with tm.assertRaises(ValueError):
period_range('2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assertIsInstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
tm.assert_index_equal(index, recon)
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
tm.assert_index_equal(result.index, exp_index)
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
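        # _get_with_delta builds the expected index by shifting the date_range
        # bounds by the sub-daily remainder (23:00, 23:59, 23:59:59) that
        # to_timestamp(..., 'end') adds at each resolution.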
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
tm.assert_index_equal(result.index, exp_index)
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
tm.assert_index_equal(stamps, expected)
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
zs = [Timestamp('99-04-17 00:00:00', tz='UTC'),
Timestamp('2001-04-17 00:00:00', tz='UTC'),
Timestamp('2001-04-17 00:00:00', tz='America/Los_Angeles'),
Timestamp('2001-04-17 00:00:00', tz=None)]
for z in zs:
self.assertEqual(eval(repr(z)), z)
def test_to_timestamp_pi_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M',
name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
tm.assert_index_equal(result2, index)
self.assertEqual(result2.name, 'idx')
result3 = result.to_period(freq='3M')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx')
self.assert_index_equal(result3, exp)
self.assertEqual(result3.freqstr, '3M')
msg = ('Frequency must be positive, because it'
' represents span: -2A')
with tm.assertRaisesRegexp(ValueError, msg):
result.to_period(freq='-2A')
def test_to_timestamp_pi_mult(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx')
result = idx.to_timestamp()
expected = DatetimeIndex(
['2011-01-01', 'NaT', '2011-02-01'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E')
expected = DatetimeIndex(
['2011-02-28', 'NaT', '2011-03-31'], name='idx')
self.assert_index_equal(result, expected)
def test_to_timestamp_pi_combined(self):
idx = PeriodIndex(start='2011', periods=2, freq='1D1H', name='idx')
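        # '1D1H' is a combined 25-hour span, so with how='E' each timestamp
        # lands on the final second of its span (start + 25H - 1s).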
result = idx.to_timestamp()
expected = DatetimeIndex(
['2011-01-01 00:00', '2011-01-02 01:00'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E')
expected = DatetimeIndex(
['2011-01-02 00:59:59', '2011-01-03 01:59:59'], name='idx')
self.assert_index_equal(result, expected)
result = idx.to_timestamp(how='E', freq='H')
expected = DatetimeIndex(
['2011-01-02 00:00', '2011-01-03 01:00'], name='idx')
self.assert_index_equal(result, expected)
def test_to_timestamp_to_period_astype(self):
idx = DatetimeIndex([pd.NaT, '2011-01-01', '2011-02-01'], name='idx')
res = idx.astype('period[M]')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
tm.assert_index_equal(res, exp)
res = idx.astype('period[3M]')
exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx')
self.assert_index_equal(res, exp)
def test_start_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='MS')
tm.assert_index_equal(index.start_time, expected_index)
def test_end_time(self):
index = PeriodIndex(freq='M', start='2016-01-01', end='2016-05-31')
expected_index = date_range('2016-01-01', end='2016-05-31', freq='M')
tm.assert_index_equal(index.end_time, expected_index)
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.ix[:, 0])
        # GH 1211
repr(df)
ts = df['1/1/2000']
tm.assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5, name='index')
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
tm.assert_index_equal(rs, rng, check_names=False)
self.assertEqual(rs.name, 'Index')
self.assertEqual(rng.name, 'index')
rs = df.reset_index().set_index('index')
tm.assertIsInstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.set_index(idx2)
tm.assert_index_equal(df.index, idx2)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
tm.assert_index_equal(result.index, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
tm.assert_index_equal(result.columns, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
# invalid axis
tm.assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
self.assertTrue(isinstance(result1.columns, DatetimeIndex))
self.assertTrue(isinstance(result2.columns, DatetimeIndex))
self.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
self.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
        # PeriodIndex.to_timestamp always uses 'infer'
self.assertEqual(result1.columns.freqstr, 'AS-JAN')
self.assertEqual(result2.columns.freqstr, 'AS-JAN')
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
tm.assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
tm.assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_index_equal(idx.unique(), expected)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN',
tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN',
tz='US/Eastern')
self.assert_index_equal(idx.unique(), expected)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
self.assertEqual(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
self.assertEqual(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
self.assertEqual(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
self.assertEqual(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
self.assertEqual(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
self.assertEqual(len(i1), 20)
self.assertEqual(i1.freq, start.freq)
self.assertEqual(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), 10)
self.assertEqual(i1.freq, end_intv.freq)
self.assertEqual(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
self.assertEqual(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
self.assertEqual(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
self.assertEqual(len(i2), 2)
self.assertEqual(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
self.assertEqual(len(i2), 2)
self.assertEqual(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_numpy_repeat(self):
index = period_range('20010101', periods=2)
expected = PeriodIndex([Period('2001-01-01'), Period('2001-01-01'),
Period('2001-01-02'), Period('2001-01-02')])
tm.assert_index_equal(np.repeat(index, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.repeat, index, 2, axis=1)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
tm.assert_index_equal(pi1.shift(0), pi1)
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(-1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(1), pi2)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
self.assertEqual(len(pi1), len(pi2))
self.assert_index_equal(pi1.shift(-1), pi2)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT',
'2011-05'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
def test_shift_ndarray(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, 2, 3, 4]))
expected = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
result = idx.shift(np.array([1, -2, 3, -4]))
expected = PeriodIndex(['2011-02', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
tm.assert_index_equal(result, expected)
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
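        # asfreq converts to the first ('S') or last ('E') sub-period; the
        # 'how' argument is case-insensitive, so 's', 'StarT' and 'beGIN' all
        # mean start.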
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
result1 = pi1.asfreq('3M')
result2 = pi1.asfreq('M')
expected = PeriodIndex(freq='M', start='2001-12', end='2001-12')
self.assert_numpy_array_equal(result1.asi8, expected.asi8)
self.assertEqual(result1.freqstr, '3M')
self.assert_numpy_array_equal(result2.asi8, expected.asi8)
self.assertEqual(result2.freqstr, 'M')
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
tm.assert_index_equal(result, expected)
def test_asfreq_mult_pi(self):
pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M')
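        # with a multiplied freq like '2M', the default anchor is the end of
        # the two-month span, so '2001-01' converts to the day '2001-02-28'.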
for freq in ['D', '3D']:
result = pi.asfreq(freq)
exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT',
'2001-04-30'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
result = pi.asfreq(freq, how='S')
exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT',
'2001-03-01'], freq=freq)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
def test_asfreq_combined_pi(self):
pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'],
freq='H')
exp = PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'],
freq='25H')
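        # '1D1H' and '1H1D' describe the same 25-hour span, so both spellings
        # should produce an index labelled freq='25H'.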
for freq, how in zip(['1D1H', '1H1D'], ['S', 'E']):
result = pi.asfreq(freq, how=how)
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
for freq in ['1D1H', '1H1D']:
pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00',
'NaT'], freq=freq)
result = pi.asfreq('H')
exp = PeriodIndex(['2001-01-02 00:00', '2001-01-03 02:00', 'NaT'],
freq='H')
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00',
'NaT'], freq=freq)
result = pi.asfreq('H', how='S')
exp = PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'],
freq='H')
self.assert_index_equal(result, exp)
self.assertEqual(result.freq, exp.freq)
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
self.assertEqual(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
self.assertEqual(len(i1), 20)
self.assertEqual(i1.freq, start.freq)
self.assertEqual(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), 10)
self.assertEqual(i1.freq, end_intv.freq)
self.assertEqual(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
self.assertEqual(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
self.assertEqual(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
self.assertEqual(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
self.assertEqual(len(i2), 2)
self.assertEqual(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
self.assertEqual(len(i2), 2)
self.assertEqual(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
tm.assert_index_equal(result.index, exp_index)
tm.assert_index_equal(df_result.index, exp_index)
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
tm.assert_index_equal(result.index, index.asfreq('D', how='start'))
def test_badinput(self):
self.assertRaises(ValueError, Period, '-2000', 'A')
self.assertRaises(tslib.DateParseError, Period, '0', 'A')
self.assertRaises(tslib.DateParseError, Period, '1/1/-2000', 'A')
def test_negative_ordinals(self):
Period(ordinal=-1000, freq='A')
Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
tm.assert_index_equal(idx1, idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
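        # with no argument to_period() infers the period freq from the
        # DatetimeIndex freq ('M' here); an explicit freq maps each timestamp
        # to the period that contains it.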
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
pi3 = dti.to_period(freq='3D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi3[0], Period('1/31/2005', freq='3D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
self.assertEqual(pi3[-1], Period('11/30/2005', freq='3D'))
tm.assert_index_equal(pi1, period_range('1/1/2005', '11/1/2005',
freq='M'))
tm.assert_index_equal(pi2, period_range('1/1/2005', '11/1/2005',
freq='M').asfreq('D'))
tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005',
freq='M').asfreq('3D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
tm.assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
tm.assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
        # Confirm DatetimeIndex and PeriodIndex work identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
            # getitem against the index should raise ValueError
            # (only checked on numpy < 1.9)
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
tm.assert_series_equal(s['2013/02'], s[31:59])
tm.assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
# changed to TypeError in 1.12
# https://github.com/numpy/numpy/pull/6271
exc = IndexError if _np_version_under1p12 else TypeError
for idx in [didx, pidx]:
            # slices against the index should raise IndexError
            # (TypeError on numpy >= 1.12)
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
with tm.assertRaises(exc):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/02':], s[1:])
tm.assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
tm.assert_series_equal(s['2013/02':], s[31:])
tm.assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(exc):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
            # getitem against the index should raise ValueError
            # (only checked on numpy < 1.9)
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
# changed to TypeError in 1.12
# https://github.com/numpy/numpy/pull/6271
exc = IndexError if _np_version_under1p12 else TypeError
for idx in [didx, pidx]:
            # slices against the index should raise IndexError
            # (TypeError on numpy >= 1.12)
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
with tm.assertRaises(exc):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'],
s[300:660])
tm.assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'],
s[3600:3960])
tm.assert_series_equal(s['2013/01/01 10H':], s[3600:])
tm.assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'),
columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_astype_asfreq(self):
pi1 = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'], freq='D')
exp = PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='M')
tm.assert_index_equal(pi1.asfreq('M'), exp)
tm.assert_index_equal(pi1.astype('period[M]'), exp)
exp = PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='3M')
tm.assert_index_equal(pi1.asfreq('3M'), exp)
tm.assert_index_equal(pi1.astype('period[3M]'), exp)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2012-03', '2012-04'], freq='D')
exp = np.array([2011, 2011, -1, 2012, 2012], dtype=np.int64)
self.assert_numpy_array_equal(idx.year, exp)
exp = np.array([1, 2, -1, 3, 4], dtype=np.int64)
self.assert_numpy_array_equal(idx.month, exp)
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
# Todo: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
tm.assert_index_equal(pi.to_timestamp(), dti)
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
tm.assert_index_equal(pi.to_timestamp(), dti)
def test_period_astype_to_timestamp(self):
pi = pd.PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='M')
exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
tm.assert_index_equal(pi.astype('datetime64[ns]'), exp)
exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31'])
tm.assert_index_equal(pi.astype('datetime64[ns]', how='end'), exp)
exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
tz='US/Eastern')
res = pi.astype('datetime64[ns, US/Eastern]')
        tm.assert_index_equal(res, exp)
exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31'],
tz='US/Eastern')
res = pi.astype('datetime64[ns, US/Eastern]', how='end')
tm.assert_index_equal(res, exp)
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
tm.assert_index_equal(rng, result)
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
rng = date_range('01-Jan-2012', periods=8, freq='M')
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
date_range('01-Jan-2012', periods=8, freq='EOM')
def test_multiples(self):
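        # with a multiplied freq such as '2A', integer arithmetic moves in
        # whole spans: p + 1 advances the annual ordinal by 2.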
result1 = Period('1989', freq='2A')
result2 = Period('1989', freq='A')
self.assertEqual(result1.ordinal, result2.ordinal)
self.assertEqual(result1.freqstr, '2A-DEC')
self.assertEqual(result2.freqstr, 'A-DEC')
self.assertEqual(result1.freq, offsets.YearEnd(2))
self.assertEqual(result2.freq, offsets.YearEnd())
self.assertEqual((result1 + 1).ordinal, result1.ordinal + 2)
self.assertEqual((1 + result1).ordinal, result1.ordinal + 2)
self.assertEqual((result1 - 1).ordinal, result2.ordinal - 2)
self.assertEqual((-1 + result1).ordinal, result2.ordinal - 2)
def test_pindex_multiples(self):
pi = PeriodIndex(start='1/1/11', end='12/31/11', freq='2M')
expected = PeriodIndex(['2011-01', '2011-03', '2011-05', '2011-07',
'2011-09', '2011-11'], freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', end='12/31/11', freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
pi = period_range(start='1/1/11', periods=6, freq='2M')
tm.assert_index_equal(pi, expected)
self.assertEqual(pi.freq, offsets.MonthEnd(2))
self.assertEqual(pi.freqstr, '2M')
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assertIsInstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D',
name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
tm.assertIsInstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_take_fill_value(self):
# GH 12631
idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', freq='D')
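        # by default -1 means "take the last element"; with fill_value=True it
        # is treated as a missing marker and filled with NaT instead.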
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assertIsInstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assertRaisesRegexp(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
self.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with tm.assertRaises(period.IncompatibleFrequency):
index.intersection(index3)
def test_intersection_cases(self):
base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = period_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = period_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], name='idx', freq='D')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
# non-monotonic
base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
rng2 = PeriodIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
freq='D', name='idx')
expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name='idx')
rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',
'2011-02-03'],
freq='D', name='other')
expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], freq='D', name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, 'D')
# empty same freq
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
self.assertEqual(len(result), 0)
result = rng.intersection(rng[0:0])
self.assertEqual(len(result), 0)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'dayofweek', 'weekday', 'dayofyear',
'quarter', 'qyear', 'days_in_month', 'is_leap_year']
periods = list(periodindex)
s = pd.Series(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
self.assertEqual(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
self.assertEqual(getattr(x, field), val)
if len(s) == 0:
continue
field_s = getattr(s.dt, field)
self.assertEqual(len(periodindex), len(field_s))
for x, val in zip(periods, field_s):
self.assertEqual(getattr(x, field), val)
def test_is_full(self):
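        # is_full is True when the index is monotonic with no missing periods
        # (duplicates are allowed); a non-monotonic index raises ValueError.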
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
tm.assert_index_equal(result, expected)
result = index.map(lambda x: x.ordinal)
exp = np.array([x.ordinal for x in index], dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if PY3:
# unicode
types += text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assertIsInstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
tm.assert_numpy_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assertIsInstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assertIsInstance(s.index.levels[0], PeriodIndex)
tm.assertIsInstance(s.index.values[0][0], Period)
def test_to_timestamp_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_timestamp()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_to_datetime_depr(self):
index = period_range('1/1/2012', periods=4, freq='D')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_get_loc_nat(self):
didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])
pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')
# check DatetimeIndex compat
for idx in [didx, pidx]:
self.assertEqual(idx.get_loc(pd.NaT), 1)
self.assertEqual(idx.get_loc(None), 1)
self.assertEqual(idx.get_loc(float('nan')), 1)
self.assertEqual(idx.get_loc(np.nan), 1)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
        # concat should preserve the PeriodIndex
result = pd.concat([s1, s2])
tm.assertIsInstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
self.assertEqual(new_prng.freq, offsets.MonthEnd())
self.assertEqual(new_prng.freqstr, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
tm.assert_index_equal(idx, org)
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'),
end=pd.Period('1950-7'), freq='M')
        # check that behaviour is consistent with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx,
dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
for freq in ['D', '2D']:
pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03',
'2014-01-04', '2014-01-05'], freq=freq)
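            # the probe Period must have the same freq as the index; a
            # mismatched freq raises IncompatibleFrequency (asserted below).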
p1 = pd.Period('2014-01-01', freq=freq)
self.assertEqual(pidx.searchsorted(p1), 0)
p2 = pd.Period('2014-01-04', freq=freq)
self.assertEqual(pidx.searchsorted(p2), 3)
msg = "Input has different freq=H from PeriodIndex"
with self.assertRaisesRegexp(period.IncompatibleFrequency, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='H'))
msg = "Input has different freq=5D from PeriodIndex"
with self.assertRaisesRegexp(period.IncompatibleFrequency, msg):
pidx.searchsorted(pd.Period('2014-01-01', freq='5D'))
def test_round_trip(self):
p = Period('2000Q1')
new_p = self.round_trip_pickle(p)
self.assertEqual(new_p, p)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestMethods(tm.TestCase):
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
self.assertEqual(dt1 + 1, dt2)
self.assertEqual(1 + dt1, dt2)
def test_add_pdnat(self):
p = pd.Period('2011-01', freq='M')
self.assertIs(p + pd.NaT, pd.NaT)
self.assertIs(pd.NaT + p, pd.NaT)
p = pd.Period('NaT', freq='M')
self.assertIs(p + pd.NaT, pd.NaT)
self.assertIs(pd.NaT + p, pd.NaT)
def test_add_raises(self):
# GH 4731
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
"str" + dt1
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_sub(self):
dt1 = Period('2011-01-01', freq='D')
dt2 = Period('2011-01-15', freq='D')
self.assertEqual(dt1 - dt2, -14)
self.assertEqual(dt2 - dt1, 14)
msg = r"Input has different freq=M from Period\(freq=D\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
dt1 - pd.Period('2011-02', freq='M')
def test_add_offset(self):
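        # only offsets compatible with the Period's freq are allowed;
        # incompatible ones raise IncompatibleFrequency, and a left-hand
        # np.timedelta64 raises TypeError instead.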
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
exp = Period('2013', freq=freq)
self.assertEqual(p + offsets.YearEnd(2), exp)
self.assertEqual(offsets.YearEnd(2) + p, exp)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with tm.assertRaises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
with tm.assertRaises(period.IncompatibleFrequency):
o + p
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
exp = Period('2011-05', freq=freq)
self.assertEqual(p + offsets.MonthEnd(2), exp)
self.assertEqual(offsets.MonthEnd(2) + p, exp)
exp = Period('2012-03', freq=freq)
self.assertEqual(p + offsets.MonthEnd(12), exp)
self.assertEqual(offsets.MonthEnd(12) + p, exp)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with tm.assertRaises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
with tm.assertRaises(period.IncompatibleFrequency):
o + p
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
exp = Period('2011-04-06', freq=freq)
self.assertEqual(p + offsets.Day(5), exp)
self.assertEqual(offsets.Day(5) + p, exp)
exp = Period('2011-04-02', freq=freq)
self.assertEqual(p + offsets.Hour(24), exp)
self.assertEqual(offsets.Hour(24) + p, exp)
exp = Period('2011-04-03', freq=freq)
self.assertEqual(p + np.timedelta64(2, 'D'), exp)
with tm.assertRaises(TypeError):
np.timedelta64(2, 'D') + p
exp = Period('2011-04-02', freq=freq)
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), exp)
with tm.assertRaises(TypeError):
np.timedelta64(3600 * 24, 's') + p
exp = Period('2011-03-30', freq=freq)
self.assertEqual(p + timedelta(-2), exp)
self.assertEqual(timedelta(-2) + p, exp)
exp = Period('2011-04-03', freq=freq)
self.assertEqual(p + timedelta(hours=48), exp)
self.assertEqual(timedelta(hours=48) + p, exp)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with tm.assertRaises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
with tm.assertRaises(period.IncompatibleFrequency):
o + p
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
exp = Period('2011-04-03 09:00', freq=freq)
self.assertEqual(p + offsets.Day(2), exp)
self.assertEqual(offsets.Day(2) + p, exp)
exp = Period('2011-04-01 12:00', freq=freq)
self.assertEqual(p + offsets.Hour(3), exp)
self.assertEqual(offsets.Hour(3) + p, exp)
exp = Period('2011-04-01 12:00', freq=freq)
self.assertEqual(p + np.timedelta64(3, 'h'), exp)
with tm.assertRaises(TypeError):
np.timedelta64(3, 'h') + p
exp = Period('2011-04-01 10:00', freq=freq)
self.assertEqual(p + np.timedelta64(3600, 's'), exp)
with tm.assertRaises(TypeError):
np.timedelta64(3600, 's') + p
exp = Period('2011-04-01 11:00', freq=freq)
self.assertEqual(p + timedelta(minutes=120), exp)
self.assertEqual(timedelta(minutes=120) + p, exp)
exp = Period('2011-04-05 12:00', freq=freq)
self.assertEqual(p + timedelta(days=4, minutes=180), exp)
self.assertEqual(timedelta(days=4, minutes=180) + p, exp)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with tm.assertRaises(period.IncompatibleFrequency):
p + o
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
with tm.assertRaises(period.IncompatibleFrequency):
o + p
def test_add_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertIs(p + o, tslib.NaT)
self.assertIs(o + p, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertIs(p + o, tslib.NaT)
if not isinstance(o, np.timedelta64):
self.assertIs(o + p, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
self.assertIs(p + o, tslib.NaT)
if isinstance(o, np.timedelta64):
with tm.assertRaises(TypeError):
o + p
else:
self.assertIs(o + p, tslib.NaT)
def test_sub_pdnat(self):
# GH 13071
p = pd.Period('2011-01', freq='M')
self.assertIs(p - pd.NaT, pd.NaT)
self.assertIs(pd.NaT - p, pd.NaT)
p = pd.Period('NaT', freq='M')
self.assertIs(p - pd.NaT, pd.NaT)
self.assertIs(pd.NaT - p, pd.NaT)
def test_sub_offset(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('2011', freq=freq)
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with tm.assertRaises(period.IncompatibleFrequency):
p - o
for freq in ['M', '2M', '3M']:
p = Period('2011-03', freq=freq)
self.assertEqual(p - offsets.MonthEnd(2),
Period('2011-01', freq=freq))
self.assertEqual(p - offsets.MonthEnd(12),
Period('2010-03', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
with tm.assertRaises(period.IncompatibleFrequency):
p - o
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('2011-04-01', freq=freq)
self.assertEqual(p - offsets.Day(5),
Period('2011-03-27', freq=freq))
self.assertEqual(p - offsets.Hour(24),
Period('2011-03-31', freq=freq))
self.assertEqual(p - np.timedelta64(2, 'D'),
Period('2011-03-30', freq=freq))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'),
Period('2011-03-31', freq=freq))
self.assertEqual(p - timedelta(-2),
Period('2011-04-03', freq=freq))
self.assertEqual(p - timedelta(hours=48),
Period('2011-03-30', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
with tm.assertRaises(period.IncompatibleFrequency):
p - o
for freq in ['H', '2H', '3H']:
p = Period('2011-04-01 09:00', freq=freq)
self.assertEqual(p - offsets.Day(2),
Period('2011-03-30 09:00', freq=freq))
self.assertEqual(p - offsets.Hour(3),
Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3, 'h'),
Period('2011-04-01 06:00', freq=freq))
self.assertEqual(p - np.timedelta64(3600, 's'),
Period('2011-04-01 08:00', freq=freq))
self.assertEqual(p - timedelta(minutes=120),
Period('2011-04-01 07:00', freq=freq))
self.assertEqual(p - timedelta(days=4, minutes=180),
Period('2011-03-28 06:00', freq=freq))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
with tm.assertRaises(period.IncompatibleFrequency):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ['A', '2A', '3A']:
p = Period('NaT', freq=freq)
for o in [offsets.YearEnd(2)]:
self.assertIs(p - o, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
self.assertIs(p - o, tslib.NaT)
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertIs(p - o, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
self.assertIs(p - o, tslib.NaT)
# freq is Tick
for freq in ['D', '2D', '3D']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2),
timedelta(hours=48)]:
self.assertIs(p - o, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
self.assertIs(p - o, tslib.NaT)
for freq in ['H', '2H', '3H']:
p = Period('NaT', freq=freq)
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertIs(p - o, tslib.NaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1),
offsets.Minute(), np.timedelta64(3200, 's'),
timedelta(hours=23, minutes=30)]:
self.assertIs(p - o, tslib.NaT)
def test_nat_ops(self):
for freq in ['M', '2M', '3M']:
p = Period('NaT', freq=freq)
self.assertIs(p + 1, tslib.NaT)
self.assertIs(1 + p, tslib.NaT)
self.assertIs(p - 1, tslib.NaT)
self.assertIs(p - Period('2011-01', freq=freq), tslib.NaT)
self.assertIs(Period('2011-01', freq=freq) - p, tslib.NaT)
def test_period_ops_offset(self):
p = Period('2011-04-01', freq='D')
result = p + offsets.Day()
exp = pd.Period('2011-04-02', freq='D')
self.assertEqual(result, exp)
result = p - offsets.Day(2)
exp = pd.Period('2011-03-30', freq='D')
self.assertEqual(result, exp)
msg = r"Input cannot be converted to Period\(freq=D\)"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
p + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
p - offsets.Hour(2)
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
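        # Apply `func` to both a PeriodIndex built from `values` and the
        # equivalent Series, and check both results against `expected`.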
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
tm.assert_index_equal(idx - pd.NaT, exp)
def test_pi_sub_period_nat(self):
# GH 13071
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, np.nan, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, np.nan, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period('2011-03', freq='M')
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(['2011-01', 'NaT', '2011-03',
'2011-04'], freq='M', name='idx')
f = lambda x: x == pd.Period('2011-03', freq='M')
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') == x
self._check(idx, f, exp)
f = lambda x: x == tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period('2011-03', freq='M')
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') != x
self._check(idx, f, exp)
f = lambda x: x != tslib.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period('2011-03', freq='M') >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period('2011-03', freq='M')
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > tslib.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: tslib.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
class TestPeriodRepresentation(tm.TestCase):
"""
Wish to match NumPy units
"""
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
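        # Periods anchored at the 1970 epoch should have ordinals 0..9 for
        # every frequency, mirroring NumPy's epoch-based datetime units.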
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng._values, exp)
self.assert_numpy_array_equal(rng.asi8, exp)
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(period.IncompatibleFrequency):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(period.IncompatibleFrequency):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(period.IncompatibleFrequency):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(period.IncompatibleFrequency):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(period.IncompatibleFrequency):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
        # confirm Period('NaT') works identically to Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat), (nat, t),
(t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_pi_comp(self):
for freq in ['M', '2M', '3M']:
base = PeriodIndex(['2011-01', '2011-02',
'2011-03', '2011-04'], freq=freq)
p = Period('2011-02', freq=freq)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base == p, exp)
self.assert_numpy_array_equal(p == base, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base != p, exp)
self.assert_numpy_array_equal(p != base, exp)
exp = np.array([False, False, True, True])
self.assert_numpy_array_equal(base > p, exp)
self.assert_numpy_array_equal(p < base, exp)
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(base < p, exp)
self.assert_numpy_array_equal(p > base, exp)
exp = np.array([False, True, True, True])
self.assert_numpy_array_equal(base >= p, exp)
self.assert_numpy_array_equal(p <= base, exp)
exp = np.array([True, True, False, False])
self.assert_numpy_array_equal(base <= p, exp)
self.assert_numpy_array_equal(p >= base, exp)
idx = PeriodIndex(['2011-02', '2011-01', '2011-03',
'2011-05'], freq=freq)
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(base == idx, exp)
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(base != idx, exp)
exp = np.array([False, True, False, False])
self.assert_numpy_array_equal(base > idx, exp)
exp = np.array([True, False, False, True])
self.assert_numpy_array_equal(base < idx, exp)
exp = np.array([False, True, True, False])
self.assert_numpy_array_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
self.assert_numpy_array_equal(base <= idx, exp)
# different base freq
msg = "Input has different freq=A-DEC from PeriodIndex"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
Period('2011', freq='A') >= base
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='A')
base <= idx
# different mult
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= Period('2011', freq='4M')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
Period('2011', freq='4M') >= base
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx = PeriodIndex(['2011', '2012', '2013', '2014'], freq='4M')
base <= idx
def test_pi_nat_comp(self):
for freq in ['M', '2M', '3M']:
idx1 = PeriodIndex(
['2011-01', '2011-02', 'NaT', '2011-05'], freq=freq)
result = idx1 > Period('2011-02', freq=freq)
exp = np.array([False, False, False, True])
self.assert_numpy_array_equal(result, exp)
result = Period('2011-02', freq=freq) < idx1
self.assert_numpy_array_equal(result, exp)
result = idx1 == Period('NaT', freq=freq)
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = Period('NaT', freq=freq) == idx1
self.assert_numpy_array_equal(result, exp)
result = idx1 != Period('NaT', freq=freq)
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
result = Period('NaT', freq=freq) != idx1
self.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04',
'NaT'], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
self.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
self.assert_numpy_array_equal(result, exp)
diff = PeriodIndex(['2011-02', '2011-01', '2011-04',
'NaT'], freq='4M')
msg = "Input has different freq=4M from PeriodIndex"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx1 > diff
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
idx1 == diff
class TestSeriesPeriod(tm.TestCase):
def setUp(self):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_auto_conversion(self):
series = Series(list(period_range('2000-01-01', periods=10, freq='D')))
self.assertEqual(series.dtype, 'object')
series = pd.Series([pd.Period('2011-01-01', freq='D'),
pd.Period('2011-02-01', freq='D')])
self.assertEqual(series.dtype, 'object')
def test_getitem(self):
self.assertEqual(self.series[1], pd.Period('2000-01-02', freq='D'))
result = self.series[[2, 4]]
exp = pd.Series([pd.Period('2000-01-03', freq='D'),
pd.Period('2000-01-05', freq='D')],
index=[2, 4])
self.assert_series_equal(result, exp)
self.assertEqual(result.dtype, 'object')
def test_constructor_cant_cast_period(self):
with tm.assertRaises(TypeError):
Series(period_range('2000-01-01', periods=10, freq='D'),
dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range('1/1/2000', periods=10), dtype=object)
exp = Series(period_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
def test_isnull(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
tm.assert_series_equal(s.isnull(), Series([False, True]))
tm.assert_series_equal(s.notnull(), Series([True, False]))
def test_fillna(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
res = s.fillna(pd.Period('2012-01', freq='M'))
exp = Series([pd.Period('2011-01', freq='M'),
pd.Period('2012-01', freq='M')])
tm.assert_series_equal(res, exp)
self.assertEqual(res.dtype, 'object')
res = s.fillna('XXX')
exp = Series([pd.Period('2011-01', freq='M'), 'XXX'])
tm.assert_series_equal(res, exp)
self.assertEqual(res.dtype, 'object')
def test_dropna(self):
# GH 13737
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('NaT', freq='M')])
tm.assert_series_equal(s.dropna(),
Series([pd.Period('2011-01', freq='M')]))
def test_series_comparison_scalars(self):
val = pd.Period('2000-01-04', freq='D')
result = self.series > val
expected = pd.Series([x > val for x in self.series])
tm.assert_series_equal(result, expected)
val = self.series[5]
result = self.series > val
expected = pd.Series([x > val for x in self.series])
tm.assert_series_equal(result, expected)
def test_between(self):
left, right = self.series[[2, 7]]
result = self.series.between(left, right)
expected = (self.series >= left) & (self.series <= right)
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# NaT support
"""
    # ToDo: Enable when period dtype is supported
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype='period[D]')
val = series[3]
self.assertTrue(isnull(val))
series[2] = val
self.assertTrue(isnull(series[2]))
def test_NaT_cast(self):
result = Series([np.nan]).astype('period[D]')
expected = Series([NaT])
tm.assert_series_equal(result, expected)
"""
def test_set_none_nan(self):
# currently Period is stored as object dtype, not as NaT
self.series[3] = None
self.assertIs(self.series[3], None)
self.series[3:5] = None
self.assertIs(self.series[4], None)
self.series[5] = np.nan
self.assertTrue(np.isnan(self.series[5]))
self.series[5:7] = np.nan
self.assertTrue(np.isnan(self.series[6]))
def test_intercept_astype_object(self):
expected = self.series.astype('object')
df = DataFrame({'a': self.series,
'b': np.random.randn(len(self.series))})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
df = DataFrame({'a': self.series, 'b': ['foo'] * len(self.series)})
result = df.values.squeeze()
self.assertTrue((result[:, 0] == expected.values).all())
def test_ops_series_timedelta(self):
# GH 13043
s = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
self.assertEqual(s.dtype, object)
exp = pd.Series([pd.Period('2015-01-02', freq='D'),
pd.Period('2015-01-03', freq='D')], name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
tm.assert_series_equal(s + pd.tseries.offsets.Day(), exp)
tm.assert_series_equal(pd.tseries.offsets.Day() + s, exp)
def test_ops_series_period(self):
# GH 13043
s = pd.Series([pd.Period('2015-01-01', freq='D'),
pd.Period('2015-01-02', freq='D')], name='xxx')
self.assertEqual(s.dtype, object)
p = pd.Period('2015-01-10', freq='D')
# dtype will be object because of original dtype
exp = pd.Series([9, 8], name='xxx', dtype=object)
tm.assert_series_equal(p - s, exp)
tm.assert_series_equal(s - p, -exp)
s2 = pd.Series([pd.Period('2015-01-05', freq='D'),
pd.Period('2015-01-04', freq='D')], name='xxx')
self.assertEqual(s2.dtype, object)
exp = pd.Series([4, 2], name='xxx', dtype=object)
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
def test_comp_series_period_scalar(self):
# GH 13200
for freq in ['M', '2M', '3M']:
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
p = Period('2011-02', freq=freq)
exp = pd.Series([False, True, False, False])
tm.assert_series_equal(base == p, exp)
tm.assert_series_equal(p == base, exp)
exp = pd.Series([True, False, True, True])
tm.assert_series_equal(base != p, exp)
tm.assert_series_equal(p != base, exp)
exp = pd.Series([False, False, True, True])
tm.assert_series_equal(base > p, exp)
tm.assert_series_equal(p < base, exp)
exp = pd.Series([True, False, False, False])
tm.assert_series_equal(base < p, exp)
tm.assert_series_equal(p > base, exp)
exp = pd.Series([False, True, True, True])
tm.assert_series_equal(base >= p, exp)
tm.assert_series_equal(p <= base, exp)
exp = pd.Series([True, True, False, False])
tm.assert_series_equal(base <= p, exp)
tm.assert_series_equal(p >= base, exp)
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= Period('2011', freq='A')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
Period('2011', freq='A') >= base
def test_comp_series_period_series(self):
# GH 13200
for freq in ['M', '2M', '3M']:
base = Series([Period(x, freq=freq) for x in
['2011-01', '2011-02', '2011-03', '2011-04']])
s = Series([Period(x, freq=freq) for x in
['2011-02', '2011-01', '2011-03', '2011-05']])
exp = Series([False, False, True, False])
tm.assert_series_equal(base == s, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != s, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > s, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < s, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= s, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= s, exp)
s2 = Series([Period(x, freq='A') for x in
['2011', '2011', '2011', '2011']])
# different base freq
msg = "Input has different freq=A-DEC from Period"
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
base <= s2
def test_comp_series_period_object(self):
# GH 13200
base = Series([Period('2011', freq='A'), Period('2011-02', freq='M'),
Period('2013', freq='A'), Period('2011-04', freq='M')])
s = Series([Period('2012', freq='A'), Period('2011-01', freq='M'),
Period('2013', freq='A'), Period('2011-05', freq='M')])
exp = Series([False, False, True, False])
tm.assert_series_equal(base == s, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != s, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > s, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < s, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= s, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= s, exp)
def test_ops_frame_period(self):
# GH 13043
df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'),
pd.Period('2015-02', freq='M')],
'B': [pd.Period('2014-01', freq='M'),
pd.Period('2014-02', freq='M')]})
self.assertEqual(df['A'].dtype, object)
self.assertEqual(df['B'].dtype, object)
p = pd.Period('2015-03', freq='M')
# dtype will be object because of original dtype
exp = pd.DataFrame({'A': np.array([2, 1], dtype=object),
'B': np.array([14, 13], dtype=object)})
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -exp)
df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')],
'B': [pd.Period('2015-05', freq='M'),
pd.Period('2015-06', freq='M')]})
self.assertEqual(df2['A'].dtype, object)
self.assertEqual(df2['B'].dtype, object)
exp = pd.DataFrame({'A': np.array([4, 4], dtype=object),
'B': np.array([16, 16], dtype=object)})
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -exp)
class TestPeriodField(tm.TestCase):
def test_get_period_field_raises_on_out_of_range(self):
self.assertRaises(ValueError, _period.get_period_field, -1, 0, 0)
def test_get_period_field_array_raises_on_out_of_range(self):
self.assertRaises(ValueError, _period.get_period_field_arr, -1,
np.empty(1), 0)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| apache-2.0 |
hathix/searchbetter | src/analysis/plots.py | 1 | 5699 | import numpy as np
import scipy
import plotly
import plotly.graph_objs as go
import plotly.offline as py
py.init_notebook_mode()
import matplotlib.pyplot as plt
import stats
def plotSeriesWithRegression(xs, ys, name, color):
"""
Returns a list of traces:
- A scatter plots of ys
- A linear regression of ys
"""
# setting legend group lets user toggle the series AND its
# line of best fit together
legendgroup_name = '%s group' % name
# plot points
traceScatter = go.Scattergl(
x = xs,
y = ys,
mode = 'markers',
name = name,
legendgroup = legendgroup_name,
marker = dict(
color = color,
opacity = 0.5,
size = 12,
line = dict(
width = 2,
color = 'rgb(0, 0, 0)'
)
)
)
# plot line of best fit
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xs, ys)
line_of_best_fit = np.poly1d([slope, intercept])
r_squared = r_value ** 2
    # (leftover from an optional text label showing the fit equation:
    #  'y = %.2fx + %.2f, r^2 = %.2f' % (slope, intercept, r_squared))
traceRegression = go.Scattergl(
x = np.unique(xs),
y = line_of_best_fit(np.unique(xs)),
mode = 'lines',
name = name + ' linear regression',
legendgroup = legendgroup_name,
hoverinfo = 'text+name',
line = dict(
color = color
)
)
return [traceScatter, traceRegression]
def plotly_scatter(control_hits, wiki_hits, w2v_hits):
# reference: https://plot.ly/python/reference/#scattergl
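    # NOTE: `color_strings` is assumed to be a module-level list of rgb()
    # colour strings supplied by the calling notebook; it is not defined in
    # this module.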
traceControl = go.Scattergl(
x = control_hits,
y = control_hits,
mode = 'lines',
name = 'Control (no rewriting)',
hoverinfo = 'text+name',
line = dict(
color = color_strings[0]
)
)
# plot wiki
    wikiTraces = plotSeriesWithRegression(
        control_hits, wiki_hits, name='Wikipedia Categories', color=color_strings[1])
    w2vTraces = plotSeriesWithRegression(
        control_hits, w2v_hits, name='Word2Vec', color=color_strings[2])
plot = [traceControl] + wikiTraces + w2vTraces
layout = go.Layout(
title='Effect of query rewriting on search engine hits (edX)',
xaxis=dict(
title='# hits before rewriting'
),
yaxis=dict(
title='# hits after rewriting'
)
)
fig = go.Figure(data=plot, layout=layout)
# Plot and embed in ipython notebook!
py.iplot(fig)
def matplotlib_scatter(subplot, xs, ys, max_x, max_y, x_label, y_label, title, rewriter_name, color):
# scatter plot
scatter = subplot.scatter(x=xs, y=ys, alpha=0.5, c=color)
# line of best fit
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(xs, ys)
line_of_best_fit = np.poly1d([slope, intercept])
r_squared = r_value ** 2
regression, = subplot.plot(np.unique(xs), line_of_best_fit(np.unique(xs)), c=color)
print("y = %.2fx + %.2f, r^2 = %.2f" % (slope, intercept, r_squared))
# control line
control, = subplot.plot(np.unique(xs), np.unique(xs), c="#444444", linestyle="dashed")
subplot.set_xlim(0, max_x)
subplot.set_ylim(0, max_y)
subplot.set_xlabel(x_label)
subplot.set_ylabel(y_label)
subplot.set_title(title)
subplot.legend(
handles=[scatter, regression, control],
labels=[rewriter_name, "Regression for {}".format(rewriter_name), "Control"]
)
return subplot
def summary_bar_chart(df, engine_name):
# BAR CHARTS
# more stats
rewriter_names =[
'control',
'wiki',
'word2vec'
]
    # filter out rows where every rewriter returns zero hits
fdf = df[(df['control'] > 0) | (df['wiki'] > 0) | (df['word2vec'] > 0)]
fdf = fdf.reset_index(drop=True)
    print(stats.summary_of_frame(fdf))
# series containing # of hits for each search term
data_series = [fdf[name] for name in rewriter_names]
average_hits = [s.mean() for s in data_series]
# now filter on just those terms where the control gives nothing
df_where_no_hits = fdf[fdf['control'] == 0]
data_series_zero = [df_where_no_hits[name] for name in rewriter_names]
average_hits_zero = [s.mean() for s in data_series_zero]
    print(stats.summary_of_frame(df_where_no_hits))
# bar chart of hits
# first trace: all search terms
rewriter_fancy_names = [
'Control (no rewriting)',
'Wikipedia Categories',
'Word2Vec'
]
traceAllTerms = go.Bar(
x=rewriter_fancy_names,
y=average_hits,
name='All terms'
)
traceJustMisses = go.Bar(
x=rewriter_fancy_names,
y=average_hits_zero,
# for some reason, when you export the image, it chops off the
# trailing letters of this label. so let's pad it with spaces to
# save the text
name='Terms with no hits by default '
)
traces = [traceAllTerms, traceJustMisses]
layout = go.Layout(
barmode='group',
title='Average hits per rewriter (%s)' % engine_name,
xaxis=dict(
title='Query rewriter'
),
yaxis=dict(
title='Average # hits'
),
legend=dict(
# center the legend and put it below the graph
x=0.35,
# when you export plot to image, for some reason it moves the
# legend up no matter what you do. so we need to exaggerate how
# far down the legend is here.
y=-0.7
)
)
fig = go.Figure(data=traces, layout=layout)
return fig
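# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original analysis): the column names
# and hit counts below are invented purely to illustrate summary_bar_chart(),
# and the call relies on the local `stats.summary_of_frame` helper imported
# above being available.
if __name__ == '__main__':
    import pandas as pd
    demo_df = pd.DataFrame({
        'control': [0, 3, 12, 7],
        'wiki': [2, 5, 15, 7],
        'word2vec': [1, 4, 18, 9],
    })
    # summary_bar_chart returns a plotly Figure; render it with py.iplot(fig)
    # inside a notebook (or py.plot(fig) to write a standalone HTML file).
    fig = summary_bar_chart(demo_df, 'edX')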
| mit |
yaroslavvb/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 9272 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
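# `data` now holds the corpus encoded as integer word IDs (ID 0 is reserved
# for 'UNK'); `dictionary` maps word -> ID and `reverse_dictionary` ID -> word.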
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
  # Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
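# Each printed pair is (center word -> context word); with skip_window=1 and
# num_skips=2 every center word contributes exactly two training examples,
# one for its left neighbour and one for its right neighbour.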
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()).
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
yunfeilu/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
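        # Scaling y by sqrt(sw) and K by outer(sqrt(sw), sqrt(sw)) turns the
        # sample-weighted problem into an unweighted one; the resulting
        # dual_coef is rescaled by sw again below.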
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
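    # Ridge solution via the SVD X = U * diag(s) * Vt:
    #   w = V * diag(s / (s**2 + alpha)) * U.T * y
    # Singular values below ~1e-15 are treated as zero, which keeps this
    # solver stable for singular / rank-deficient X.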
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
All last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
      scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that fast convergence with
'sag' is only guaranteed on features with approximately the same scale;
you can preprocess the data with a scaler from sklearn.preprocessing
(see the Examples section below for a sketch).
The last four solvers support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
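A minimal sketch of the 'sag' solver mentioned above; the StandardScaler
step and the random data are illustrative only (any preprocessing that puts
features on a comparable scale would do):
>>> from sklearn.preprocessing import StandardScaler
>>> X_big = np.random.randn(1000, 50)
>>> y_big = np.random.randn(1000)
>>> X_scaled = StandardScaler().fit_transform(X_big)
>>> clf_sag = Ridge(alpha=1.0, solver='sag', max_iter=1000).fit(X_scaled, y_big)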
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
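Examples
--------
A minimal usage sketch; the toy data below are illustrative only:
>>> from sklearn.linear_model import RidgeClassifier
>>> import numpy as np
>>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
>>> y = np.array([0, 0, 1, 1])
>>> clf = RidgeClassifier(alpha=1.0).fit(X, y)
>>> predictions = clf.predict(np.array([[0.5, 0.5], [2.5, 2.5]]))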
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
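A small NumPy sketch of this identity (illustrative only; the variable
names below are ad hoc and not part of this class):
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(6, 3); y = rng.randn(6); alpha = 1.0
    K = X.dot(X.T)
    v, Q = np.linalg.eigh(K)                          # K = Q diag(v) Q^T
    G = Q.dot(np.diag(1.0 / (v + alpha))).dot(Q.T)    # (K + alpha*Id)^-1
    c = G.dot(y)                                      # dual coefficients
    looe = c / np.diag(G)                             # leave-one-out errors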
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them.
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used, else, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features and X is not sparse,
otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
``scoring`` function (if ``scoring`` was provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
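Examples
--------
A minimal usage sketch; the toy data and the alpha grid are illustrative:
>>> from sklearn.linear_model import RidgeCV
>>> import numpy as np
>>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
>>> y = np.array([0.0, 0.1, 1.9, 3.0])
>>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
>>> chosen_alpha = reg.alpha_  # alpha picked by leave-one-out CV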
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
``scoring`` function (if ``scoring`` was provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
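Examples
--------
A minimal usage sketch; the toy data and the alpha grid are illustrative:
>>> from sklearn.linear_model import RidgeClassifierCV
>>> import numpy as np
>>> X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
>>> y = np.array([0, 0, 1, 1])
>>> clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
>>> chosen_alpha = clf.alpha_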
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
ilyes14/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# build a synthetic dataset: X is Gaussian, y indicates whether X > 0,
# then the positive X values are stretched and Gaussian noise is added to X
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
mikebenfield/scikit-learn | sklearn/metrics/tests/test_common.py | 19 | 43631 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO: Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to first
# write a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance to several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
# Ensure that classification metrics give the same results with string
# labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
invalids = [([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Can't handle mix of binary and continuous",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred have at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name not in REGRESSION_METRICS:
continue
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_score)
else:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
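# A small illustrative sketch (not part of the original tests) of the
# np.unique(..., return_inverse=True) trick used above: per-class scores come
# back in sorted label order, and `inverse_labels` maps them back to the order
# in which `labels` was supplied. The numbers below are made up.
def _example_label_reordering():
    import numpy as np

    labels = np.array([3, 0, 1, 2])
    _, inverse_labels = np.unique(labels, return_inverse=True)

    # Pretend a metric returned one score per class in sorted-label order.
    score_sorted_order = np.array([0.0, 0.1, 0.2, 0.3])   # classes 0, 1, 2, 3
    score_labels_order = score_sorted_order[inverse_labels]
    assert np.allclose(score_labels_order, [0.3, 0.0, 0.1, 0.2])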
| bsd-3-clause |
bgalbraith/bandits | examples/bayesian.py | 1 | 1188 | """
Takes advantage of multicore systems to speed up the simulation runs.
"""
import matplotlib
matplotlib.use('qt4agg')
from bandits.agent import Agent, BetaAgent
from bandits.bandit import BernoulliBandit, BinomialBandit
from bandits.policy import GreedyPolicy, EpsilonGreedyPolicy, UCBPolicy
from bandits.environment import Environment
class BernoulliExample:
label = 'Bayesian Bandits - Bernoulli'
bandit = BernoulliBandit(10, t=3*1000)
agents = [
Agent(bandit, EpsilonGreedyPolicy(0.1)),
Agent(bandit, UCBPolicy(1)),
BetaAgent(bandit, GreedyPolicy())
]
class BinomialExample:
label = 'Bayesian Bandits - Binomial (n=5)'
bandit = BinomialBandit(10, n=5, t=3*1000)
agents = [
Agent(bandit, EpsilonGreedyPolicy(0.1)),
Agent(bandit, UCBPolicy(1)),
BetaAgent(bandit, GreedyPolicy())
]
if __name__ == '__main__':
experiments = 500
trials = 1000
example = BernoulliExample()
# example = BinomialExample()
env = Environment(example.bandit, example.agents, example.label)
scores, optimal = env.run(trials, experiments)
env.plot_results(scores, optimal)
env.plot_beliefs()
| apache-2.0 |
VerifiableRobotics/LTLMoP | src/lib/handlers/share/MotionControl/RRTControllerHandler.py | 8 | 36569 | #!/usr/bin/env python
"""
===================================================================
RRTController.py - Rapidly-Exploring Random Trees Motion Controller
===================================================================
Uses Rapidly-exploring Random Tree Algorithm to generate paths given the starting position and the goal point.
"""
from numpy import *
from __is_inside import *
import math
import sys,os
from scipy.linalg import norm
from numpy.matlib import zeros
import __is_inside
import time, sys,os
import scipy as Sci
import scipy.linalg
import Polygon, Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
from math import sqrt, fabs , pi
import random
import thread
import threading
# importing matplotlib to show the path if possible
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import_matplotlib = True
except:
print "matplotlib is not imported. Plotting is disabled"
import_matplotlib = False
import lib.handlers.handlerTemplates as handlerTemplates
class RRTControllerHandler(handlerTemplates.MotionControlHandler):
def __init__(self, executor, shared_data,robot_type,max_angle_goal,max_angle_overlap,plotting):
"""
Rapidly-Exploring Random Trees algorithm motion planning controller
robot_type (int): Which robot is used for execution. BasicSim is 1, ODE is 2, ROS is 3, Nao is 4, Pioneer is 5 (default=1)
max_angle_goal (float): The biggest difference in angle between the new node and the goal point that is acceptable. If it is bigger than the max_angle, the new node will not be connected to the goal point. The value should be within 0 to 6.28 = 2*pi. Default set to 6.28 = 2*pi (default=6.28)
max_angle_overlap (float): difference in angle allowed for two nodes overlapping each other. If you don't want any node overlapping with each other, put in 2*pi = 6.28. Default set to 1.57 = pi/2 (default=1.57)
plotting (bool): Check the box to enable plotting. Uncheck to disable plotting (default=True)
"""
self.system_print = False # for debugging. print on GUI (a bunch of stuff)
self.finish_print = False # set to 1 to print the original finished E and V before trimming the tree
self.orientation_print = False # show the orientation information of the robot
# Get references to handlers we'll need to communicate with
self.drive_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.DriveHandler)
self.pose_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.PoseHandler)
# Get information about regions
self.proj = executor.proj
self.coordmap_map2lab = executor.hsub.coordmap_map2lab
self.coordmap_lab2map = executor.hsub.coordmap_lab2map
self.last_warning = 0
self.previous_next_reg = None
# Store the Rapidly-Exploring Random Tress Built
self.RRT_V = None # array containing all the points on the RRT Tree
self.RRT_E = None # array specifying the connection of points on the Tree
self.E_current_column = None # the current column on the tree (to find the current heading point)
self.Velocity = None
self.currentRegionPoly = None
self.nextRegionPoly = None
self.map = {}
self.all = Polygon.Polygon()
self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for find the normal to the vector
self.stuck_thres = 20 # threshold for changing the range of sampling omega
# Information about the robot (default set to ODE)
if robot_type not in [1,2,3,4,5]:
robot_type = 1
self.system = robot_type
# Information about maximum turning angle allowed from the latest node to the goal point
if max_angle_goal > 2*pi:
max_angle_goal = 2*pi
if max_angle_goal < 0:
max_angle_goal = 0
self.max_angle_allowed = max_angle_goal
# Information about maximum difference in angle allowed between two overlapping nodes
if max_angle_overlap > 2*pi:
max_angle_overlap = 2*pi
if max_angle_overlap < 0:
max_angle_overlap = 0
self.max_angle_overlap = max_angle_overlap
# Information about whether plotting is enabled.
if plotting is True and import_matplotlib == True:
self.plotting = True
else:
self.plotting = False
# Specify the size of the robot
# 1: basicSim; 2: ODE; 3: ROS 4: Nao; 5: Pioneer
# self.radius: radius of the robot
# self.timestep : number of linear segments to break the curve into for calculation of x, y position
# self.step_size : the length of each step for connection to goal point
# self.velocity : Velocity of the robot in m/s in control space (m/s)
if self.system == 1:
self.radius = 5
self.step_size = 25
self.timeStep = 10
self.velocity = 2 # 1.5
if self.system == 2:
self.radius = 5
self.step_size = 15
self.timeStep = 10
self.velocity = 2 # 1.5
elif self.system == 3:
self.ROSInitHandler = shared_data['ROS_INIT_HANDLER']
self.radius = self.ROSInitHandler.robotPhysicalWidth/2
self.step_size = self.radius*3 #0.2
self.timeStep = 8
self.velocity = self.radius/2 #0.08
elif self.system == 4:
self.radius = 0.15*1.2
self.step_size = 0.2 #set the step_size for points to be 1/5 of the norm (ORIGINAL = 0.4)
self.timeStep = 5
self.velocity = 0.05
elif self.system == 5:
self.radius = 0.15
self.step_size = 0.2 #set the step_size for points to be 1/5 of the norm (ORIGINAL = 0.4)
self.timeStep = 5
self.velocity = 0.05
# Operate_system (int): Which operating system is used for execution.
# Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
if self.system_print == True:
print "The operate_system is "+ str(self.operate_system)
# Generate polygon for regions in the map
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
# Generate the boundary polygon
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
# Start plotting if operating in Windows
if self.operate_system == 2 and self.plotting ==True:
# start using animation to plot the robot
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return True
# Find our current configuration
pose = self.pose_handler.getPose()
# Check if Vicon has cut out
# TODO: this should probably go in posehandler?
if math.isnan(pose[2]):
print "WARNING: No Vicon data! Pausing."
self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
### This part runs when the robot enters a new region; otherwise, the original tree is reused.
if not self.previous_next_reg == next_reg:
# Entered a new region. New tree should be formed.
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
if self.system_print == True:
print "next Region is " + str(self.proj.rfi.regions[next_reg].name)
print "Current Region is " + str(self.proj.rfi.regions[current_reg].name)
#set to zero velocity before tree is generated
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
# Determine the mid points on the faces connecting to the next region (one goal point will be picked among all the mid points later in buildTree)
transFace = None
q_gBundle = [[],[]] # list of goal points (midpoints of transition faces)
face_normal = [[],[]] # normals of the transition faces
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
#find the normal vector to the face
face = transFace[0,:] - transFace[1,:]
distance_face = norm(face)
normal = face/distance_face * self.trans_matrix
face_normal = hstack((face_normal,vstack((normal[0,0],normal[0,1]))))
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
# Run algorithm to build the Rapid-Exploring Random Trees
self.RRT_V = None
self.RRT_E = None
# For plotting
if self.operate_system == 2:
if self.plotting == True:
self.ax.cla()
else:
self.ax = None
else:
self.ax = None
if self.operate_system == 1 and self.plotting == True:
plt.cla()
self.plotMap(self.map)
plt.plot(pose[0],pose[1],'ko')
self.RRT_V,self.RRT_E,self.E_current_column = self.buildTree(\
[pose[0], pose[1]],pose[2],self.currentRegionPoly, self.nextRegionPoly,q_gBundle,face_normal)
"""
# map the lab coordinates back to pixels
V_tosend = array(mat(self.RRT_V[1:,:])).T
V_tosend = map(self.coordmap_lab2map, V_tosend)
V_tosend = mat(V_tosend).T
s = 'RRT:E'+"["+str(list(self.RRT_E[0]))+","+str(list(self.RRT_E[1]))+"]"+':V'+"["+str(list(self.RRT_V[0]))+","+str(list(V_tosend[0]))+","+str(list(V_tosend[1]))+"]"+':T'+"["+str(list(q_gBundle[0]))+","+str(list(q_gBundle[1]))+"]"
#print s
"""
# Run algorithm to find a velocity vector (global frame) to take the robot to the next region
self.Velocity = self.getVelocity([pose[0], pose[1]], self.RRT_V,self.RRT_E)
#self.Node = self.getNode([pose[0], pose[1]], self.RRT_V,self.RRT_E)
self.previous_next_reg = next_reg
# Pass this desired velocity on to the drive handler
self.drive_handler.setVelocity(self.Velocity[0,0], self.Velocity[1,0], pose[2])
#self.drive_handler.setVelocity(self.Node[0,0], self.Node[1,0], pose[2])
RobotPoly = Polygon.Shapes.Circle(self.radius,(pose[0],pose[1]))
# check if robot is inside the current region
departed = not self.currentRegionPoly.overlaps(RobotPoly)
arrived = self.nextRegionPoly.covers(RobotPoly)
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
print "I think I'm in " + r.name
print pose
break
self.last_warning = time.time()
#print "arrived:"+str(arrived)
return arrived
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def getVelocity(self,p, V, E, last=False):
"""
This function calculates the velocity for the robot with RRT.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of edges on the tree)
V = points of the tree (3 x No. of vertices: index, x, y)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
#else:
# dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- vstack((V[1,E[0,self.E_current_column]],V[2,E[0,self.E_current_column]]))
Vel = zeros([2,1])
Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Vel
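# Illustrative sketch (not part of the handler) of the velocity rule in
# getVelocity above: head toward the current tree node at a fixed speed by
# normalizing the displacement vector. The numbers are made up.
#
#     import numpy
#     pose = numpy.array([1.0, 1.0])
#     target = numpy.array([4.0, 5.0])          # V[1:3, E[1, current_column]]
#     displacement = target - pose              # [3.0, 4.0]
#     velocity = displacement / numpy.linalg.norm(displacement) * 0.5
#     # velocity == [0.3, 0.4]; its magnitude is always the commanded speed 0.5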
def getNode(self,p, V, E, last=False):
"""
This function finds the next node on the tree for the robot to head toward.
The inputs are (given in order):
p = the current x-y position of the robot
E = edges of the tree (2 x No. of edges on the tree)
V = points of the tree (3 x No. of vertices: index, x, y)
last = True, if the current region is the last region
= False, if the current region is NOT the last region
"""
pose = mat(p).T
#dis_cur = distance between current position and the next point
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
heading = E[1,self.E_current_column] # index of the current heading point on the tree
if norm(dis_cur) < 1.5*self.radius: # go to next point
if not heading == shape(V)[1]-1:
self.E_current_column = self.E_current_column + 1
dis_cur = vstack((V[1,E[1,self.E_current_column]],V[2,E[1,self.E_current_column]]))- pose
Node = zeros([2,1])
Node[0,0] = V[1,E[1,self.E_current_column]]
Node[1,0] = V[2,E[1,self.E_current_column]]
#Vel[0:2,0] = dis_cur/norm(dis_cur)*0.5 #TUNE THE SPEED LATER
return Node
def buildTree(self,p,theta,regionPoly,nextRegionPoly,q_gBundle,face_normal, last=False):
"""
This function builds the RRT tree.
p : x,y position of the robot
theta : current orientation of the robot
regionPoly : current region polygon
nextRegionPoly : next region polygon
q_gBundle : coordinates of q_goals that the robot can reach
face_normal : the normal vector of each face corresponding to each goal point in q_gBundle
"""
q_init = mat(p).T
V = vstack((0,q_init))
theta = self.orientation_bound(theta)
V_theta = array([theta])
#!!! CONTROL SPACE: generate a list of omega for random sampling
omegaLowerBound = -math.pi/20 # lower bound for the value of omega
omegaUpperBound = math.pi/20 # upper bound for the value of omega
omegaNoOfSteps = 20
self.omega_range = linspace(omegaLowerBound,omegaUpperBound,omegaNoOfSteps)
self.omega_range_escape = linspace(omegaLowerBound*4,omegaUpperBound*4,omegaNoOfSteps*4) # range used when stuck > stuck_thres
regionPolyOld = Polygon.Polygon(regionPoly)
regionPoly += PolyShapes.Circle(self.radius*2.5,(q_init[0,0],q_init[1,0]))
# check faces of the current region for goal points
E = [[],[]] # the tree matrix
Other = [[],[]]
path = False # set to True once a path to the goal is formed
stuck = 0 # count for changing the range of sampling omega
append_after_latest_node = False # append new nodes to the latest node
if self.system_print == True:
print "plotting in buildTree is " + str(self.plotting)
if self.plotting == True:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
while not path:
#step -1: try connection to q_goal (generate path to goal)
i = 0
if self.system_print == True:
print "Try Connection to the goal points"
# pushing possible q_goals into the current region (ensure path is covered by the current region polygon)
q_pass = [[],[],[]]
q_pass_dist = []
q_gBundle = mat(q_gBundle)
face_normal = mat(face_normal)
while i < q_gBundle.shape[1]:
q_g_original = q_gBundle[:,i]
q_g = q_gBundle[:,i]+face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#q_g = q_gBundle[:,i]+(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
if not regionPolyOld.isInside(q_g[0],q_g[1]):
#q_g = q_gBundle[:,i]-(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,i]-V[1:,(shape(V)[1]-1)])*1.5*self.radius ##original 2*self.radius
q_g = q_gBundle[:,i]-face_normal[:,i]*1.5*self.radius ##original 2*self.radius
#forming polygon for path checking
EdgePolyGoal = PolyShapes.Circle(self.radius,(q_g[0,0],q_g[1,0])) + PolyShapes.Circle(self.radius,(V[1,shape(V)[1]-1],V[2:,shape(V)[1]-1]))
EdgePolyGoal = PolyUtils.convexHull(EdgePolyGoal)
dist = norm(q_g - V[1:,shape(V)[1]-1])
#check connection to goal
connect_goal = regionPoly.covers(EdgePolyGoal) #check coverage of path from new point to goal
# compare orientation difference
thetaPrev = V_theta[shape(V)[1]-1]
theta_orientation = abs(arctan((q_g[1,0]- V[2,shape(V)[1]-1])/(q_g[0,0]- V[1,shape(V)[1]-1])))
if q_g[1,0] > V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: # second quadrant
theta_orientation = pi - theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # first quadrant
theta_orientation = theta_orientation
elif q_g[1,0] < V[2,shape(V)[1]-1]:
if q_g[0,0] < V[1,shape(V)[1]-1]: #third quadrant
theta_orientation = pi + theta_orientation
elif q_g[0,0] > V[1,shape(V)[1]-1]: # fourth quadrant
theta_orientation = 2*pi - theta_orientation
# check the angle between vector(new goal to goal_original ) and vector( latest node to new goal)
Goal_to_GoalOriginal = q_g_original - q_g
LatestNode_to_Goal = q_g - V[1:,shape(V)[1]-1]
Angle_Goal_LatestNode= arccos(vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))/norm(Goal_to_GoalOriginal)/norm(LatestNode_to_Goal))
# if connection to goal can be established and the max change in orientation of the robot is smaller than max_angle, tree is said to be completed.
if self.orientation_print == True:
print "theta_orientation is " + str(theta_orientation)
print "thetaPrev is " + str(thetaPrev)
print "(theta_orientation - thetaPrev) is " + str(abs(theta_orientation - thetaPrev))
print "self.max_angle_allowed is " + str(self.max_angle_allowed)
print "abs(theta_orientation - thetaPrev) < self.max_angle_allowed" + str(abs(theta_orientation - thetaPrev) < self.max_angle_allowed)
print"Goal_to_GoalOriginal: " + str( array(Goal_to_GoalOriginal)) + "; LatestNode_to_Goal: " + str( array(LatestNode_to_Goal))
print vdot(array(Goal_to_GoalOriginal), array(LatestNode_to_Goal))
print "Angle_Goal_LatestNode" + str(Angle_Goal_LatestNode)
if connect_goal and (abs(theta_orientation - thetaPrev) < self.max_angle_allowed) and (Angle_Goal_LatestNode < self.max_angle_allowed):
path = True
q_pass = hstack((q_pass,vstack((i,q_g))))
q_pass_dist = hstack((q_pass_dist,dist))
i = i + 1
if self.system_print == True:
print "checked goal points"
self.E = E
self.V = V
# connection to goal has been established
# Obtain the closest goal point to which a path can be formed.
if path:
if shape(q_pass_dist)[0] == 1:
cols = 0
else:
(cols,) = nonzero(q_pass_dist == min(q_pass_dist))
cols = asarray(cols)[0]
q_g = q_pass[1:,cols]
"""
q_g = q_g-(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*3*self.radius #org 3
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g+(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])/norm(q_gBundle[:,q_pass[0,cols]]-V[1:,(shape(V)[1]-1)])*6*self.radius #org 3
"""
if self.plotting == True :
if self.operate_system == 1:
plt.suptitle('Rapidly-exploring Random Tree', fontsize=12)
plt.xlabel('x')
plt.ylabel('y')
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
if shape(V)[1] <= 2:
self.ax.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
self.ax.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
# trim the path connecting current node to goal point into pieces if the path is too long now
numOfPoint = floor(norm(V[1:,shape(V)[1]-1]- q_g)/self.step_size)
if numOfPoint < 3:
numOfPoint = 3
x = linspace( V[1,shape(V)[1]-1], q_g[0,0], numOfPoint )
y = linspace( V[2,shape(V)[1]-1], q_g[1,0], numOfPoint )
for i in range(x.shape[0]):
if i != 0:
V = hstack((V,vstack((shape(V)[1],x[i],y[i]))))
E = hstack((E,vstack((shape(V)[1]-2,shape(V)[1]-1))))
#push the goal point to the next region
q_g = q_g+face_normal[:,q_pass[0,cols]]*3*self.radius ##original 2*self.radius
if not nextRegionPoly.isInside(q_g[0],q_g[1]):
q_g = q_g-face_normal[:,q_pass[0,cols]]*6*self.radius ##original 2*self.radius
V = hstack((V,vstack((shape(V)[1],q_g[0,0],q_g[1,0]))))
E = hstack((E,vstack((shape(V)[1]-2 ,shape(V)[1]-1))))
if self.plotting == True :
if self.operate_system == 1:
plt.plot(q_g[0,0],q_g[1,0],'ko')
plt.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
plt.figure(1).canvas.draw()
else:
self.ax.plot(q_g[0,0],q_g[1,0],'ko')
self.ax.plot(( V[1,shape(V)[1]-1],V[1,shape(V)[1]-2]),( V[2,shape(V)[1]-1],V[2,shape(V)[1]-2]),'b')
# path is not formed, try to append points onto the tree
if not path:
# connection_to_tree : connection to the tree is successful
if append_after_latest_node:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode(V,V_theta,E,Other,regionPoly,stuck, append_after_latest_node)
else:
connection_to_tree = False
while not connection_to_tree:
V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree = self.generateNewNode (V,V_theta,E,Other,regionPoly,stuck)
if self.finish_print:
print 'Here is the V matrix:', V, 'Here is the E matrix:',E
print >>sys.__stdout__, 'Here is the V matrix:\n', V, '\nHere is the E matrix:\n',E
#B: trim to a single path
single = 0
while single == 0:
trim = 0
for j in range(shape(V)[1]-3):
(row,col) = nonzero(E == j+1)
if len(col) == 1:
E = delete(E, col[0], 1)
trim = 1
if trim == 0:
single = 1
#### plot the tree with matplotlib
if self.plotting ==True :
if self.operate_system == 1:
plt.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
plt.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
plt.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
plt.figure(1).canvas.draw()
else:
BoundPolyPoints = asarray(PolyUtils.pointList(regionPoly))
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],'k')
self.ax.plot(V[1,:],V[2,:],'b')
for i in range(shape(E)[1]):
self.ax.text(V[1,E[0,i]],V[2,E[0,i]], V[0,E[0,i]], fontsize=12)
self.ax.text(V[1,E[1,i]],V[2,E[1,i]], V[0,E[1,i]], fontsize=12)
#return V, E, and the current node number on the tree
V = array(V)
return V, E, 0
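# Illustrative sketch (not part of the handler) of the "trim to a single path"
# step in buildTree above: an intermediate node index that appears only once in
# E is a dead-end leaf, so its edge is deleted; repeating until nothing changes
# leaves one chain from the start to the goal. With edges stored as
# E = [[parent indices], [child indices]]:
#
#     E = [[0, 1, 1, 3, 4],
#          [1, 2, 3, 4, 5]]     # node 2 is a dead end hanging off node 1
#
# node 2 appears exactly once, so its column is removed, leaving the chain
#
#     E = [[0, 1, 3, 4],
#          [1, 3, 4, 5]]        # 0 -> 1 -> 3 -> 4 -> 5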
def generateNewNode(self,V,V_theta,E,Other,regionPoly,stuck,append_after_latest_node =False):
"""
Generate a new node on the current tree matrix
V : the node matrix
V_theta : the orientation matrix
E : the tree matrix (or edge matrix)
Other : the matrix containing the velocity and angular velocity(omega) information
regionPoly: the polygon of current region
stuck : count of the number of times a new node failed to be generated
append_after_latest_node : append new nodes to the latest node (True only if the previous node addition is successful)
"""
if self.system_print == True:
print "In control space generating path,stuck = " + str(stuck)
connection_to_tree = False # True when connection to the tree is successful
if stuck > self.stuck_thres:
# increase the range of omega since a path cannot be generated
omega = random.choice(self.omega_range_escape)
else:
#!!!! CONTROL SPACE STEP 1 - generate random omega
omega = random.choice(self.omega_range)
#!!!! CONTROL SPACE STEP 2 - pick a random point on the tree
if append_after_latest_node:
tree_index = shape(V)[1]-1
else:
if random.choice([1,2]) == 1:
tree_index = random.choice(array(V[0])[0])
else:
tree_index = shape(V)[1]-1
xPrev = V[1,tree_index]
yPrev = V[2,tree_index]
thetaPrev = V_theta[tree_index]
j = 1
#!!!! CONTROL SPACE STEP 3 - Check path of the robot
path_robot = PolyShapes.Circle(self.radius,(xPrev,yPrev))
while j <= self.timeStep:
xOrg = xPrev
yOrg = yPrev
xPrev = xPrev + self.velocity/omega*(sin(omega* 1 + thetaPrev)-sin(thetaPrev))
yPrev = yPrev - self.velocity/omega*(cos(omega* 1 + thetaPrev)-cos(thetaPrev))
thetaPrev = omega* 1 + thetaPrev
path_robot = path_robot + PolyShapes.Circle(self.radius,(xPrev,yPrev))
j = j + 1
thetaPrev = self.orientation_bound(thetaPrev)
path_all = PolyUtils.convexHull(path_robot)
in_bound = regionPoly.covers(path_all)
"""
# plotting
if plotting == True:
self.plotPoly(path_all,'r',1)
"""
stuck = stuck + 1
if in_bound:
robot_new_node = PolyShapes.Circle(self.radius,(xPrev,yPrev))
# check how many nodes on the tree the new node overlaps with
nodes_overlap_count = 0
for k in range(shape(V)[1]-1):
robot_old_node = PolyShapes.Circle(self.radius,(V[1,k],V[2,k]))
if robot_new_node.overlaps(robot_old_node):
if abs(thetaPrev - V_theta[k]) < self.max_angle_overlap:
nodes_overlap_count += 1
if nodes_overlap_count == 0 or (stuck > self.stuck_thres+1 and nodes_overlap_count < 2) or (stuck > self.stuck_thres+500):
if stuck > self.stuck_thres+1:
append_after_latest_node = False
if (stuck > self.stuck_thres+500):
stuck = 0
stuck = stuck - 20
# plotting
if self.plotting == True:
self.plotPoly(path_all,'b',1)
if self.system_print == True:
print "node connected"
V = hstack((V,vstack((shape(V)[1],xPrev,yPrev))))
V_theta = hstack((V_theta,thetaPrev))
E = hstack((E,vstack((tree_index ,shape(V)[1]-1))))
Other = hstack((Other,vstack((self.velocity,omega))))
##################### E should add omega and velocity
connection_to_tree = True
append_after_latest_node = True
else:
append_after_latest_node = False
if self.system_print == True:
print "node not connected. check goal point"
else:
append_after_latest_node = False
return V,V_theta,E,Other,stuck,append_after_latest_node, connection_to_tree
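# Illustrative sketch (not part of the handler) of the unicycle update used in
# CONTROL SPACE STEP 3 above: with constant speed v and turning rate omega the
# pose advances along a circular arc of radius v/omega at each unit time step.
# The numbers are made up.
#
#     from math import sin, cos
#     v, omega = 2.0, 0.1
#     x, y, theta = 0.0, 0.0, 0.0
#     for _ in range(10):                     # self.timeStep segments
#         x = x + v / omega * (sin(omega + theta) - sin(theta))
#         y = y - v / omega * (cos(omega + theta) - cos(theta))
#         theta = omega + theta
#     # after 10 steps theta == 1.0 rad and (x, y) still lies on a circle of
#     # radius v/omega == 20 about the turning center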
def orientation_bound(self,theta):
"""
make sure the returned angle is between 0 and 2*pi
"""
while theta > 2*pi or theta < 0:
if theta > 2*pi:
theta = theta - 2*pi
else:
theta = theta + 2*pi
return theta
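# Illustrative note (not part of the handler): orientation_bound behaves like
# taking the angle modulo 2*pi (up to the boundary values 0 and 2*pi), e.g.
#
#     from math import pi
#     theta = 7.5
#     assert abs((theta % (2 * pi)) - (theta - 2 * pi)) < 1e-12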
def plotMap(self,mappedRegions):
"""
Plot regions and obstacles with matplotlib.pyplot
mappedRegions: dictionary mapping region names to region polygons
"""
#if not plt.isinteractive():
# plt.ion()
#plt.hold(True)
if self.operate_system == 1:
for regionName,regionPoly in mappedRegions.iteritems():
self.plotPoly(regionPoly,'k')
plt.figure(1).canvas.draw()
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
c = polygon to be plotted with matplotlib
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
plt.figure(1).canvas.draw()
def data_gen(self):
#self.ax.cla()
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
"""
#for i in range(len(self.V)):
if shape(V)[1] <= 2:
plt.plot(( V[1,shape(V)[1]-1],q_g[0,0]),( V[2,shape(V)[1]-1],q_g[1,0]),'b')
else:
plt.plot(( V[1,E[0,shape(E)[1]-1]], V[1,shape(V)[1]-1],q_g[0,0]),( V[2,E[0,shape(E)[1]-1]], V[2,shape(V)[1]-1],q_g[1,0]),'b')
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
"""
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
"""
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
"""
yield(pose[0],pose[1])
"""
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
"""
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
| gpl-3.0 |
jakirkham/nanshe | tests/test_nanshe/test_imp/test_segment.py | 3 | 110829 | from __future__ import print_function
__author__ = "John Kirkham <[email protected]>"
__date__ = "$Jul 30, 2014 19:35:11 EDT$"
import imp
import nose
import nose.plugins
import nose.plugins.attrib
import numpy
import scipy
import scipy.spatial
import scipy.spatial.distance
import scipy.stats
import nanshe.util.iters
import nanshe.util.xnumpy
import nanshe.imp.segment
import nanshe.syn.data
has_spams = False
try:
imp.find_module("spams")
has_spams = True
except ImportError:
pass
class TestSegment(object):
def test_remove_zeroed_lines_1(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape)
assert (a == b).all()
def test_remove_zeroed_lines_2(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [1, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape)
assert (a == b).all()
def test_remove_zeroed_lines_3(self):
a = numpy.ones((1, 100, 101))
p = 0.2
erosion_shape = [21, 1]
dilation_shape = [1, 3]
nr = numpy.random.geometric(p)
r = numpy.array([numpy.repeat(0, nr), numpy.random.random_integers(1, a.shape[1] - 2, nr)]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape)
assert (a == b).all()
def test_remove_zeroed_lines_4(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [a.shape[1], 0, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, dilation_shape=dilation_shape, erosion_shape=erosion_shape)
assert (a == b).all()
def test_remove_zeroed_lines_5(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0, 0], [a.shape[1], a.shape[1]-1, 0, 1]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, dilation_shape=dilation_shape, erosion_shape=erosion_shape)
assert (a == b).all()
def test_remove_zeroed_lines_6(self):
a = numpy.repeat(numpy.arange(100)[None].T, 101, axis=1)[None].astype(float)
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [1, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape)
assert numpy.allclose(a, b, rtol=0, atol=1e-13)
def test_remove_zeroed_lines_7(self):
a = numpy.repeat(numpy.arange(100)[None], 101, axis=0)[None].astype(float)
a[0, :, 0] = 1
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(a, 0, 0), -1, 0)[:] = 1
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0, 0], [0, 2, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape)
assert numpy.allclose(a, b, rtol=0, atol=1e-13)
def test_remove_zeroed_lines_8(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = numpy.zeros_like(a)
nanshe.imp.segment.remove_zeroed_lines(ar, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b)
assert (a == b).all()
def test_remove_zeroed_lines_9(self):
a = numpy.ones((1, 100, 101))
erosion_shape = [21, 1]
dilation_shape = [1, 3]
r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1])[:] = 0
b = ar
nanshe.imp.segment.remove_zeroed_lines(b, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b)
assert (a == b).all()
@nose.plugins.attrib.attr("3D")
def test_remove_zeroed_lines_10(self):
a = numpy.ones((1, 100, 101, 102))
erosion_shape = [21, 1, 1]
dilation_shape = [1, 3, 1]
r = numpy.array([[0, 0, 0], [a.shape[1]-2, 3, 4], [0, 0, 0]]).T.copy()
print(r)
ar = a.copy()
for each_r in r:
nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(nanshe.util.xnumpy.index_axis_at_pos(ar, 0, each_r[0]), -1, each_r[-1]), -1, each_r[-2])[:] = 0
b = ar
nanshe.imp.segment.remove_zeroed_lines(b, erosion_shape=erosion_shape, dilation_shape=dilation_shape, out=b)
assert (a == b).all()
def test_estimate_f0_1(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
a = numpy.ones((100, 101, 102))
b = nanshe.imp.segment.estimate_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size
)
assert (b == a).all()
def test_estimate_f0_1b(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
a = numpy.ones((100, 101, 102))
b = a.copy()
nanshe.imp.segment.estimate_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
out=b
)
assert (b == a).all()
def test_estimate_f0_1c(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
a = numpy.ones((100, 101, 102))
b = a.copy()
nanshe.imp.segment.estimate_f0(
b,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
out=b
)
assert (b == a).all()
def test_estimate_f0_2(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 49
mean = 0.0
stdev = 1.0
a = numpy.random.normal(mean, stdev, (100, 101, 102))
b = nanshe.imp.segment.estimate_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size
)
# Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
# Hence, multiplication by 99 instead of 100.
assert ((99.0*b.std()) < a.std())
@nose.plugins.attrib.attr("3D")
def test_estimate_f0_3(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
a = numpy.ones((100, 101, 102, 103))
b = nanshe.imp.segment.estimate_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size
)
assert (b == a).all()
@nose.plugins.attrib.attr("3D")
def test_estimate_f0_4(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 49
mean = 0.0
stdev = 1.0
a = numpy.random.normal(mean, stdev, (100, 101, 102, 103))
b = nanshe.imp.segment.estimate_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size
)
# Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
# Hence, multiplication by 99 instead of 100.
assert ((99.0*b.std()) < a.std())
def test_extract_f0_1(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
bias = 100
a = numpy.ones((100, 101, 102))
b = nanshe.imp.segment.extract_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias
)
assert (b == 0).all()
def test_extract_f0_1b(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
bias = 100
a = numpy.ones((100, 101, 102))
b = a.copy()
nanshe.imp.segment.extract_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias,
out=b
)
assert (b == 0).all()
def test_extract_f0_1c(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
bias = 100
a = numpy.ones((100, 101, 102))
b = a.copy()
nanshe.imp.segment.extract_f0(
b,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias,
out=b
)
assert (b == 0).all()
def test_extract_f0_2(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 49
bias = 100
mean = 0.0
stdev = 1.0
a = numpy.random.normal(mean, stdev, (100, 101, 102))
b = nanshe.imp.segment.extract_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias
)
# Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
# Hence, multiplication by 99 instead of 100.
assert ((99.0*b.std()) < a.std())
# Turns out that a difference greater than 0.1 will be over 10 standard deviations away.
assert (((a - 100.0*b) < 0.1).all())
@nose.plugins.attrib.attr("3D")
def test_extract_f0_3(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 20
bias = 100
a = numpy.ones((100, 101, 102, 103))
b = nanshe.imp.segment.extract_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias
)
assert (b == 0).all()
@nose.plugins.attrib.attr("3D")
def test_extract_f0_4(self):
spatial_smoothing_gaussian_filter_stdev = 5.0
spatial_smoothing_gaussian_filter_window_size = 5.0
which_quantile = 0.5
temporal_smoothing_gaussian_filter_stdev = 5.0
temporal_smoothing_gaussian_filter_window_size = 5.0
half_window_size = 49
bias = 100
mean = 0.0
stdev = 1.0
a = numpy.random.normal(mean, stdev, (100, 101, 102, 103))
b = nanshe.imp.segment.extract_f0(
a,
spatial_smoothing_gaussian_filter_stdev=spatial_smoothing_gaussian_filter_stdev,
spatial_smoothing_gaussian_filter_window_size=spatial_smoothing_gaussian_filter_window_size,
which_quantile=which_quantile,
temporal_smoothing_gaussian_filter_stdev=temporal_smoothing_gaussian_filter_stdev,
temporal_smoothing_gaussian_filter_window_size=temporal_smoothing_gaussian_filter_window_size,
half_window_size=half_window_size,
bias=bias
)
# Seems to be basically 2 orders of magnitude in reduction. However, it may be a little above exactly two.
# Hence, multiplication by 99 instead of 100.
assert ((99.0*b.std()) < a.std())
# Turns out that a difference greater than 0.1 will be over 10 standard deviations away.
assert (((a - 100.0*b) < 0.1).all())
def test_preprocess_data_1(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"extract_f0" : {
"spatial_smoothing_gaussian_filter_stdev" : 5.0,
"spatial_smoothing_gaussian_filter_window_size" : 5.0,
"which_quantile" : 0.5,
"temporal_smoothing_gaussian_filter_stdev" : 5.0,
"temporal_smoothing_gaussian_filter_window_size" : 5.0,
"half_window_size" : 20,
"bias" : 100
},
"remove_zeroed_lines" : {
"erosion_shape" : [
21,
1
],
"dilation_shape" : [
1,
3
]
},
"wavelet.transform" : {
"scale" : [
3,
4,
4
]
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24],
[70, 59, 65]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
def test_preprocess_data_2(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"remove_zeroed_lines" : {
"erosion_shape" : [
21,
1
],
"dilation_shape" : [
1,
3
]
},
"wavelet.transform" : {
"scale" : [
3,
4,
4
]
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24],
[70, 59, 65]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
def test_preprocess_data_3(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"extract_f0" : {
"spatial_smoothing_gaussian_filter_stdev" : 5.0,
"spatial_smoothing_gaussian_filter_window_size" : 5.0,
"which_quantile" : 0.5,
"temporal_smoothing_gaussian_filter_stdev" : 5.0,
"temporal_smoothing_gaussian_filter_window_size" : 5.0,
"half_window_size" : 20,
"bias" : 100
},
"wavelet.transform" : {
"scale" : [
3,
4,
4
]
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24],
[70, 59, 65]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
def test_preprocess_data_4(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"extract_f0" : {
"spatial_smoothing_gaussian_filter_stdev" : 5.0,
"spatial_smoothing_gaussian_filter_window_size" : 5.0,
"which_quantile" : 0.5,
"temporal_smoothing_gaussian_filter_stdev" : 5.0,
"temporal_smoothing_gaussian_filter_window_size" : 5.0,
"half_window_size" : 20,
"bias" : 100
},
"remove_zeroed_lines" : {
"erosion_shape" : [
21,
1
],
"dilation_shape" : [
1,
3
]
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24],
[70, 59, 65]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
@nose.plugins.attrib.attr("3D")
def test_preprocess_data_5(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"extract_f0" : {
"spatial_smoothing_gaussian_filter_stdev" : 5.0,
"spatial_smoothing_gaussian_filter_window_size" : 5.0,
"which_quantile" : 0.5,
"temporal_smoothing_gaussian_filter_stdev" : 5.0,
"temporal_smoothing_gaussian_filter_window_size" : 5.0,
"half_window_size" : 20,
"bias" : 100
},
"wavelet.transform" : {
"scale" : [
3,
4,
4,
4
]
}
}
space = numpy.array([100, 100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24, 85],
[70, 59, 65, 17]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
@nose.plugins.attrib.attr("3D")
def test_preprocess_data_6(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"wavelet.transform" : {
"scale" : [
3,
4,
4,
4
]
}
}
space = numpy.array([100, 100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24, 85],
[70, 59, 65, 17]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
@nose.plugins.attrib.attr("3D")
def test_preprocess_data_7(self):
## Does NOT test accuracy.
config = {
"normalize_data" : {
"renormalized_images" : {
"ord" : 2
}
},
"extract_f0" : {
"spatial_smoothing_gaussian_filter_stdev" : 5.0,
"spatial_smoothing_gaussian_filter_window_size" : 5.0,
"which_quantile" : 0.5,
"temporal_smoothing_gaussian_filter_stdev" : 5.0,
"temporal_smoothing_gaussian_filter_window_size" : 5.0,
"half_window_size" : 20,
"bias" : 100
}
}
space = numpy.array([100, 100, 100, 100])
radii = numpy.array([5, 6])
magnitudes = numpy.array([15, 16])
points = numpy.array([[20, 30, 24, 85],
[70, 59, 65, 17]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
image_stack = images.max(axis=0)
nanshe.imp.segment.preprocess_data(image_stack, **config)
def test_generate_dictionary_00(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(numpy.float32),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"K" : len(g),
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
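# A minimal sketch (not part of the original tests) of the matching check that
# each test_generate_dictionary_* case repeats below: every ground-truth mask
# in g must appear, as an exact boolean match, somewhere in the learned
# dictionary d.
#
#     unmatched = list(range(len(g)))
#     for i in range(len(d)):
#         unmatched = [j for j in unmatched if not (d[i] == g[j]).all()]
#     assert len(unmatched) == 0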
def test_generate_dictionary_01(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"K" : len(g),
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_02(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(numpy.float32),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"K" : len(g),
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_03(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"K" : len(g),
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
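# The next four tests pass an initial dictionary and its size (len(g)) as extra positional
# arguments instead of the "K" keyword, and additionally require exact recovery of the input masks.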
def test_generate_dictionary_04(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(numpy.float32),
g.astype(numpy.float32),
len(g),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
def test_generate_dictionary_05(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
g.astype(float),
len(g),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_06(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(numpy.float32),
g.astype(numpy.float32),
len(g),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_07(self):
if not has_spams:
raise nose.SkipTest(
"Cannot run this test without SPAMS being installed."
)
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
g.astype(float),
len(g),
**{
"spams.trainDL" : {
"gamma2" : 0,
"gamma1" : 0,
"numThreads" : 1,
"iter" : 10,
"modeD" : 0,
"posAlpha" : True,
"clean" : True,
"posD" : True,
"batchsize" : 256,
"lambda1" : 0.2,
"lambda2" : 0,
"mode" : 2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
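# Tests 08-11 exercise the scikit-learn dict_learning_online backend instead of SPAMS,
# so they do not require SPAMS to be installed.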
def test_generate_dictionary_08(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
**{
"sklearn.decomposition.dict_learning_online" : {
"n_jobs" : 1,
"n_components" : len(g),
"n_iter" : 20,
"batch_size" : 256,
"alpha" : 0.2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_09(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
**{
"sklearn.decomposition.dict_learning_online" : {
"n_jobs" : 1,
"n_components" : len(g),
"n_iter" : 20,
"batch_size" : 256,
"alpha" : 0.2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
def test_generate_dictionary_10(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
g.astype(float),
len(g),
**{
"sklearn.decomposition.dict_learning_online" : {
"n_jobs" : 1,
"n_iter" : 20,
"batch_size" : 256,
"alpha" : 0.2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
@nose.plugins.attrib.attr("3D")
def test_generate_dictionary_11(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
g = nanshe.syn.data.generate_hypersphere_masks(space, p, radii)
d = nanshe.imp.segment.generate_dictionary(
g.astype(float),
g.astype(float),
len(g),
**{
"sklearn.decomposition.dict_learning_online" : {
"n_jobs" : 1,
"n_iter" : 20,
"batch_size" : 256,
"alpha" : 0.2
}
}
)
d = (d != 0)
assert (g.shape == d.shape)
assert (g.astype(bool).max(axis=0) == d.astype(bool).max(axis=0)).all()
unmatched_g = range(len(g))
matched = dict()
for i in nanshe.util.iters.irange(len(d)):
new_unmatched_g = []
for j in unmatched_g:
if not (d[i] == g[j]).all():
new_unmatched_g.append(j)
else:
matched[i] = j
unmatched_g = new_unmatched_g
print(unmatched_g)
assert (len(unmatched_g) == 0)
assert (g.astype(bool) == d.astype(bool)).all()
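# Local-maximum detection tests: Gaussian spots are placed at known centers and the detected
# maxima are checked against those centers (vigra, scikit-image, and the generic wrapper; 2-D and 3-D).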
def test_generate_local_maxima_vigra_1(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima_vigra(g.max(axis=0))
assert (numpy.array(m.nonzero()) == p.T).all()
@nose.plugins.attrib.attr("3D")
def test_generate_local_maxima_vigra_2(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima_vigra(g.max(axis=0))
assert (numpy.array(m.nonzero()) == p.T).all()
def test_generate_local_maxima_scikit_image_1(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima_scikit_image(g.max(axis=0))
@nose.plugins.attrib.attr("3D")
def test_generate_local_maxima_scikit_image_2(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima_scikit_image(g.max(axis=0))
assert (numpy.array(m.nonzero()) == p.T).all()
def test_generate_local_maxima_1(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima(g.max(axis=0))
assert (numpy.array(m.nonzero()) == p.T).all()
@nose.plugins.attrib.attr("3D")
def test_generate_local_maxima_2(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = nanshe.imp.segment.generate_local_maxima(g.max(axis=0))
assert (numpy.array(m.nonzero()) == p.T).all()
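# extended_region_local_maxima_properties: check label counts, areas, centroids, and intensities
# against the synthetic ground truth; tests 2 and 4 merge two overlapping Gaussians into one labeled region.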
def test_extended_region_local_maxima_properties_1(self):
p = numpy.array([[27, 51],
[66, 85],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = (g > 0.00065)
g *= m
e = nanshe.imp.segment.extended_region_local_maxima_properties(
g.max(axis=0),
nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
)
assert (numpy.bincount(e["label"])[1:] == 1).all()
assert (len(e) == len(p))
assert (e["local_max"] == p).all()
assert (e["area"] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()
assert (e["centroid"] == e["local_max"]).all()
assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()
def test_extended_region_local_maxima_properties_2(self):
p = numpy.array([[27, 51],
[32, 53],
[77, 45]])
space = numpy.array((100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
g = numpy.array([g[0] + g[1], g[2]])
m = (g > 0.00065)
g *= m
e = nanshe.imp.segment.extended_region_local_maxima_properties(
g.max(axis=0),
nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
)
assert (numpy.bincount(e["label"])[1:] == numpy.array([2, 1])).all()
assert (len(e) == len(p))
assert (e["local_max"] == p).all()
assert (e["area"][[0, 2]] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()
# Not exactly equal due to floating point round off error
assert ((e["centroid"][0] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
# Not exactly equal due to floating point round off error
assert ((e["centroid"][1] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
assert (e["centroid"][2] == e["local_max"][2]).all()
assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()
@nose.plugins.attrib.attr("3D")
def test_extended_region_local_maxima_properties_3(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
m = (g > 0.00065)
g *= m
e = nanshe.imp.segment.extended_region_local_maxima_properties(
g.max(axis=0),
nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
)
assert (numpy.bincount(e["label"])[1:] == 1).all()
assert (len(e) == len(p))
assert (e["local_max"] == p).all()
assert (e["area"] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()
assert (e["centroid"] == e["local_max"]).all()
assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()
@nose.plugins.attrib.attr("3D")
def test_extended_region_local_maxima_properties_4(self):
p = numpy.array([[27, 51, 87],
[66, 85, 55],
[77, 45, 26]])
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 6, 7))
magnitudes = numpy.array((1, 1, 1), dtype=float)
g = nanshe.syn.data.generate_gaussian_images(
space, p, radii/3.0, magnitudes/3
)
g = numpy.array([g[0] + g[1], g[2]])
m = (g > 0.00065)
g *= m
e = nanshe.imp.segment.extended_region_local_maxima_properties(
g.max(axis=0),
nanshe.util.xnumpy.enumerate_masks_max(m, axis=0)[0]
)
assert (numpy.bincount(e["label"])[1:] == numpy.array([2, 1])).all()
assert (len(e) == len(p))
assert (e["local_max"] == p).all()
assert (e["area"][[0, 2]] == numpy.apply_over_axes(numpy.sum, m, axes=range(1, m.ndim)).squeeze().astype(float)).all()
# Not exactly equal due to floating point round off error
assert ((e["centroid"][0] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
# Not exactly equal due to floating point round off error
assert ((e["centroid"][1] - numpy.array(m[0].nonzero()).mean(axis=1)) < 1e-14).all()
assert (e["centroid"][2] == e["local_max"][2]).all()
assert (e["intensity"] == g.max(axis=0)[tuple(p.T)]).all()
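# remove_low_intensity_local_maxima: a threshold of 1.0 removes every local maximum, while thresholds
# at or just above the measured fractions of below-max pixels remove none, one, or both (2-D tests 1-4, 3-D tests 5-8).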
def test_remove_low_intensity_local_maxima_1(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, 1.0)
assert (len(points) == len(e.props))
assert (0 == len(e2.props))
def test_remove_low_intensity_local_maxima_2(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[0])
assert (len(points) == len(e.props))
assert (len(e.props) == len(e2.props))
def test_remove_low_intensity_local_maxima_3(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1])
assert (len(points) == len(e.props))
assert ((len(e.props) - 1) == len(e2.props))
def test_remove_low_intensity_local_maxima_4(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1] + \
numpy.finfo(float).eps)
assert (len(points) == len(e.props))
assert ((len(e.props) - 2) == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_low_intensity_local_maxima_5(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36, 21],
[58, 64, 62]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, 1.0)
assert (len(points) == len(e.props))
assert (0 == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_low_intensity_local_maxima_6(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36, 21],
[58, 64, 62]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[0])
assert (len(points) == len(e.props))
assert (len(e.props) == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_low_intensity_local_maxima_7(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36, 21],
[58, 64, 62]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1])
assert (len(points) == len(e.props))
assert ((len(e.props) - 1) == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_low_intensity_local_maxima_8(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 10))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[23, 36, 21],
[58, 64, 62]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
percentage_pixels_below_max = numpy.zeros((len(masks),), float)
for i in nanshe.util.iters.irange(len(masks)):
pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
pixels = masks[i].sum()
percentage_pixels_below_max[i] = float(pixels_below_max) / float(pixels)
percentage_pixels_below_max = numpy.sort(percentage_pixels_below_max)
e2 = nanshe.imp.segment.remove_low_intensity_local_maxima(e, percentage_pixels_below_max[1] + \
numpy.finfo(float).eps)
assert (len(points) == len(e.props))
assert ((len(e.props) - 2) == len(e2.props))
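# remove_too_close_local_maxima: maxima in the same label closer than the distance threshold collapse
# to one (the brighter one when magnitudes differ); maxima in separate labels are kept.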
def test_remove_too_close_local_maxima_1(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[63, 69],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
def test_remove_too_close_local_maxima_2(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[63, 69],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (len(points) == len(e2.props))
def test_remove_too_close_local_maxima_3(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1.01), dtype=float)
points = numpy.array([[63, 69],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()
def test_remove_too_close_local_maxima_4(self):
space = numpy.array((100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1.01, 1), dtype=float)
points = numpy.array([[63, 69],
[58, 64]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()
@nose.plugins.attrib.attr("3D")
def test_remove_too_close_local_maxima_5(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[63, 69, 26],
[58, 64, 21]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_too_close_local_maxima_6(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1), dtype=float)
points = numpy.array([[63, 69, 26],
[58, 64, 21]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = nanshe.util.xnumpy.enumerate_masks_max(masks, axis=0)[0]
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (len(points) == len(e2.props))
@nose.plugins.attrib.attr("3D")
def test_remove_too_close_local_maxima_7(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1, 1.01), dtype=float)
points = numpy.array([[63, 69, 26],
[58, 64, 21]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()
@nose.plugins.attrib.attr("3D")
def test_remove_too_close_local_maxima_8(self):
space = numpy.array((100, 100, 100))
radii = numpy.array((5, 5))
magnitudes = numpy.array((1.01, 1), dtype=float)
points = numpy.array([[63, 69, 26],
[58, 64, 21]])
masks = nanshe.syn.data.generate_hypersphere_masks(
space, points, radii
)
images = nanshe.syn.data.generate_gaussian_images(
space, points, radii/3.0, magnitudes
) * masks
labels = masks.max(axis=0).astype(int)
e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
dist = scipy.spatial.distance.pdist(points).max()
i = 0
while (dist + i * numpy.finfo(type(dist)).eps) == dist:
i += 1
dist += i * numpy.finfo(type(dist)).eps
e2 = nanshe.imp.segment.remove_too_close_local_maxima(e, dist)
assert (len(points) == len(e.props))
assert (1 == len(e2.props))
assert (points[magnitudes == magnitudes.max()] == e2.props["local_max"][0]).all()
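# wavelet_thresholding: the mask at the next-to-last scale should reproduce the synthetic neuron mask
# exactly, and the last-scale mask should contain it.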
def test_wavelet_thresholding_1(self):
params = {
"significance_threshold" : 3.0,
"wavelet_scale" : 5,
"noise_threshold" : 3.0
}
shape = numpy.array((500, 500))
neuron_centers = numpy.array([[177, 52], [127, 202], [343, 271]])
original_neurons_image = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, (50.0/3.0,)*len(neuron_centers), (1.0/3.0,)*len(neuron_centers)).sum(axis=0)
original_neurons_mask = (original_neurons_image >= 0.00014218114898827068)
wtt_image, wtt_mask = nanshe.imp.segment.wavelet_thresholding(
original_neurons_image, **params
)
assert (wtt_mask[-2] == original_neurons_mask).all()
assert ((wtt_mask[-1] & original_neurons_mask) == original_neurons_mask).all()
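# match_regions_properties: given a structured array of region properties and a constraint dictionary,
# returns one boolean per region indicating whether its properties fall inside the given ranges.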
def test_match_regions_properties_1(self):
props = numpy.array(
[
(1, 1990.0, [3.7402010050251255, 127.0, 202.0], 0.9990127357638044, 39.484721299262105),
(2, 1988.0, [3.7399396378269616, 177.0, 52.021126760563384], 0.9990128314664918, 39.49948424388854),
(3, 1990.0, [3.7402010050251255, 343.0, 271.0], 0.9990127357638044, 39.484721299262105)
],
dtype=[
('label', '<i8'),
('area', '<f8'),
('centroid', '<f8', (3,)),
('eccentricity', '<f8'),
('major_axis_length', '<f8')
]
)
params = {
"area": {
"min" : 1990, "max" : 2000
}
}
matches = nanshe.imp.segment.match_regions_properties(props, params)
assert len(matches) == len(props)
assert (matches == numpy.array([ True, False, True])).all()
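# wavelet_denoising: full denoising/segmentation of synthetic Gaussian neurons; the recovered masks and
# images must match the ground truth (tests 2 and 3 first re-order the expected neurons to line up with the results).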
def test_wavelet_denoising_1(self):
params = {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0
},
"wavelet.transform" : {
"scale" : 5
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 25.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 600,
"min" : 30
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 100.0
},
"use_watershed" : True
}
shape = numpy.array((500, 500))
neuron_centers = numpy.array([[177, 52], [127, 202], [343, 271]])
original_neuron_image = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, (50.0/3.0,)*len(neuron_centers), (1.0/3.0,)*len(neuron_centers)).sum(axis=0)
original_neurons_mask = (original_neuron_image >= 0.00014218114898827068)
neurons = nanshe.imp.segment.wavelet_denoising(original_neuron_image, **params)
assert (len(neuron_centers) == len(neurons))
assert (original_neurons_mask == neurons["mask"].max(axis=0)).all()
assert ((original_neurons_mask*original_neuron_image) == neurons["image"].max(axis=0)).all()
def test_wavelet_denoising_2(self):
params = {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0
},
"wavelet.transform" : {
"scale" : 5
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 150.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 10000,
"min" : 0
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 100.0
},
"use_watershed" : True
}
shape = numpy.array((500, 500))
neuron_centers = numpy.array([[127, 202], [177, 52], [343, 271]])
neuron_radii = numpy.array((50.0,)*len(neuron_centers))
neuron_magnitudes = numpy.array((1.0/3.0,)*len(neuron_centers))
neuron_spreads = neuron_radii / 3.0
neuron_images = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, neuron_spreads, neuron_magnitudes)
neuron_masks = (neuron_images >= (neuron_magnitudes.max() * scipy.stats.norm.pdf(3 * neuron_spreads.max(), scale=neuron_spreads.max())**len(shape)))
neuron_images *= neuron_masks
neurons = nanshe.imp.segment.wavelet_denoising(neuron_images.max(axis=0), **params)
# Re-sort the expected neuron data so its order matches the neurons returned by wavelet_denoising (nearest image match).
result_neurons_distance = scipy.spatial.distance.cdist(neuron_images.reshape(neurons.shape + (-1,)), neurons["image"].reshape(neurons.shape + (-1,)))
neuron_centers_old = neuron_centers
neuron_radii_old = neuron_radii
neuron_magnitudes_old = neuron_magnitudes
neuron_images_old = neuron_images
neuron_masks_old = neuron_masks
neuron_centers = numpy.zeros(neuron_centers_old.shape, dtype=neuron_centers_old.dtype)
neuron_radii = numpy.zeros(neuron_radii_old.shape, dtype=neuron_radii_old.dtype)
neuron_magnitudes = numpy.zeros(neuron_magnitudes_old.shape, dtype=neuron_magnitudes_old.dtype)
neuron_images = numpy.zeros(neuron_images_old.shape, dtype=neuron_images_old.dtype)
neuron_masks = numpy.zeros(neuron_masks_old.shape, dtype=neuron_masks_old.dtype)
for i1, i2 in enumerate(result_neurons_distance.argmin(axis=1)):
neuron_centers[i1] = neuron_centers_old[i2]
neuron_radii[i1] = neuron_radii_old[i2]
neuron_magnitudes[i1] = neuron_magnitudes_old[i2]
neuron_images[i1] = neuron_images_old[i2]
neuron_masks[i1] = neuron_masks_old[i2]
neuron_centers_old = None
neuron_radii_old = None
neuron_magnitudes_old = None
neuron_images_old = None
neuron_masks_old = None
assert (len(neuron_centers) == len(neurons))
assert (numpy.abs(neurons["image"].max(axis=0) - neuron_images.max(axis=0)).max() < 1.0e-4)
assert (numpy.abs(neurons["image"] - neuron_images).max() < 1.0e-4)
@nose.plugins.attrib.attr("3D")
def test_wavelet_denoising_3(self):
params = {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0
},
"wavelet.transform" : {
"scale" : 5
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 30.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 30000,
"min" : 10000
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 100.0
},
"use_watershed" : True
}
shape = numpy.array((100, 100, 100))
neuron_centers = numpy.array([[21, 17, 46], [46, 71, 83], [77, 52, 17]])
neuron_radii = numpy.array((10.0,)*len(neuron_centers))
neuron_magnitudes = numpy.array((1.0/3.0,)*len(neuron_centers))
neuron_spreads = neuron_radii / 3.0
neuron_images = nanshe.syn.data.generate_gaussian_images(shape, neuron_centers, neuron_spreads, neuron_magnitudes)
neuron_masks = (neuron_images >= (neuron_magnitudes.max() * scipy.stats.norm.pdf(3 * neuron_spreads.max(), scale=neuron_spreads.max())**len(shape)))
neuron_images *= neuron_masks
neurons = nanshe.imp.segment.wavelet_denoising(neuron_images.max(axis=0), **params)
# Re-sort the expected neuron data so its order matches the neurons returned by wavelet_denoising (nearest image match).
result_neurons_distance = scipy.spatial.distance.cdist(neuron_images.reshape(neurons.shape + (-1,)), neurons["image"].reshape(neurons.shape + (-1,)))
neuron_centers_old = neuron_centers
neuron_radii_old = neuron_radii
neuron_magnitudes_old = neuron_magnitudes
neuron_images_old = neuron_images
neuron_masks_old = neuron_masks
neuron_centers = numpy.zeros(neuron_centers_old.shape, dtype=neuron_centers_old.dtype)
neuron_radii = numpy.zeros(neuron_radii_old.shape, dtype=neuron_radii_old.dtype)
neuron_magnitudes = numpy.zeros(neuron_magnitudes_old.shape, dtype=neuron_magnitudes_old.dtype)
neuron_images = numpy.zeros(neuron_images_old.shape, dtype=neuron_images_old.dtype)
neuron_masks = numpy.zeros(neuron_masks_old.shape, dtype=neuron_masks_old.dtype)
for i1, i2 in enumerate(result_neurons_distance.argmin(axis=1)):
neuron_centers[i1] = neuron_centers_old[i2]
neuron_radii[i1] = neuron_radii_old[i2]
neuron_magnitudes[i1] = neuron_magnitudes_old[i2]
neuron_images[i1] = neuron_images_old[i2]
neuron_masks[i1] = neuron_masks_old[i2]
neuron_centers_old = None
neuron_radii_old = None
neuron_magnitudes_old = None
neuron_images_old = None
neuron_masks_old = None
assert (len(neuron_centers) == len(neurons))
assert (numpy.abs(neurons["image"].max(axis=0) - neuron_images.max(axis=0)).max() < 1.0e-6)
assert (numpy.abs(neurons["image"] - neuron_images).max() < 1.0e-6)
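# extract_neurons: circular masks applied to a constant image; the returned record array must reproduce
# the masks, images, areas, max_F, Gaussian means/covariances, and centroids.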
def test_extract_neurons_1(self):
image = 5 * numpy.ones((100, 100))
xy = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25], [74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
circle_images = circle_masks * image
circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
assert (len(circle_masks) == len(neurons))
assert (circle_masks == neurons["mask"]).all()
assert (circle_images == neurons["image"]).all()
assert (numpy.apply_over_axes(numpy.sum, circle_masks, range(1, circle_masks.ndim)) == neurons["area"]).all()
assert (numpy.apply_over_axes(numpy.max, circle_images, range(1, circle_masks.ndim)) == neurons["max_F"]).all()
assert (circle_mask_mean == neurons["gaussian_mean"]).all()
assert (circle_mask_cov == neurons["gaussian_cov"]).all()
assert (neurons["centroid"] == neurons["gaussian_mean"]).all()
@nose.plugins.attrib.attr("3D")
def test_extract_neurons_2(self):
image = 5 * numpy.ones((100, 100, 100))
xyz = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
circle_images = circle_masks * image
circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
assert (len(circle_masks) == len(neurons))
assert (circle_masks == neurons["mask"]).all()
assert (circle_images == neurons["image"]).all()
assert (numpy.apply_over_axes(numpy.sum, circle_masks, range(1, circle_masks.ndim)) == neurons["area"]).all()
assert (numpy.apply_over_axes(numpy.max, circle_images, range(1, circle_masks.ndim)) == neurons["max_F"]).all()
assert (circle_mask_mean == neurons["gaussian_mean"]).all()
assert (circle_mask_cov == neurons["gaussian_cov"]).all()
assert (neurons["centroid"] == neurons["gaussian_mean"]).all()
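# fuse_neurons: fusing two extracted neurons should combine the two masks, average the images and
# Gaussian means, and sum the areas.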
def test_fuse_neurons_1(self):
fraction_mean_neuron_max_threshold = 0.01
image = 5 * numpy.ones((100, 100))
xy = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25], [74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
fused_neurons = nanshe.imp.segment.fuse_neurons(neurons[0], neurons[1],
fraction_mean_neuron_max_threshold)
assert (neurons["mask"].sum(axis=0) == fused_neurons["mask"]).all()
assert (neurons["image"].mean(axis=0) == fused_neurons["image"]).all()
assert (numpy.array(neurons["area"].sum()) == fused_neurons["area"])
assert (fused_neurons["image"].max() == fused_neurons["max_F"])
assert (neurons["gaussian_mean"].mean(axis=0) == fused_neurons["gaussian_mean"]).all()
assert (fused_neurons["centroid"] == fused_neurons["gaussian_mean"]).all()
@nose.plugins.attrib.attr("3D")
def test_fuse_neurons_2(self):
fraction_mean_neuron_max_threshold = 0.01
image = 5 * numpy.ones((100, 100, 100))
xy = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
circle_mask_cov[circle_mask_i] = numpy.cov(each_circle_mask_points)
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
fused_neurons = nanshe.imp.segment.fuse_neurons(neurons[0], neurons[1],
fraction_mean_neuron_max_threshold)
assert (neurons["mask"].sum(axis=0) == fused_neurons["mask"]).all()
assert (neurons["image"].mean(axis=0) == fused_neurons["image"]).all()
assert (numpy.array(neurons["area"].sum()) == fused_neurons["area"])
assert (fused_neurons["image"].max() == fused_neurons["max_F"])
assert (neurons["gaussian_mean"].mean(axis=0) == fused_neurons["gaussian_mean"]).all()
assert (fused_neurons["centroid"] == fused_neurons["gaussian_mean"]).all()
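# merge_neuron_sets: merging two disjoint sets (or a set with itself) should return the original neurons unchanged.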
def test_merge_neuron_sets_1(self):
alignment_min_threshold = 0.6
overlap_min_threshold = 0.6
fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01}
image = 5 * numpy.ones((100, 100))
xy = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25], [74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons[:1], neurons[1:], alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons)
assert (len(neurons) == len(circle_centers))
assert (neurons == merged_neurons).all()
def test_merge_neuron_sets_2(self):
alignment_min_threshold = 0.6
overlap_min_threshold = 0.6
fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01}
image = 5 * numpy.ones((100, 100))
xy = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25]])
circle_radii = numpy.array([25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xy, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons, neurons, alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons)
assert (len(neurons) == len(circle_centers))
assert (neurons == merged_neurons).all()
@nose.plugins.attrib.attr("3D")
def test_merge_neuron_sets_3(self):
alignment_min_threshold = 0.6
overlap_min_threshold = 0.6
fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01}
image = 5 * numpy.ones((100, 100, 100))
xyz = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25, 25], [74, 74, 74]])
circle_radii = numpy.array([25, 25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons[:1], neurons[1:], alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons)
assert (len(neurons) == len(circle_centers))
assert (neurons == merged_neurons).all()
@nose.plugins.attrib.attr("3D")
def test_merge_neuron_sets_4(self):
alignment_min_threshold = 0.6
overlap_min_threshold = 0.6
fuse_neurons = {"fraction_mean_neuron_max_threshold" : 0.01}
image = 5 * numpy.ones((100, 100, 100))
xyz = numpy.indices(image.shape)
circle_centers = numpy.array([[25, 25, 25]])
circle_radii = numpy.array([25])
circle_offsets = nanshe.util.xnumpy.expand_view(circle_centers, image.shape) - \
nanshe.util.xnumpy.expand_view(xyz, reps_before=len(circle_centers))
circle_offsets_squared = circle_offsets**2
circle_masks = (circle_offsets_squared.sum(axis=1)**.5 < nanshe.util.xnumpy.expand_view(circle_radii, image.shape))
neurons = nanshe.imp.segment.extract_neurons(image, circle_masks)
merged_neurons = nanshe.imp.segment.merge_neuron_sets(neurons, neurons, alignment_min_threshold, overlap_min_threshold, fuse_neurons=fuse_neurons)
assert (len(neurons) == len(circle_centers))
assert (neurons == merged_neurons).all()
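# postprocess_data: run the full wavelet-denoising + merge pipeline on synthetic basis images and
# require every ground-truth center to be matched exactly once.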
def test_postprocess_data_1(self):
config = {
"wavelet_denoising" : {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0.0
},
"wavelet.transform" : {
"scale" : 4
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 25.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 600,
"min" : 30
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 10.0
},
"use_watershed" : True
},
"merge_neuron_sets" : {
"alignment_min_threshold" : 0.6,
"fuse_neurons" : {
"fraction_mean_neuron_max_threshold" : 0.01
},
"overlap_min_threshold" : 0.6
}
}
space = numpy.array([100, 100])
radii = numpy.array([7, 6, 6, 6, 7, 6])
magnitudes = numpy.array([15, 16, 15, 17, 16, 16])
points = numpy.array([[30, 24],
[59, 65],
[21, 65],
[13, 12],
[72, 16],
[45, 32]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
bases_indices = [[1,3,4], [0,2], [5]]
bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype)
bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype)
for i, each_basis_indices in enumerate(bases_indices):
bases_masks[i] = masks[list(each_basis_indices)].max(axis=0)
bases_images[i] = images[list(each_basis_indices)].max(axis=0)
neurons = nanshe.imp.segment.postprocess_data(bases_images, **config)
assert (len(points) == len(neurons))
neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"])
neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0)
neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy()
matched = dict()
unmatched_points = numpy.arange(len(points))
for i in nanshe.util.iters.irange(len(neuron_points)):
new_unmatched_points = []
for j in unmatched_points:
if not (neuron_points[i] == points[j]).all():
new_unmatched_points.append(j)
else:
matched[i] = j
unmatched_points = new_unmatched_points
assert (len(unmatched_points) == 0)
def test_postprocess_data_2(self):
config = {
"wavelet_denoising" : {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0.0
},
"wavelet.transform" : {
"scale" : 4
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 25.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 600,
"min" : 30
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 10.0
},
"use_watershed" : True
},
"merge_neuron_sets" : {
"alignment_min_threshold" : 0.6,
"fuse_neurons" : {
"fraction_mean_neuron_max_threshold" : 0.01
},
"overlap_min_threshold" : 0.6
}
}
space = numpy.array([100, 100])
radii = numpy.array([25])
magnitudes = numpy.array([15])
points = numpy.array([[25, 25]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, numpy.vstack([points, points]), numpy.hstack([radii, radii]))
images = nanshe.syn.data.generate_gaussian_images(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])/3.0, numpy.hstack([magnitudes, magnitudes])) * masks
print(masks.shape)
bases_indices = [[0], [1]]
bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype)
bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype)
for i, each_basis_indices in enumerate(bases_indices):
bases_masks[i] = masks[list(each_basis_indices)].max(axis=0)
bases_images[i] = images[list(each_basis_indices)].max(axis=0)
neurons = nanshe.imp.segment.postprocess_data(bases_images, **config)
assert (len(points) == len(neurons))
neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"])
neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0)
neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy()
matched = dict()
unmatched_points = numpy.arange(len(points))
for i in nanshe.util.iters.irange(len(neuron_points)):
new_unmatched_points = []
for j in unmatched_points:
if not (neuron_points[i] == points[j]).all():
new_unmatched_points.append(j)
else:
matched[i] = j
unmatched_points = new_unmatched_points
assert (len(unmatched_points) == 0)
@nose.plugins.attrib.attr("3D")
def test_postprocess_data_3(self):
config = {
"wavelet_denoising" : {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0.0
},
"wavelet.transform" : {
"scale" : 4
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 30.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 6000.0,
"min" : 1000.0
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 20.0
},
"use_watershed" : True
},
"merge_neuron_sets" : {
"alignment_min_threshold" : 0.6,
"fuse_neurons" : {
"fraction_mean_neuron_max_threshold" : 0.01
},
"overlap_min_threshold" : 0.6
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([7, 6, 6, 6, 7, 6])
magnitudes = numpy.array([15, 16, 15, 17, 16, 16])
points = numpy.array([[30, 24, 68],
[59, 65, 47],
[21, 65, 21],
[13, 12, 21],
[72, 16, 67],
[45, 32, 27]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, points, radii)
images = nanshe.syn.data.generate_gaussian_images(space, points, radii/3.0, magnitudes) * masks
bases_indices = [[1,3,4], [0,2], [5]]
bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype)
bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype)
for i, each_basis_indices in enumerate(bases_indices):
bases_masks[i] = masks[list(each_basis_indices)].max(axis=0)
bases_images[i] = images[list(each_basis_indices)].max(axis=0)
neurons = nanshe.imp.segment.postprocess_data(bases_images, **config)
assert (len(points) == len(neurons))
neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"])
neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0)
neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy()
matched = dict()
unmatched_points = numpy.arange(len(points))
for i in nanshe.util.iters.irange(len(neuron_points)):
new_unmatched_points = []
for j in unmatched_points:
if not (neuron_points[i] == points[j]).all():
new_unmatched_points.append(j)
else:
matched[i] = j
unmatched_points = new_unmatched_points
assert (len(unmatched_points) == 0)
@nose.plugins.attrib.attr("3D")
def test_postprocess_data_4(self):
config = {
"wavelet_denoising" : {
"remove_low_intensity_local_maxima" : {
"percentage_pixels_below_max" : 0.0
},
"wavelet.transform" : {
"scale" : 4
},
"accepted_region_shape_constraints" : {
"major_axis_length" : {
"max" : 30.0,
"min" : 0.0
}
},
"accepted_neuron_shape_constraints" : {
"eccentricity" : {
"max" : 0.9,
"min" : 0.0
},
"area" : {
"max" : 70000.0,
"min" : 10000.0
}
},
"estimate_noise" : {
"significance_threshold" : 3.0
},
"significant_mask" : {
"noise_threshold" : 3.0
},
"remove_too_close_local_maxima" : {
"min_local_max_distance" : 20.0
},
"use_watershed" : True
},
"merge_neuron_sets" : {
"alignment_min_threshold" : 0.6,
"fuse_neurons" : {
"fraction_mean_neuron_max_threshold" : 0.01
},
"overlap_min_threshold" : 0.6
}
}
space = numpy.array([100, 100, 100])
radii = numpy.array([25])
magnitudes = numpy.array([15])
points = numpy.array([[25, 25, 25]])
masks = nanshe.syn.data.generate_hypersphere_masks(space, numpy.vstack([points, points]), numpy.hstack([radii, radii]))
images = nanshe.syn.data.generate_gaussian_images(space, numpy.vstack([points, points]), numpy.hstack([radii, radii])/3.0, numpy.hstack([magnitudes, magnitudes])) * masks
bases_indices = [[0], [1]]
bases_masks = numpy.zeros((len(bases_indices),) + masks.shape[1:], dtype=masks.dtype)
bases_images = numpy.zeros((len(bases_indices),) + images.shape[1:], dtype=images.dtype)
for i, each_basis_indices in enumerate(bases_indices):
bases_masks[i] = masks[list(each_basis_indices)].max(axis=0)
bases_images[i] = images[list(each_basis_indices)].max(axis=0)
neurons = nanshe.imp.segment.postprocess_data(bases_images, **config)
assert (len(points) == len(neurons))
neuron_max_matches = nanshe.util.xnumpy.all_permutations_equal(neurons["max_F"], neurons["image"])
neuron_max_matches = neuron_max_matches.max(axis=0).max(axis=0)
neuron_points = numpy.array(neuron_max_matches.nonzero()).T.copy()
matched = dict()
unmatched_points = numpy.arange(len(points))
for i in nanshe.util.iters.irange(len(neuron_points)):
new_unmatched_points = []
for j in unmatched_points:
if not (neuron_points[i] == points[j]).all():
new_unmatched_points.append(j)
else:
matched[i] = j
unmatched_points = new_unmatched_points
assert (len(unmatched_points) == 0)
| bsd-3-clause |
jcrudy/py-earth | pyearth/test/testing_utils.py | 2 | 3198 | import os
from functools import wraps
from nose import SkipTest
from nose.tools import assert_almost_equal
from distutils.version import LooseVersion
import sys
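# Decorators used by the py-earth test suite to skip tests when an optional dependency
# (statsmodels, pandas, sympy, patsy, ...) or some other precondition is unavailable.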
def if_environ_has(var_name):
# Test decorator that skips test if environment variable is not defined
def if_environ(func):
@wraps(func)
def run_test(*args, **kwargs):
if var_name in os.environ:
return func(*args, **kwargs)
else:
raise SkipTest('Only run if %s environment variable is '
'defined.' % var_name)
return run_test
return if_environ
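# Hedged usage sketch -- the test below and the environment-variable name
# are illustrative only, not part of the original suite.  The factory takes
# the variable name and returns the decorator that wraps the test.
@if_environ_has('PYEARTH_SLOW_TESTS')
def _example_env_gated_check():
    assert 1 + 1 == 2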
def if_platform_not_win_32(func):
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'win32':
raise SkipTest('Skip for 32 bit Windows platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_sklearn_version_greater_than_or_equal_to(min_version):
'''
Test decorator that skips test unless sklearn version is greater than or
equal to min_version.
'''
def _if_sklearn_version(func):
@wraps(func)
def run_test(*args, **kwargs):
import sklearn
if LooseVersion(sklearn.__version__) < LooseVersion(min_version):
raise SkipTest('sklearn version less than %s' %
str(min_version))
else:
return func(*args, **kwargs)
return run_test
return _if_sklearn_version
def if_statsmodels(func):
"""Test decorator that skips test if statsmodels not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import statsmodels
except ImportError:
raise SkipTest('statsmodels not available.')
else:
return func(*args, **kwargs)
return run_test
def if_pandas(func):
"""Test decorator that skips test if pandas not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import pandas
except ImportError:
raise SkipTest('pandas not available.')
else:
return func(*args, **kwargs)
return run_test
def if_sympy(func):
""" Test decorator that skips test if sympy not installed """
@wraps(func)
def run_test(*args, **kwargs):
try:
from sympy import Symbol, Add, Mul, Max, RealNumber, Piecewise, sympify, Pow, And, lambdify
except ImportError:
raise SkipTest('sympy not available.')
else:
return func(*args, **kwargs)
return run_test
def if_patsy(func):
"""Test decorator that skips test if patsy not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import patsy
except ImportError:
raise SkipTest('patsy not available.')
else:
return func(*args, **kwargs)
return run_test
def assert_list_almost_equal(list1, list2):
for el1, el2 in zip(list1, list2):
assert_almost_equal(el1, el2)
def assert_list_almost_equal_value(list, value):
for el in list:
assert_almost_equal(el, value)
| bsd-3-clause |
ojustino/test-demo | py_exes/est_pi.py | 1 | 3416 | import numpy as np
import numpy.random as rand
import matplotlib.pyplot as plt
import matplotlib.axes as ax
# ARRAYS ARE (supposedly) FASTER THAN LOOPS, MOST OF THE TIME
def est_pi(N,R): # takes in number of points and radius of circle
a = 0
inside = 0.; outside = 0. # counters for point locations
while(a < N):
x = R*np.random.rand()
y = R*np.random.rand()
# if condition to determine whether or not point is in circle
if x**2 + y**2 <= R**2:
inside += 1
else:
outside += 1
a += 1
return 4.*(inside/(inside+outside))
def est_pi2(N,R):
#np.random.seed(3) # makes random generator give same number each time
x = R*rand.random(N); y = R*rand.random(N)
M = x**2 + y**2 <= R**2 # boolean array with same length as x and y based on conditional
M_in = float(len(x[M]))
pi_est = 4.*M_in/len(x) # could've also been len(y) at bottom... or N
plt.scatter(x,y,marker='x',alpha=.25,color='#1C105E')
plt.plot([0,0],[0,R],linewidth=3,linestyle='--',color='#000000')
plt.plot([0,R],[R,R],linewidth=3,linestyle='--',color='#000000')
plt.plot([R,R],[R,0],linewidth=3,linestyle='--',color='#000000')
plt.plot([R,0],[0,0],linewidth=3,linestyle='--',color='#000000')
circle = plt.Circle((0,0),R,alpha=.5,linewidth=10,color='#E65F20')
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.title('Dart Locations')
plt.xlim(-.99,.99)
plt.xlim(0-.1,R+.1)
plt.ylim(0-.1,R+.1)
plt.show()
return pi_est
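# Hedged timing sketch (left commented out so the script's behaviour is
# unchanged; est_pi2 opens a plot window per call, so its plotting lines
# would need to be stripped before a fair loop-vs-array comparison):
#
#     import timeit
#     timeit.timeit(lambda: est_pi(10000, 1.0), number=10)    # loop version
#     timeit.timeit(lambda: est_pi2(10000, 1.0), number=10)   # array version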
a1 = np.zeros(1000); b1 = np.zeros(1000); c1 = np.zeros(1000)
d1 = np.zeros(1000); e1 = np.zeros(1000)
a1 = [est_pi2(1e3,10) for i in range(100)]
plt.hist(a1,bins=10,histtype='barstacked',alpha=.2,linewidth=7,color='#B71234')
plt.title('Pi Estimate Distribution, N = 1e3')
plt.xlabel('Estimate')
plt.ylabel('Frequency')
plt.show()
b1 = [est_pi2(1e4,10) for i in range(100)]
plt.hist(b1,bins=10,histtype='barstacked',alpha=.2,linewidth=7,color='#B71234')
plt.title('Pi Estimate Distribution, N = 1e4')
plt.xlabel('Estimate')
plt.ylabel('Frequency')
plt.show()
c1 = [est_pi2(1e5,10) for i in range(100)]
plt.hist(c1,bins=10,histtype='barstacked',alpha=.2,linewidth=7,color='#B71234')
plt.title('Pi Estimate Distribution, N = 1e5')
plt.xlabel('Estimate')
plt.ylabel('Frequency')
plt.show()
d1 = [est_pi2(1e6,10) for i in range(100)]
plt.hist(d1,bins=10,histtype='barstacked',alpha=.2,linewidth=7,color='#B71234')
plt.title('Pi Estimate Distribution, N = 1e6')
plt.xlabel('Estimate')
plt.ylabel('Frequency')
plt.show()
e1 = [est_pi2(1e7,10) for i in range(100)]
plt.hist(e1,bins=10,histtype='barstacked',alpha=.2,linewidth=7,color='#B71234')
plt.title('Pi Estimate Distribution, N = 1e7')
plt.xlabel('Estimate')
plt.ylabel('Frequency')
plt.show()
'''# if you want a plot of a single result from a trial at each N, remove the loops and call each function twice, then uncomment this
e3 = est_pi2(1e3,10)
e4 = est_pi2(1e4,10)
e5 = est_pi2(1e5,10)
e6 = est_pi2(1e6,10)
e7 = est_pi2(1e7,10)
e8 = est_pi2(1e8,10)
estimates = np.array([e3,e4,e5,e6,e7,e8])
plt.plot(estimates,linewidth=3,color='#000000')
plt.xticks(np.arange(7),['1e3','1e4','1e5','1e6','1e7','1e8'])
plt.plot([-100,100],[3.14159,3.14159],linewidth=4,linestyle='--',color='#DAAA00')
plt.title('Pi Estimates by Dart Number')
plt.xlim(0,5)
plt.xlabel('Darts Thrown')
plt.ylabel('Pi Estimate')
plt.show()
'''
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/lines.py | 1 | 37668 | """
This module contains the 2D line class, which can draw lines with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten, is_math_text
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
from artist import allow_rasterization
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.markers import MarkerStyle
# Imported here for backward compatibility, even though they don't
# really belong.
from matplotlib.markers import TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN, \
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN
def segment_hits(cx, cy, x, y, radius):
"""
Determine which vertices and line segments of the polyline (x, y) lie
within *radius* of the point (cx, cy).  Returns the concatenated index
arrays of the matching vertices and segments.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
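# Hedged worked example (toy coordinates, not from the original module):
# for the polyline (0,0)-(1,0)-(2,1) and the query point (1.0, 0.1) with
# radius 0.2, the vertex at index 1 lies inside the radius, which per the
# logic above suppresses the adjacent segment candidates, so the call
# should return array([1]).
#
#     x = np.array([0.0, 1.0, 2.0])
#     y = np.array([0.0, 0.0, 1.0])
#     segment_hits(1.0, 0.1, x, y, 0.2)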
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, e.g. one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
# Need a list ordered with long names first:
drawStyleKeys = _drawStyles_l.keys() + _drawStyles_s.keys()
# Referenced here to maintain API. These are defined in
# MarkerStyle
markers = MarkerStyle.markers
filled_markers = MarkerStyle.filled_markers
fillStyles = MarkerStyle.fillstyles
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
markerfacecoloralt = 'none',
fillstyle = 'full',
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
markevery = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self._marker = MarkerStyle()
self.set_marker(marker)
self.set_markevery(markevery)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markerfacecoloralt(markerfacecoloralt)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self.set_fillstyle(fillstyle)
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
self.ind_offset = 0
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalidx = True
self._invalidy = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalidy or self._invalidx:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
if self._transformed_path is None:
self._transform_path()
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
if self.figure is None:
warnings.warn('no figure set when checking if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
ind += self.ind_offset
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def set_pickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def get_fillstyle(self):
"""
return the marker fillstyle
"""
return self._marker.get_fillstyle()
def set_fillstyle(self, fs):
"""
Set the marker fill style; 'full' means fill the whole marker.
The other options are for half filled markers
ACCEPTS: ['full' | 'left' | 'right' | 'bottom' | 'top']
"""
self._marker.set_fillstyle(fs)
def set_markevery(self, every):
"""
Set the markevery property to subsample the plot when using
markers. Eg if ``markevery=5``, every 5-th marker will be
plotted. *every* can be
None
Every point will be plotted
an integer N
Every N-th marker will be plotted starting with marker 0
A length-2 tuple of integers
every=(start, N) will start at point start and plot every N-th marker
ACCEPTS: None | integer | (startind, stride)
"""
self._markevery = every
def get_markevery(self):
'return the markevery setting'
return self._markevery
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache_always)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache_always)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array (rows are x, y) or two 1D arrays
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
self.set_xdata(x)
self.set_ydata(y)
def recache_always(self):
self.recache(always=True)
def recache(self, always=False):
if always or self._invalidx:
xconv = self.convert_xunits(self._xorig)
if ma.isMaskedArray(self._xorig):
x = ma.asarray(xconv, np.float_)
else:
x = np.asarray(xconv, np.float_)
x = x.ravel()
else:
x = self._x
if always or self._invalidy:
yconv = self.convert_yunits(self._yorig)
if ma.isMaskedArray(self._yorig):
y = ma.asarray(yconv, np.float_)
else:
y = np.asarray(yconv, np.float_)
y = y.ravel()
else:
y = self._y
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, np.float_)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, np.float_)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
self._subslice = False
if (self.axes and len(x) > 100 and self._is_sorted(x) and
self.axes.name == 'rectilinear' and
self.axes.get_xscale() == 'linear' and
self._markevery is None):
self._subslice = True
if hasattr(self, '_path'):
interpolation_steps = self._path._interpolation_steps
else:
interpolation_steps = 1
self._path = Path(self._xy, None, interpolation_steps)
self._transformed_path = None
self._invalidx = False
self._invalidy = False
def _transform_path(self, subslice=None):
# Masked arrays are now handled by the Path class itself
if subslice is not None:
_path = Path(self._xy[subslice,:])
else:
_path = self._path
self._transformed_path = TransformedPath(_path, self.get_transform())
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalidx = True
self._invalidy = True
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
@allow_rasterization
def draw(self, renderer):
if self._invalidy or self._invalidx:
self.recache()
self.ind_offset = 0 # Needed for contains() method.
if self._subslice and self.axes:
# Need to handle monotonically decreasing case also...
x0, x1 = self.axes.get_xbound()
i0, = self._x.searchsorted([x0], 'left')
i1, = self._x.searchsorted([x1], 'right')
subslice = slice(max(i0-1, 0), i1+1)
self.ind_offset = subslice.start
self._transform_path(subslice)
if self._transformed_path is None:
self._transform_path()
if not self.get_visible(): return
renderer.open_group('line2d', self.get_gid())
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
if len(tpath.vertices):
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
marker = self._marker
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
if len(tpath.vertices):
# subsample the markers if markevery is not None
markevery = self.get_markevery()
if markevery is not None:
if iterable(markevery):
startind, stride = markevery
else:
startind, stride = 0, markevery
if tpath.codes is not None:
codes = tpath.codes[startind::stride]
else:
codes = None
vertices = tpath.vertices[startind::stride]
subsampled = Path(vertices, codes)
else:
subsampled = tpath
snap = marker.get_snap_threshold()
if type(snap) == float:
snap = renderer.points_to_pixels(self._markersize) >= snap
gc.set_snap(snap)
marker_path = marker.get_path()
marker_trans = marker.get_transform()
w = renderer.points_to_pixels(self._markersize)
if marker.get_marker() != ',': # Don't scale for pixels
marker_trans = marker_trans.scale(w)
rgbFace = self._get_rgb_face()
renderer.draw_markers(
gc, marker_path, marker_trans, subsampled, affine.frozen(),
rgbFace)
alt_marker_path = marker.get_alt_path()
if alt_marker_path:
alt_marker_trans = marker.get_alt_transform()
alt_marker_trans = alt_marker_trans.scale(w)
rgbFace = self._get_rgb_face(alt=True)
renderer.draw_markers(
gc, alt_marker_path, alt_marker_trans, subsampled,
affine.frozen(), rgbFace)
gc.restore()
gc.restore()
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker.get_marker()
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker.is_filled():
return 'k'
else:
return self._color
else:
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def _get_markerfacecolor(self, alt=False):
if alt:
fc = self._markerfacecoloralt
else:
fc = self._markerfacecolor
if (fc is None or (is_string_like(fc) and fc.lower()=='none') ):
return fc
elif (is_string_like(fc) and fc.lower() == 'auto'):
return self._color
else:
return fc
def get_markerfacecolor(self):
return self._get_markerfacecolor(alt=False)
def get_markerfacecoloralt(self):
return self._get_markerfacecolor(alt=True)
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalidx:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalidy:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalidy or self._invalidx:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalidy or self._invalidx:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
True if line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
``'-'`` solid
``'--'`` dashed
``'-.'`` dash_dot
``':'`` dotted
``'None'`` draw nothing
``' '`` draw nothing
``''`` draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
To set the drawing style (stepping) of the plot.
ACCEPTS: [ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` | ``' '`` | ``''`` ]
and any drawstyle in combination with a linestyle, e.g. ``'steps--'``.
"""
for ds in self.drawStyleKeys: # long names are first in the list
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
break
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
@docstring.dedent_interpd
def set_marker(self, marker):
"""
Set the line marker
%(MarkerTable)s
%(MarkerAccepts)s
"""
self._marker.set_marker(marker)
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color.
ACCEPTS: any matplotlib color
"""
if fc is None:
fc = 'auto'
self._markerfacecolor = fc
def set_markerfacecoloralt(self, fc):
"""
Set the alternate marker face color.
ACCEPTS: any matplotlib color
"""
if fc is None:
fc = 'auto'
self._markerfacecoloralt = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
self._xorig = x
self._invalidx = True
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
self._yorig = y
self._invalidy = True
def set_dashes(self, seq):
"""
Set the dash sequence: a sequence of on/off ink lengths in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markerfacecoloralt = other._markerfacecoloralt
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = MarkerStyle(other._marker.get_marker(),
other._marker.get_fillstyle())
self._drawstyle = other._drawstyle
def _get_rgb_face(self, alt=False):
facecolor = self._get_markerfacecolor(alt=alt)
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_mfcalt(self, val):
'alias for set_markerfacecoloralt'
self.set_markerfacecoloralt(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_mfcalt(self, alt=False):
'alias for get_markerfacecoloralt'
return self.get_markerfacecoloralt()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if the linestyle is dashed'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = MarkerStyle.markers
drawStyles = Line2D.drawStyles
fillStyles = MarkerStyle.fillstyles
docstring.interpd.update(Line2D = artist.kwdoc(Line2D))
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
docstring.dedent_interpd(Line2D.__init__.im_func)
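if __name__ == '__main__':
    # Hedged demo (not part of the library): build a Line2D directly and
    # add it to an Axes.  Everyday code normally goes through plt.plot(),
    # which constructs Line2D instances internally.
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    demo_line = Line2D([0, 1, 2, 3], [0, 1, 0, 1], linestyle='--',
                       marker='o', markevery=2, color='r')
    ax.add_line(demo_line)
    ax.set_xlim(-0.5, 3.5)
    ax.set_ylim(-0.5, 1.5)
    plt.show()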
| gpl-2.0 |
saatvikshah1994/SmartMM | KeywordExtraction/unsupervised/keywordextraction_tfidf.py | 1 | 2513 | from utilities import CandidateSelection,DataClean
from utilities import load_data,cross_validate
from sklearn.pipeline import Pipeline
from keyword_metrics import keyword_prf,keyword_prf_onegram
from nltk.stem import PorterStemmer
import gensim
class Tfidf_KeywordSelection:
def __init__(self,keyword_count,stem=True):
self.keyword_count = keyword_count
self.stem = stem
if self.stem:
self.stemmer = PorterStemmer()
def fit(self,X,y=None):
return self
def predict(self,X):
if self.stem:
for idx in xrange(len(X)):
for idx_cand in xrange(len(X[idx])):
X[idx][idx_cand] = " ".join([self.stemmer.stem(word) for word in X[idx][idx_cand].split()])
corpus_tfidf,dictionary = self.score_keyphrases_by_tfidf(X)
ypred = []
for scores in corpus_tfidf:
scores = sorted(scores,key=lambda x:x[1],reverse=True)[:self.keyword_count]
ypred.append([dictionary[word_idx] for word_idx,score in scores])
return ypred
def score_keyphrases_by_tfidf(self, candidates):
# make gensim dictionary and corpus
dictionary = gensim.corpora.Dictionary(candidates)
corpus = [dictionary.doc2bow(candidate) for candidate in candidates]
# transform corpus with tf*idf model
tfidf = gensim.models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
return corpus_tfidf, dictionary
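# Hedged sketch of the gensim scoring step in isolation (toy tokens, not
# from the data set): each entry of ``candidates`` is the token list for
# one document, and the returned corpus yields (token_id, tf-idf weight)
# pairs that predict() sorts to pick the top keywords.
#
#     toy = [["deep", "learning", "model"], ["graph", "model", "inference"]]
#     selector = Tfidf_KeywordSelection(keyword_count=2, stem=False)
#     corpus_tfidf, dictionary = selector.score_keyphrases_by_tfidf(toy)
#     top = [sorted(doc, key=lambda x: x[1], reverse=True) for doc in corpus_tfidf]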
if __name__ == '__main__':
ids,docs,keywords_doc = load_data()
ids = ids[:50]
docs = docs[:50]
keywords_doc = keywords_doc[:50]
to_stem=True
pipeline = Pipeline([
('cleaner',DataClean(clean_list=[
["[^a-z\.-]"," "], # only letters,fullstops
[" [ ]+", " "], # remove extra spaces
])),
('candidate_selector',CandidateSelection()),
('keyword_selector',Tfidf_KeywordSelection(keyword_count=10,stem=to_stem))
])
pipeline.fit(docs,keywords_doc)
cross_validate((docs,keywords_doc),pipeline,keyword_prf_onegram,stem_y=to_stem)
# print pipeline.predict(docs)
# print keywords_doc
# keyword_prf_onegram - top 10 keywords - NounAdj Heuristic Word Extracter
# keyword_prf - top 10 keywords - NounAdj Heuristic Word Extracter
# keyword_prf_onegram - top 15 keywords - NounAdj Heuristic Word Extracter
# keyword_prf - top 15 keywords - NounAdj Heuristic Word Extracter
| mit |
hugochan/KATE | run_classifier.py | 1 | 5418 | '''
Created on Dec, 2016
@author: hugo
'''
from __future__ import absolute_import
import argparse
import numpy as np
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import ShuffleSplit
from autoencoder.testing.classifier import multiclass_classifier, multilabel_classifier
from autoencoder.utils.io_utils import load_json, load_pickle
def main():
parser = argparse.ArgumentParser()
parser.add_argument('train_doc_codes', type=str, help='path to the train doc codes file')
parser.add_argument('train_doc_labels', type=str, help='path to the train doc labels file')
parser.add_argument('test_doc_codes', type=str, help='path to the test doc codes file')
parser.add_argument('test_doc_labels', type=str, help='path to the test doc labels file')
parser.add_argument('-nv', '--n_val', type=int, default=1000, help='size of validation set (default 1000)')
parser.add_argument('-ne', '--n_epoch', type=int, default=100, help='num of epoches (default 100)')
parser.add_argument('-bs', '--batch_size', type=int, default=100, help='batch size (default 100)')
parser.add_argument('-cv', '--cross_validation', type=int, help='k-fold cross validation')
parser.add_argument('-mlc', '--multilabel_clf', action='store_true', help='multilabel classification flag')
args = parser.parse_args()
# autoencoder
train_doc_codes = load_json(args.train_doc_codes)
train_doc_labels = load_json(args.train_doc_labels)
test_doc_codes = load_json(args.test_doc_codes)
test_doc_labels = load_json(args.test_doc_labels)
X_train = np.r_[train_doc_codes.values()]
Y_train = [train_doc_labels[i] for i in train_doc_codes]
X_test = np.r_[test_doc_codes.values()]
Y_test = [test_doc_labels[i] for i in test_doc_codes]
# # DBN
# X_train = np.array(load_pickle(args.train_doc_codes))
# Y_train = load_pickle(args.train_doc_labels)
# X_test = np.array(load_pickle(args.test_doc_codes))
# Y_test = load_pickle(args.test_doc_labels)
# import pdb;pdb.set_trace()
if args.multilabel_clf:
encoder = MultiLabelBinarizer()
encoder.fit(Y_train + Y_test)
Y_train = encoder.transform(Y_train)
Y_test = encoder.transform(Y_test)
else:
Y = Y_train + Y_test
n_train = len(Y_train)
n_test = len(Y_test)
encoder = LabelEncoder()
Y = np_utils.to_categorical(encoder.fit_transform(Y))
Y_train = Y[:n_train]
Y_test = Y[-n_test:]
seed = 7
np.random.seed(seed)
if not args.cross_validation:
val_idx = np.random.choice(range(X_train.shape[0]), args.n_val, replace=False)
train_idx = list(set(range(X_train.shape[0])) - set(val_idx))
X_new_train = X_train[train_idx]
Y_new_train = Y_train[train_idx]
X_new_val = X_train[val_idx]
Y_new_val = Y_train[val_idx]
print 'train: %s, val: %s, test: %s' % (X_new_train.shape[0], X_new_val.shape[0], X_test.shape[0])
if args.multilabel_clf:
results = multilabel_classifier(X_new_train, Y_new_train, X_new_val, Y_new_val, \
X_test, Y_test, nb_epoch=args.n_epoch, batch_size=args.batch_size, seed=seed)
print 'f1 score on test set: macro_f1: %s, micro_f1: %s' % tuple(results)
else:
results = multiclass_classifier(X_new_train, Y_new_train, X_new_val, Y_new_val, \
X_test, Y_test, nb_epoch=args.n_epoch, batch_size=args.batch_size, seed=seed)
print 'acc on test set: %s' % results
else:
X = np.concatenate((X_train, X_test), axis=0)
Y = np.concatenate((Y_train, Y_test), axis=0)
ss = ShuffleSplit(n_splits=int(args.cross_validation), test_size=X_test.shape[0], random_state=seed)
results = []
for train_idx, test_idx in ss.split(X):
val_idx = np.random.choice(train_idx, args.n_val, replace=False)
new_train_idx = list(set(train_idx) - set(val_idx))
X_new_train = X[new_train_idx]
Y_new_train = Y[new_train_idx]
X_new_val = X[val_idx]
Y_new_val = Y[val_idx]
if args.multilabel_clf:
results.append(multilabel_classifier(X_new_train, Y_new_train, X_new_val, Y_new_val, \
X_test, Y_test, nb_epoch=args.n_epoch, batch_size=args.batch_size, seed=seed))
else:
results.append(multiclass_classifier(X_new_train, Y_new_train, X_new_val, Y_new_val, \
X[test_idx], Y[test_idx], nb_epoch=args.n_epoch, batch_size=args.batch_size, seed=seed))
if args.multilabel_clf:
macro_f1, micro_f1 = zip(*results)
macro_mean = np.mean(macro_f1)
macro_std = np.std(macro_f1)
micro_mean = np.mean(micro_f1)
micro_std = np.std(micro_f1)
print 'f1 score on %s-fold cross validation: macro_f1: %s (%s), micro_f1: %s (%s)' \
% (int(args.cross_validation), macro_mean, macro_std, micro_mean, micro_std)
else:
mean = np.mean(results)
std = np.std(results)
print 'acc on %s-fold cross validation: %s (%s)' % (int(args.cross_validation), mean, std)
# import pdb;pdb.set_trace()
if __name__ == '__main__':
main()
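# Hedged example invocation (the JSON file names are placeholders, not files
# shipped with the repo); the flags map to the argparse options in main():
#
#   python run_classifier.py train_codes.json train_labels.json \
#       test_codes.json test_labels.json -ne 50 -bs 128 -cv 5 -mlc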
| bsd-3-clause |
cast051/ardupilot_cast | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
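if __name__ == '__main__':
    # Hedged standalone sketch (not how MAVProxy launches this window):
    # drive the frame with one end of a multiprocessing Pipe.  MAVProxy's
    # magcal module sends MAG_CAL_PROGRESS / MAG_CAL_REPORT messages over
    # the other end, and the string 'close' to shut the window down.
    import multiprocessing
    parent_conn, child_conn = multiprocessing.Pipe()
    app = wx.App(False)
    frame = MagcalFrame(child_conn)
    frame.Show()
    app.MainLoop()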
| gpl-3.0 |
gfyoung/pandas | pandas/tests/frame/methods/test_append.py | 1 | 8611 | import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range, timedelta_range
import pandas._testing as tm
# TODO td.skip_array_manager_not_yet_implemented
# appending with reindexing not yet working
class TestDataFrameAppend:
def test_append_multiindex(self, multiindex_dataframe_random_data, frame_or_series):
obj = multiindex_dataframe_random_data
if frame_or_series is Series:
obj = obj["A"]
a = obj[:5]
b = obj[5:]
result = a.append(b)
tm.assert_equal(result, obj)
def test_append_empty_list(self):
# GH 28769
df = DataFrame()
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
assert result is not df
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
result = df.append([])
expected = df
tm.assert_frame_equal(result, expected)
assert result is not df # .append() should return a new object
@td.skip_array_manager_not_yet_implemented
def test_append_series_dict(self):
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
series = df.loc[4]
msg = "Indexes have overlapping values"
with pytest.raises(ValueError, match=msg):
df.append(series, verify_integrity=True)
series.name = None
msg = "Can only append a Series if ignore_index=True"
with pytest.raises(TypeError, match=msg):
df.append(series, verify_integrity=True)
result = df.append(series[::-1], ignore_index=True)
expected = df.append(
DataFrame({0: series[::-1]}, index=df.columns).T, ignore_index=True
)
tm.assert_frame_equal(result, expected)
# dict
result = df.append(series.to_dict(), ignore_index=True)
tm.assert_frame_equal(result, expected)
result = df.append(series[::-1][:3], ignore_index=True)
expected = df.append(
DataFrame({0: series[::-1][:3]}).T, ignore_index=True, sort=True
)
tm.assert_frame_equal(result, expected.loc[:, result.columns])
msg = "Can only append a dict if ignore_index=True"
with pytest.raises(TypeError, match=msg):
df.append(series.to_dict())
# can append when name set
row = df.loc[4]
row.name = 5
result = df.append(row)
expected = df.append(df[-1:], ignore_index=True)
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented
def test_append_list_of_series_dicts(self):
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
dicts = [x.to_dict() for idx, x in df.iterrows()]
result = df.append(dicts, ignore_index=True)
expected = df.append(df, ignore_index=True)
tm.assert_frame_equal(result, expected)
# different columns
dicts = [
{"foo": 1, "bar": 2, "baz": 3, "peekaboo": 4},
{"foo": 5, "bar": 6, "baz": 7, "peekaboo": 8},
]
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented
def test_append_missing_cols(self):
# GH22252
# exercise the conditional branch in append method where the data
# to be appended is a list and does not contain all columns that are in
# the target DataFrame
df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"])
dicts = [{"foo": 9}, {"bar": 10}]
with tm.assert_produces_warning(None):
result = df.append(dicts, ignore_index=True, sort=True)
expected = df.append(DataFrame(dicts), ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
def test_append_empty_dataframe(self):
# Empty df append empty df
df1 = DataFrame()
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-empty df append empty df
df1 = DataFrame(np.random.randn(5, 2))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Empty df with columns append empty df
df1 = DataFrame(columns=["bar", "foo"])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
# Non-Empty df with columns append empty df
df1 = DataFrame(np.random.randn(5, 2), columns=["bar", "foo"])
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_not_yet_implemented
def test_append_dtypes(self):
# GH 5754
# row appends of different dtypes (so need to do by-item)
# can sometimes infer the correct type
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(5))
df2 = DataFrame()
result = df1.append(df2)
expected = df1.copy()
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": "foo"}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame({"bar": [Timestamp("20130101"), "foo"]})
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": np.nan}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": np.nan}, index=range(1, 2), dtype=object)
result = df1.append(df2)
expected = DataFrame(
{"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")}
)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": np.nan}, index=range(1))
df2 = DataFrame({"bar": Timestamp("20130101")}, index=range(1, 2))
result = df1.append(df2)
expected = DataFrame(
{"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")}
)
tm.assert_frame_equal(result, expected)
df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1))
df2 = DataFrame({"bar": 1}, index=range(1, 2), dtype=object)
result = df1.append(df2)
expected = DataFrame({"bar": Series([Timestamp("20130101"), 1])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"timestamp", ["2019-07-19 07:04:57+0100", "2019-07-19 07:04:57"]
)
def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp):
# GH 30238
tz = tz_naive_fixture
df = DataFrame([Timestamp(timestamp, tz=tz)])
result = df.append(df.iloc[0]).iloc[-1]
expected = Series(Timestamp(timestamp, tz=tz), name=0)
tm.assert_series_equal(result, expected)
@td.skip_array_manager_not_yet_implemented
@pytest.mark.parametrize(
"data, dtype",
[
([1], pd.Int64Dtype()),
([1], pd.CategoricalDtype()),
([pd.Interval(left=0, right=5)], pd.IntervalDtype()),
([pd.Period("2000-03", freq="M")], pd.PeriodDtype("M")),
([1], pd.SparseDtype()),
],
)
def test_other_dtypes(self, data, dtype):
df = DataFrame(data, dtype=dtype)
result = df.append(df.iloc[0]).iloc[-1]
expected = Series(data, name=0, dtype=dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
def test_append_numpy_bug_1681(self, dtype):
# another datetime64 bug
if dtype == "datetime64[ns]":
index = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
else:
index = timedelta_range("1 days", "10 days", freq="2D")
df = DataFrame()
other = DataFrame({"A": "foo", "B": index}, index=index)
result = df.append(other)
assert (result["B"] == index).all()
| bsd-3-clause |
bloyl/mne-python | mne/viz/epochs.py | 1 | 43589 | """Functions to plot epochs data."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
# Jona Sassenhagen <[email protected]>
# Stefan Repplinger <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from collections import Counter
from copy import deepcopy
import warnings
import numpy as np
from .raw import _setup_channel_selections
from ..defaults import _handle_default
from ..utils import verbose, logger, warn, fill_doc, _check_option
from ..io.meas_info import create_info, _validate_type
from ..io.pick import (_get_channel_types, _picks_to_idx, _DATA_CH_TYPES_SPLIT,
_VALID_CHANNEL_TYPES)
from .utils import (tight_layout, _setup_vmin_vmax, plt_show, _check_cov,
_compute_scalings, DraggableColorbar, _setup_cmap,
_handle_decim, _set_title_multiple_electrodes,
_make_combine_callable, _set_window_title,
_make_event_color_dict, _get_channel_plotting_order)
@fill_doc
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap=None, fig=None,
axes=None, overlay_times=None, combine=None,
group_by=None, evoked=True, ts_args=None, title=None,
clear=False):
"""Plot Event Related Potential / Fields image.
Parameters
----------
epochs : instance of Epochs
The epochs.
%(picks_good_data)s
``picks`` interacts with ``group_by`` and ``combine`` to determine the
number of figures generated; see Notes.
sigma : float
The standard deviation of a Gaussian smoothing window applied along
the epochs axis of the image. If 0, no smoothing is applied.
Defaults to 0.
vmin : None | float | callable
The min value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
If vmin is None and multiple plots are returned, the limit is
equalized within channel types.
Hint: to specify the lower limit of the data, use
``vmin=lambda data: data.min()``.
vmax : None | float | callable
The max value in the image (and the ER[P/F]). The unit is µV for
EEG channels, fT for magnetometers and fT/cm for gradiometers.
If vmax is None and multiple plots are returned, the limit is
equalized within channel types.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not ``None``, order is used to reorder the epochs along the y-axis
of the image. If it is an array of :class:`int`, its length should
match the number of good epochs. If it is a callable it should accept
two positional parameters (``times`` and ``data``, where
``data.shape == (len(good_epochs), len(times))``) and return an
:class:`array <numpy.ndarray>` of indices that will sort ``data`` along
its first axis.
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to ``units=dict(eeg='µV', grad='fT/cm', mag='fT')``.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)``.
cmap : None | colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True).
If None, "RdBu_r" is used, unless the data is all positive, in which
case "Reds" is used.
fig : Figure | None
:class:`~matplotlib.figure.Figure` instance to draw the image to.
Figure must contain the correct number of axes for drawing the epochs
image, the evoked response, and a colorbar (depending on values of
``evoked`` and ``colorbar``). If ``None`` a new figure is created.
Defaults to ``None``.
axes : list of Axes | dict of list of Axes | None
List of :class:`~matplotlib.axes.Axes` objects in which to draw the
image, evoked response, and colorbar (in that order). Length of list
must be 1, 2, or 3 (depending on values of ``colorbar`` and ``evoked``
parameters). If a :class:`dict`, each entry must be a list of Axes
objects with the same constraints as above. If both ``axes`` and
``group_by`` are dicts, their keys must match. Providing non-``None``
values for both ``fig`` and ``axes`` results in an error. Defaults to
``None``.
overlay_times : array_like, shape (n_epochs,) | None
Times (in seconds) at which to draw a line on the corresponding row of
the image (e.g., a reaction time associated with each epoch). Note that
``overlay_times`` should be ordered to correspond with the
:class:`~mne.Epochs` object (i.e., ``overlay_times[0]`` corresponds to
``epochs[0]``, etc).
%(combine)s
If callable, the callable must accept one positional input (data of
shape ``(n_epochs, n_channels, n_times)``) and return an
:class:`array <numpy.ndarray>` of shape ``(n_epochs, n_times)``. For
example::
combine = lambda data: np.median(data, axis=1)
If ``combine`` is ``None``, channels are combined by computing GFP,
unless ``group_by`` is also ``None`` and ``picks`` is a list of
specific channels (not channel types), in which case no combining is
performed and each channel gets its own figure. See Notes for further
details. Defaults to ``None``.
group_by : None | dict
Specifies which channels are aggregated into a single figure, with
aggregation method determined by the ``combine`` parameter. If not
``None``, one :class:`~matplotlib.figure.Figure` is made per dict
entry; the dict key will be used as the figure title and the dict
values must be lists of picks (either channel names or integer indices
of ``epochs.ch_names``). For example::
group_by=dict(Left_ROI=[1, 2, 3, 4], Right_ROI=[5, 6, 7, 8])
Note that within a dict entry all channels must have the same type.
``group_by`` interacts with ``picks`` and ``combine`` to determine the
number of figures generated; see Notes. Defaults to ``None``.
evoked : bool
Draw the ER[P/F] below the image or not.
ts_args : None | dict
Arguments passed to a call to `~mne.viz.plot_compare_evokeds` to style
the evoked plot below the image. Defaults to an empty dictionary,
meaning `~mne.viz.plot_compare_evokeds` will be called with default
parameters.
title : None | str
If :class:`str`, will be plotted as figure title. Otherwise, the
title will indicate channel(s) or channel type being plotted. Defaults
to ``None``.
clear : bool
Whether to clear the axes before plotting (if ``fig`` or ``axes`` are
provided). Defaults to ``False``.
Returns
-------
figs : list of Figure
One figure per channel, channel type, or group, depending on values of
``picks``, ``group_by``, and ``combine``. See Notes.
Notes
-----
You can control how channels are aggregated into one figure or plotted in
separate figures through a combination of the ``picks``, ``group_by``, and
``combine`` parameters. If ``group_by`` is a :class:`dict`, the result is
one :class:`~matplotlib.figure.Figure` per dictionary key (for any valid
values of ``picks`` and ``combine``). If ``group_by`` is ``None``, the
number and content of the figures generated depends on the values of
``picks`` and ``combine``, as summarized in this table:
.. cssclass:: table-bordered
.. rst-class:: midvalign
+----------+----------------------------+------------+-------------------+
| group_by | picks | combine | result |
+==========+============================+============+===================+
| | None, int, list of int, | None, | |
| dict | ch_name, list of ch_names, | string, or | 1 figure per |
| | ch_type, list of ch_types | callable | dict key |
+----------+----------------------------+------------+-------------------+
| | None, | None, | |
| | ch_type, | string, or | 1 figure per |
| | list of ch_types | callable | ch_type |
| None +----------------------------+------------+-------------------+
| | int, | None | 1 figure per pick |
| | ch_name, +------------+-------------------+
| | list of int, | string or | 1 figure |
| | list of ch_names | callable | |
+----------+----------------------------+------------+-------------------+
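For example, a hypothetical call requesting one mean-combined figure per
channel type (``group_by=None`` and ``picks=None`` in the table above) might
look like::

    figs = plot_epochs_image(epochs, picks=None, group_by=None,
                             combine='mean')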
"""
from scipy.ndimage import gaussian_filter1d
from .. import EpochsArray
_validate_type(group_by, (dict, None), 'group_by')
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
if set(units) != set(scalings):
raise ValueError('Scalings and units must have the same keys.')
# is picks a channel type (or None)?
picks, picked_types = _picks_to_idx(epochs.info, picks, return_kind=True)
ch_types = _get_channel_types(epochs.info, picks)
# `combine` defaults to 'gfp' unless picks are specific channels and
# there was no group_by passed
combine_given = combine is not None
if combine is None and (group_by is not None or picked_types):
combine = 'gfp'
# convert `combine` into callable (if None or str)
combine_func = _make_combine_callable(combine)
# handle ts_args (params for the evoked time series)
ts_args = dict() if ts_args is None else ts_args
manual_ylims = 'ylim' in ts_args
if combine is not None:
ts_args['show_sensors'] = False
vlines = [0] if (epochs.times[0] < 0 < epochs.times[-1]) else []
ts_defaults = dict(colors={'cond': 'k'}, title='', show=False,
truncate_yaxis=False, truncate_xaxis=False,
vlines=vlines, legend=False)
ts_defaults.update(**ts_args)
ts_args = ts_defaults.copy()
# construct a group_by dict if one wasn't supplied
if group_by is None:
if picked_types:
# one fig per ch_type
group_by = {ch_type: picks[np.array(ch_types) == ch_type]
for ch_type in set(ch_types)
if ch_type in _DATA_CH_TYPES_SPLIT}
elif combine is None:
# one fig per pick
group_by = {epochs.ch_names[pick]: [pick] for pick in picks}
else:
# one fig to rule them all
ch_names = np.array(epochs.ch_names)[picks].tolist()
key = _set_title_multiple_electrodes(None, combine, ch_names)
group_by = {key: picks}
else:
group_by = deepcopy(group_by)
# check for heterogeneous sensor type combinations / "combining" 1 channel
for this_group, these_picks in group_by.items():
this_ch_type = np.array(ch_types)[np.in1d(picks, these_picks)]
if len(set(this_ch_type)) > 1:
types = ', '.join(set(this_ch_type))
raise ValueError('Cannot combine sensors of different types; "{}" '
'contains types {}.'.format(this_group, types))
# now we know they're all the same type...
group_by[this_group] = dict(picks=these_picks, ch_type=this_ch_type[0],
title=title)
# are they trying to combine a single channel?
if len(these_picks) < 2 and combine_given:
warn('Only one channel in group "{}"; cannot combine by method '
'"{}".'.format(this_group, combine))
# check for compatible `fig` / `axes`; instantiate figs if needed; add
# fig(s) and axes into group_by
group_by = _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar,
clear=clear)
# prepare images in advance to get consistent vmin/vmax.
# At the same time, create a subsetted epochs object for each group
data = epochs.get_data()
vmin_vmax = {ch_type: dict(images=list(), norm=list())
for ch_type in set(ch_types)}
for this_group, this_group_dict in group_by.items():
these_picks = this_group_dict['picks']
this_ch_type = this_group_dict['ch_type']
this_ch_info = [epochs.info['chs'][n] for n in these_picks]
these_ch_names = np.array(epochs.info['ch_names'])[these_picks]
this_data = data[:, these_picks]
# create subsetted epochs object
this_info = create_info(sfreq=epochs.info['sfreq'],
ch_names=list(these_ch_names),
ch_types=[this_ch_type] * len(these_picks))
this_info['chs'] = this_ch_info
this_epochs = EpochsArray(this_data, this_info, tmin=epochs.times[0])
# apply scalings (only to image, not epochs object), combine channels
this_image = combine_func(this_data * scalings[this_ch_type])
# handle `order`. NB: this can potentially yield different orderings
# in each figure!
this_image, _overlay_times = _order_epochs(this_image, epochs.times,
order, overlay_times)
this_norm = np.all(this_image > 0)
# apply smoothing
if sigma > 0.:
this_image = gaussian_filter1d(this_image, sigma=sigma, axis=0,
mode='nearest')
# update the group_by and vmin_vmax dicts
group_by[this_group].update(image=this_image, epochs=this_epochs,
norm=this_norm)
vmin_vmax[this_ch_type]['images'].append(this_image)
vmin_vmax[this_ch_type]['norm'].append(this_norm)
# compute overall vmin/vmax for images
for ch_type, this_vmin_vmax_dict in vmin_vmax.items():
image_list = this_vmin_vmax_dict['images']
image_stack = np.stack(image_list)
norm = all(this_vmin_vmax_dict['norm'])
vmin_vmax[ch_type] = _setup_vmin_vmax(image_stack, vmin, vmax, norm)
del image_stack, vmin, vmax
# prepare to plot
auto_ylims = {ch_type: [0., 0.] for ch_type in set(ch_types)}
# plot
for this_group, this_group_dict in group_by.items():
this_ch_type = this_group_dict['ch_type']
this_axes_dict = this_group_dict['axes']
vmin, vmax = vmin_vmax[this_ch_type]
# plot title
if this_group_dict['title'] is None:
title = _handle_default('titles').get(this_group, this_group)
if isinstance(combine, str) and len(title):
_comb = combine.upper() if combine == 'gfp' else combine
_comb = 'std. dev.' if _comb == 'std' else _comb
title += f' ({_comb})'
# plot the image
this_fig = _plot_epochs_image(
this_group_dict['image'], epochs=this_group_dict['epochs'],
picks=picks, colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
style_axes=True, norm=this_group_dict['norm'],
unit=units[this_ch_type], ax=this_axes_dict, show=False,
title=title, combine=combine, combine_given=combine_given,
overlay_times=_overlay_times, evoked=evoked, ts_args=ts_args)
group_by[this_group].update(fig=this_fig)
# detect ylims across figures
if evoked and not manual_ylims:
# ensure get_ylim works properly
this_axes_dict['evoked'].figure.canvas.draw_idle()
this_bot, this_top = this_axes_dict['evoked'].get_ylim()
this_min = min(this_bot, this_top)
this_max = max(this_bot, this_top)
curr_min, curr_max = auto_ylims[this_ch_type]
auto_ylims[this_ch_type] = [min(curr_min, this_min),
max(curr_max, this_max)]
# equalize ylims across figures (does not adjust ticks)
if evoked:
for this_group_dict in group_by.values():
ax = this_group_dict['axes']['evoked']
ch_type = this_group_dict['ch_type']
if not manual_ylims:
args = auto_ylims[ch_type]
if 'invert_y' in ts_args:
args = args[::-1]
ax.set_ylim(*args)
plt_show(show)
# impose deterministic order of returned objects
return_order = np.array(sorted(group_by))
are_ch_types = np.in1d(return_order, _VALID_CHANNEL_TYPES)
if any(are_ch_types):
return_order = np.concatenate((return_order[are_ch_types],
return_order[~are_ch_types]))
return [group_by[group]['fig'] for group in return_order]
def _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar, clear=False):
"""Check user-provided fig/axes compatibility with plot_epochs_image."""
from matplotlib.pyplot import figure, Axes, subplot2grid
n_axes = 1 + int(evoked) + int(colorbar)
ax_names = ('image', 'evoked', 'colorbar')
ax_names = np.array(ax_names)[np.where([True, evoked, colorbar])]
prefix = 'Since evoked={} and colorbar={}, '.format(evoked, colorbar)
# got both fig and axes
if fig is not None and axes is not None:
raise ValueError('At least one of "fig" or "axes" must be None; got '
'fig={}, axes={}.'.format(fig, axes))
# got fig=None and axes=None: make fig(s) and axes
if fig is None and axes is None:
axes = dict()
colspan = 9 if colorbar else 10
rowspan = 2 if evoked else 3
shape = (3, 10)
for this_group in group_by:
this_fig = figure()
_set_window_title(this_fig, this_group)
subplot2grid(shape, (0, 0), colspan=colspan, rowspan=rowspan,
fig=this_fig)
if evoked:
subplot2grid(shape, (2, 0), colspan=colspan, rowspan=1,
fig=this_fig)
if colorbar:
subplot2grid(shape, (0, 9), colspan=1, rowspan=rowspan,
fig=this_fig)
axes[this_group] = this_fig.axes
# got a Figure instance
if fig is not None:
# If we're re-plotting into a fig made by a previous call to
# `plot_image`, be forgiving of presence/absence of sensor inset axis.
if len(fig.axes) not in (n_axes, n_axes + 1):
raise ValueError('{}"fig" must contain {} axes, got {}.'
''.format(prefix, n_axes, len(fig.axes)))
if len(list(group_by)) != 1:
raise ValueError('When "fig" is not None, "group_by" can only '
'have one group (got {}: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
if clear: # necessary if re-plotting into previous figure
_ = [ax.clear() for ax in fig.axes]
if len(fig.axes) > n_axes: # get rid of sensor inset
fig.axes[-1].remove()
_set_window_title(fig, key)
axes = {key: fig.axes}
# got an Axes instance, be forgiving (if evoked and colorbar are False)
if isinstance(axes, Axes):
axes = [axes]
# got an ndarray; be forgiving
if isinstance(axes, np.ndarray):
axes = axes.ravel().tolist()
# got a list of axes, make it a dict
if isinstance(axes, list):
if len(axes) != n_axes:
raise ValueError('{}"axes" must be length {}, got {}.'
''.format(prefix, n_axes, len(axes)))
# for list of axes to work, must be only one group
if len(list(group_by)) != 1:
raise ValueError('When axes is a list, can only plot one group '
'(got {} groups: {}).'
.format(len(group_by), ', '.join(group_by)))
key = list(group_by)[0]
axes = {key: axes}
# got a dict of lists of axes, make it dict of dicts
if isinstance(axes, dict):
# in theory a user could pass a dict of axes but *NOT* pass a group_by
# dict, but that is forbidden in the docstring so it shouldn't happen.
# The next test could fail in that case because we've constructed a
# group_by dict and the user won't have known what keys we chose.
if set(axes) != set(group_by):
raise ValueError('If "axes" is a dict its keys ({}) must match '
'the keys in "group_by" ({}).'
.format(list(axes), list(group_by)))
for this_group, this_axes_list in axes.items():
if len(this_axes_list) != n_axes:
raise ValueError('{}each value in "axes" must be a list of {} '
'axes, got {}.'.format(prefix, n_axes,
len(this_axes_list)))
# NB: next line assumes all axes in each list are in same figure
group_by[this_group]['fig'] = this_axes_list[0].get_figure()
group_by[this_group]['axes'] = {key: axis for key, axis in
zip(ax_names, this_axes_list)}
return group_by
def _order_epochs(data, times, order=None, overlay_times=None):
"""Sort epochs image data (2D). Helper for plot_epochs_image."""
n_epochs = len(data)
if overlay_times is not None:
if len(overlay_times) != n_epochs:
raise ValueError(
f'size of overlay_times parameter ({len(overlay_times)}) does '
f'not match the number of epochs ({n_epochs}).')
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if (times_min < times[0]) or (times_max > times[-1]):
warn('Some values in overlay_times fall outside of the epochs '
f'time interval (between {times[0]} s and {times[-1]} s)')
if callable(order):
order = order(times, data)
if order is not None:
if len(order) != n_epochs:
raise ValueError(f'If order is a {type(order).__name__}, its '
f'length ({len(order)}) must match the length of '
f'the data ({n_epochs}).')
order = np.array(order)
data = data[order]
if overlay_times is not None:
overlay_times = overlay_times[order]
return data, overlay_times
def _plot_epochs_image(image, style_axes=True, epochs=None, picks=None,
vmin=None, vmax=None, colorbar=False, show=False,
unit=None, cmap=None, ax=None, overlay_times=None,
title=None, evoked=False, ts_args=None, combine=None,
combine_given=False, norm=False):
"""Plot epochs image. Helper function for plot_epochs_image."""
from matplotlib.ticker import AutoLocator
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
tmin = epochs.times[0]
tmax = epochs.times[-1]
ax_im = ax['image']
fig = ax_im.get_figure()
# draw the image
cmap = _setup_cmap(cmap, norm=norm)
n_epochs = len(image)
extent = [tmin, tmax, 0, n_epochs]
im = ax_im.imshow(image, vmin=vmin, vmax=vmax, cmap=cmap[0], aspect='auto',
origin='lower', interpolation='nearest', extent=extent)
# optional things
if style_axes:
ax_im.set_title(title)
ax_im.set_ylabel('Epochs')
if not evoked:
ax_im.set_xlabel('Time (s)')
ax_im.axis('auto')
ax_im.axis('tight')
ax_im.axvline(0, color='k', linewidth=1, linestyle='--')
if overlay_times is not None:
ax_im.plot(overlay_times, 0.5 + np.arange(n_epochs), 'k',
linewidth=2)
ax_im.set_xlim(tmin, tmax)
# draw the evoked
if evoked:
from . import plot_compare_evokeds
pass_combine = (combine if combine_given else None)
_picks = [0] if len(picks) == 1 else None # prevent applying GFP
plot_compare_evokeds({'cond': list(epochs.iter_evoked(copy=False))},
picks=_picks, axes=ax['evoked'],
combine=pass_combine, **ts_args)
ax['evoked'].set_xlim(tmin, tmax)
ax['evoked'].lines[0].set_clip_on(True)
ax['evoked'].collections[0].set_clip_on(True)
ax['evoked'].get_shared_x_axes().join(ax['evoked'], ax_im)
# fix the axes for proper updating during interactivity
loc = ax_im.xaxis.get_major_locator()
ax['evoked'].xaxis.set_major_locator(loc)
ax['evoked'].yaxis.set_major_locator(AutoLocator())
# draw the colorbar
if colorbar:
from matplotlib.pyplot import colorbar as cbar
this_colorbar = cbar(im, cax=ax['colorbar'])
this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12)
if cmap[1]:
ax_im.CB = DraggableColorbar(this_colorbar, im)
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
tight_layout(fig=fig)
# finish
plt_show(show)
return fig
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown subj',
color=(0.8, 0.8, 0.8), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs.
Parameters
----------
drop_log : list of list
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str | None
The subject name to use in the title of the plot. If ``None``, do not
display a subject name.
.. versionchanged:: 0.23
Added support for ``None``.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
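A typical (hypothetical) call simply passes the log attached to an epochs
object, e.g. ``plot_drop_log(epochs.drop_log)``.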
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
percent = _drop_log_stats(drop_log, ignore)
if percent < threshold:
logger.info('Percent dropped epochs < supplied threshold; not '
'plotting drop log.')
return
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
counts = np.array(list(scores.values()))
# init figure, handle easy case (no drops)
fig, ax = plt.subplots()
title = f'{percent:.1f}% of all epochs rejected'
if subject is not None:
title = f'{subject}: {title}'
ax.set_title(title)
if len(ch_names) == 0:
ax.text(0.5, 0.5, 'No drops', ha='center', fontsize=14)
return fig
# count epochs that aren't fully caught by `ignore`
n_used = sum([any(ch not in ignore for ch in d) or len(d) == 0
for d in drop_log])
# calc plot values
n_bars = min(n_max_plot, len(ch_names))
x = np.arange(n_bars)
y = 100 * counts / n_used
order = np.flipud(np.argsort(y))
ax.bar(x, y[order[:n_bars]], color=color, width=width, align='center')
ax.set_xticks(x)
ax.set_xticklabels(ch_names[order[:n_bars]], rotation=45, size=10,
horizontalalignment='right')
ax.set_ylabel('% of epochs rejected')
ax.grid(axis='y')
tight_layout(pad=1, fig=fig)
plt_show(show)
return fig
@fill_doc
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20, n_channels=20,
title=None, events=None, event_color=None,
order=None, show=True, block=False, decim='auto',
noise_cov=None, butterfly=False, show_scrollbars=True,
epoch_colors=None, event_id=None, group_by='type'):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object.
%(picks_good_data)s
scalings : dict | 'auto' | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded,
a subset of epochs up to 100 MB will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4,
whitened=10.)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
events : None | array, shape (n_events, 3)
Events to show with vertical bars. You can use `~mne.viz.plot_events`
as a legend for the colors. By default, the coloring scheme is the
same. Defaults to ``None``.
.. warning:: If the epochs have been resampled, the events no longer
align with the data.
.. versionadded:: 0.14.0
%(event_color)s
Defaults to ``None``.
order : array of str | None
Order in which to plot channel types.
.. versionadded:: 0.18.0
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate at least three times
larger than ``info['lowpass']`` (e.g., a 40 Hz lowpass will result in
at least a 120 Hz displayed sample rate).
.. versionadded:: 0.15.0
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
magnetometers and gradiometers may introduce differences in scaling;
consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
butterfly : bool
Whether to directly call the butterfly view.
.. versionadded:: 0.18.0
%(show_scrollbars)s
epoch_colors : list of (n_epochs) list (of n_channels) | None
Colors to use for individual epochs. If None, use default colors.
event_id : dict | None
Dictionary of event labels (e.g. 'aud_l') as keys and associated event
integers as values. Useful when ``events`` contains event numbers not
present in ``epochs.event_id`` (e.g., because of event subselection).
Values in ``event_id`` will take precedence over those in
``epochs.event_id`` when there are overlapping keys.
.. versionadded:: 0.20
%(browse_group_by)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., ``mpl.use('TkAgg')`` should work). Full screen mode can be toggled
with the F11 key. The number of epochs and channels per view can be adjusted
with the home/end and page down/page up keys. These can also be set through
the options dialog by pressing the ``o`` key. The ``h`` key plots a histogram
of peak-to-peak values along with the used rejection thresholds. The butterfly
plot can be toggled with the ``b`` key. A right mouse click adds a vertical
line to the plot. Click the 'help' button at the bottom left corner of the
plotter to
view all the options.
.. versionadded:: 0.10.0
"""
from ._figure import _browse_figure
epochs.drop_bad()
info = epochs.info.copy()
sfreq = info['sfreq']
projs = info['projs']
projs_on = np.full_like(projs, epochs.proj, dtype=bool)
if not epochs.proj:
info['projs'] = list()
# handle defaults / check arg validity
color = _handle_default('color', None)
scalings = _compute_scalings(scalings, epochs)
scalings = _handle_default('scalings_plot_raw', scalings)
if scalings['whitened'] == 'auto':
scalings['whitened'] = 1.
units = _handle_default('units', None)
unit_scalings = _handle_default('scalings', None)
decim, picks_data = _handle_decim(epochs.info.copy(), decim, None)
noise_cov = _check_cov(noise_cov, epochs.info)
event_id_rev = {v: k for k, v in (event_id or {}).items()}
_check_option('group_by', group_by,
('selection', 'position', 'original', 'type'))
# validate epoch_colors
_validate_type(epoch_colors, (list, None), 'epoch_colors')
if epoch_colors is not None:
if len(epoch_colors) != len(epochs.events):
msg = ('epoch_colors must have length equal to the number of '
f'epochs ({len(epochs)}); got length {len(epoch_colors)}.')
raise ValueError(msg)
for ix, this_colors in enumerate(epoch_colors):
_validate_type(this_colors, list, f'epoch_colors[{ix}]')
if len(this_colors) != len(epochs.ch_names):
msg = (f'epoch colors for epoch {ix} has length '
f'{len(this_colors)}, expected {len(epochs.ch_names)}.')
raise ValueError(msg)
# handle time dimension
n_epochs = min(n_epochs, len(epochs))
n_times = len(epochs) * len(epochs.times)
duration = n_epochs * len(epochs.times) / sfreq
# NB: this includes start and end of data:
boundary_times = np.arange(len(epochs) + 1) * len(epochs.times) / sfreq
# events
if events is not None:
event_nums = events[:, 2]
event_samps = events[:, 0]
epoch_n_samps = len(epochs.times)
# handle overlapping epochs (each event may show up in multiple places)
boundaries = (epochs.events[:, [0]] + np.array([-1, 1])
* epochs.time_as_index(0))
in_bounds = np.logical_and(boundaries[:, [0]] <= event_samps,
event_samps < boundaries[:, [1]])
event_ixs = [np.nonzero(a)[0] for a in in_bounds.T]
warned = False
event_times = list()
event_numbers = list()
for samp, num, _ixs in zip(event_samps, event_nums, event_ixs):
relevant_epoch_events = epochs.events[:, 0][_ixs]
if len(relevant_epoch_events) > 1 and not warned:
logger.info('You seem to have overlapping epochs. Some event '
'lines may be duplicated in the plot.')
warned = True
offsets = samp - relevant_epoch_events + epochs.time_as_index(0)
this_event_times = (_ixs * epoch_n_samps + offsets) / sfreq
event_times.extend(this_event_times)
event_numbers.extend([num] * len(_ixs))
event_nums = np.array(event_numbers)
event_times = np.array(event_times)
else:
event_nums = None
event_times = None
event_color_dict = _make_event_color_dict(event_color, events, event_id)
# determine trace order
picks = _picks_to_idx(info, picks)
n_channels = min(n_channels, len(picks))
ch_names = np.array(epochs.ch_names)
ch_types = np.array(epochs.get_channel_types())
order = _get_channel_plotting_order(order, ch_types, picks)
selections = None
if group_by in ('selection', 'position'):
selections = _setup_channel_selections(epochs, group_by, order)
order = np.concatenate(list(selections.values()))
default_selection = list(selections)[0]
n_channels = len(selections[default_selection])
# generate window title
if title is None:
title = epochs._name
if title is None or len(title) == 0:
title = 'Epochs'
elif not isinstance(title, str):
raise TypeError(f'title must be None or a string, got a {type(title)}')
params = dict(inst=epochs,
info=info,
n_epochs=n_epochs,
# channels and channel order
ch_names=ch_names,
ch_types=ch_types,
ch_order=order,
picks=order[:n_channels],
n_channels=n_channels,
picks_data=picks_data,
group_by=group_by,
ch_selections=selections,
# time
t_start=0,
duration=duration,
n_times=n_times,
first_time=0,
time_format='float',
decim=decim,
boundary_times=boundary_times,
# events
event_id_rev=event_id_rev,
event_color_dict=event_color_dict,
event_nums=event_nums,
event_times=event_times,
# preprocessing
projs=projs,
projs_on=projs_on,
apply_proj=epochs.proj,
remove_dc=True,
filter_coefs=None,
filter_bounds=None,
noise_cov=noise_cov,
use_noise_cov=noise_cov is not None,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=(0.8, 0.8, 0.8),
ch_color_dict=color,
epoch_color_bad=(1, 0, 0),
epoch_colors=epoch_colors,
# display
butterfly=butterfly,
clipping=None,
scrollbars_visible=show_scrollbars,
scalebars_visible=False,
window_title=title,
xlabel='Epoch number')
fig = _browse_figure(**params)
fig._update_picks()
# make channel selection dialog, if requested (doesn't work well in init)
if group_by in ('selection', 'position'):
fig._create_selection_fig()
fig._update_projector()
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
plt_show(show, block=block)
return fig
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
xscale='linear', area_mode='std', area_alpha=0.33,
dB=True, estimate='auto', show=True, n_jobs=1,
average=False, line_alpha=None, spatial_colors=True,
sphere=None, verbose=None):
"""%(plot_psd_doc)s.
Parameters
----------
epochs : instance of Epochs
The epochs object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90%% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(plot_psd_picks_good_data)s
ax : instance of Axes | None
Axes to plot into. If None, axes will be created.
%(plot_psd_color)s
%(plot_psd_xscale)s
%(plot_psd_area_mode)s
%(plot_psd_area_alpha)s
%(plot_psd_dB)s
%(plot_psd_estimate)s
%(show)s
%(n_jobs)s
%(plot_psd_average)s
%(plot_psd_line_alpha)s
%(plot_psd_spatial_colors)s
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
"""
from ._figure import _psd_figure
# generate figure
# epochs always use multitaper, not Welch, so no need to allow "window"
# param above
fig = _psd_figure(
inst=epochs, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,
average=average, estimate=estimate, area_mode=area_mode,
line_alpha=line_alpha, area_alpha=area_alpha, color=color,
spatial_colors=spatial_colors, n_jobs=n_jobs, bandwidth=bandwidth,
adaptive=adaptive, low_bias=low_bias, normalization=normalization,
window='hamming')
plt_show(show)
return fig
| bsd-3-clause |
lsanomaly/lsanomaly | lsanomaly/notebooks/digits.py | 1 | 2928 | """
Demo of least squares anomaly detection on static digits data.
In this example, we try to recognise digits of class 9 given training
examples from classes 0-8.
"""
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, metrics, model_selection
def plot_roc(fper, tper, auc):
_ = plt.figure(figsize=(8, 6))
plt.plot(fper, tper, color="orange", label="ROC")
plt.plot([0, 1], [0, 1], color="darkblue", linestyle="--")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
title = "Receiver Operating Characteristic (ROC) Curve\nAUC = {:1.3f}".format( # noqa
auc
)
plt.title(title, fontsize=16)
plt.legend()
plt.show()
def data_prep(test_size=0.2):
digits = datasets.load_digits()
X = digits.data
y = digits.target
# Split data into training and test sets, then remove all examples of
# class 9 from the training set, leaving only examples of 0-8.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=test_size
)
train_inlier_idx = y_train < 9
X_train = X_train[train_inlier_idx, :]
y_train = y_train[train_inlier_idx]
return X_train, X_test, y_train, y_test
# adapted from https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
def plot_confusion_matrix(
y_true,
y_pred,
target_names=None,
title="Confusion matrix",
cmap=None,
normalize=True,
):
cm = metrics.confusion_matrix(y_true, y_pred)
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap("Blues")
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title, fontsize=16)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(
j,
i,
"{:0.4f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
else:
plt.text(
j,
i,
"{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label", fontsize=14)
plt.xlabel(
"Predicted label\naccuracy={:0.4f}; misclass={:0.4f}".format(
accuracy, misclass
),
fontsize=14,
)
plt.show()
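# The notebook cell that actually fits the anomaly detector is not included
# above. The driver below is a minimal sketch, assuming lsanomaly exposes an
# LSAnomaly estimator with scikit-learn style fit/predict_proba and that the
# last column of predict_proba corresponds to the anomaly class.
if __name__ == "__main__":
    import lsanomaly

    X_train, X_test, y_train, y_test = data_prep(test_size=0.2)
    model = lsanomaly.LSAnomaly()  # default parameters (assumption)
    model.fit(X_train)
    # Score each test digit by its estimated anomaly probability.
    scores = model.predict_proba(X_test)[:, -1]
    # Digits of class 9 were withheld from training, so they are the outliers.
    y_true = (y_test == 9).astype(int)
    fper, tper, _ = metrics.roc_curve(y_true, scores)
    plot_roc(fper, tper, metrics.roc_auc_score(y_true, scores))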
| mit |
kevin-intel/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 27 | 3244 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1]_.
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
sandeepdsouza93/TensorFlow-15712 | tensorflow/contrib/factorization/python/ops/gmm_test.py | 22 | 6248 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
class GMMTest(tf.test.TestCase):
def setUp(self):
np.random.seed(3)
tf.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = 100
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = tf.contrib.factorization.KMeansClustering(
num_clusters=self.num_centers)
clusterer.fit(self.points, steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(np.random.rand(num_centers,
num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(np.random.randn(num_points,
num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [np.mean(points[assignments == center], axis=0)
for center in xrange(num_centers)]
covs = [np.cov(points[assignments == center].T)
for center in xrange(num_centers)]
scores = []
for r in xrange(num_points):
scores.append(np.sqrt(np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])),
points[r, :] - means[assignments[r]])))
return (points, assignments, scores)
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = tf.contrib.factorization.GMM(
self.num_centers,
initial_clusters=self.initial_means,
batch_size=self.batch_size,
steps=40,
continue_training=True,
random_seed=4,
config=tf.contrib.learn.RunConfig(tf_random_seed=2))
gmm.fit(x=self.points, steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape),
[self.num_centers, self.num_dims])
def test_fit(self):
gmm = tf.contrib.factorization.GMM(
self.num_centers,
initial_clusters='random',
batch_size=self.batch_size,
random_seed=4,
config=tf.contrib.learn.RunConfig(tf_random_seed=2))
gmm.fit(x=self.points, steps=1)
score1 = gmm.score(x=self.points)
gmm = tf.contrib.factorization.GMM(
self.num_centers,
initial_clusters='random',
batch_size=self.batch_size,
random_seed=4,
config=tf.contrib.learn.RunConfig(tf_random_seed=2))
gmm.fit(x=self.points, steps=10)
score2 = gmm.score(x=self.points)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = tf.contrib.factorization.GMM(
self.num_centers,
initial_clusters=self.initial_means,
batch_size=self.batch_size,
steps=40,
continue_training=True,
random_seed=4,
config=tf.contrib.learn.RunConfig(tf_random_seed=2))
gmm.fit(x=self.points, steps=60)
clusters = gmm.clusters()
# Make a small test set
points, true_assignments, true_offsets = (
self.make_random_points(clusters, 40))
assignments = np.ravel(gmm.predict(points))
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(points)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = tf.contrib.factorization.GMM(
self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
batch_size=self.num_points,
steps=iterations,
continue_training=True,
config=tf.contrib.learn.RunConfig(tf_random_seed=2))
gmm.fit(self.points)
skflow_assignments = gmm.predict(self.points[:10, :]).astype(int)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(np.diag(sklearn_covs[d]),
gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
phoebe-project/phoebe2-docs | 2.0/tutorials/building_a_system.py | 1 | 6379 | #!/usr/bin/env python
# coding: utf-8
# Building a System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.0,<2.1"')
# From now on, we'll just quickly do common setup at the beginning of each tutorial.
# For full gory details on the general concepts here, make sure to read [General Concepts](general_concepts).
#
# We'll always start by doing our basic imports, setting up a logger, and initializing
# an empty Bundle.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.Bundle()
# Default Systems
# ------------------------
#
# Although the default empty Bundle doesn't include a system, there are available
# constructors that create default systems. To create a simple binary with component tags
# 'binary', 'primary', and 'secondary' (as above), you could call:
# In[2]:
b = phoebe.Bundle.default_binary()
# or for short:
# In[3]:
b = phoebe.default_binary()
# In[4]:
print b.hierarchy
# To build the same binary but as a contact system, you would call:
# In[5]:
b = phoebe.default_binary(contact_binary=True)
# In[6]:
print b.hierarchy
# For more details on dealing with contact binary systems, see the [Contact Binary Example Script](../examples/minimal_contact_binary)
# Adding Components Manually
# --------------------
#
# By default, an empty Bundle does not contain any information about our system.
#
# So, let's start by adding a few stars. Here we'll call the generic add_component method. This method works for any type of component in the system - stars, orbits, planets, disks, rings, spots, etc. The first argument needs to be a callable or the name of a callable in phoebe.parameters.component, which includes the following options:
#
# * orbit
# * star
# * envelope
#
# add_component also takes a keyword argument for the 'component' tag. Here we'll give them component tags 'primary' and 'secondary' - but note that these are merely convenience labels and do not hold any special roles. Some tags, however, are forbidden if they clash with other tags or reserved values - so if you get an error stating that the component tag is forbidden, try using a different string.
# In[7]:
b = phoebe.Bundle()
# In[8]:
b.add_component(phoebe.component.star, component='primary')
b.add_component('star', component='secondary')
# But there are also shortcut methods for add_star and add_orbit. In these cases you don't need to provide the function, but only the component tag of your star/orbit.
#
# Any of these functions also accept values for any of the qualifiers of the created parameters.
# In[9]:
b.add_star('extrastarforfun', teff=6000)
# Here we call the add_component method of the bundle with several arguments:
#
# * a function (or the name of a function) in phoebe.parameters.component. This
# function tells the bundle what parameters need to be added.
# * component: the tag that we want to give this component for future reference.
# * any additional keyword arguments: you can also provide initial values for Parameters
# that you know will be created. In the last example you can see that the
# effective temperature will already be set to 6000 (in the default unit, which is K).
#
# and then we'll do the same to add an orbit:
# In[10]:
b.add_orbit('binary')
# Defining the Hierarchy
# ---------------------------------
#
# At this point all we've done is add a bunch of Parameters to our Bundle, but
# we still need to specify the hierarchical setup of our system.
#
# Here we want to place our two stars (with component tags 'primary' and 'secondary') in our
# orbit (with component tag 'binary'). This can be done with several different syntaxes:
# In[11]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit, b['binary'], b['primary'], b['secondary'])
# or
# In[12]:
b.set_hierarchy(phoebe.hierarchy.binaryorbit(b['binary'], b['primary'], b['secondary']))
# If you access the value that this set, you'll see that it really just resulted
# in a simple string representation:
# In[13]:
b.get_hierarchy()
# We could just as easily have used this string to set the hierarchy:
# In[14]:
b.set_hierarchy('orbit:binary(star:primary, star:secondary)')
# If at any point we want to flip the primary and secondary components or make
# this binary a triple, it really is as easy as changing this hierarchy and
# everything else will adjust as needed (including cross-ParameterSet constraints,
# and datasets)
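# For example (a quick sketch using the component tags defined above), flipping the two stars is just another call to set_hierarchy; here we immediately restore the original order so the rest of this tutorial is unaffected:
# In[ ]:
b.set_hierarchy('orbit:binary(star:secondary, star:primary)')
b.set_hierarchy('orbit:binary(star:primary, star:secondary)')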
# The Hierarchy Parameter
# -----------------------------
#
# Setting the hierarchy just sets the value of a single parameter (although it may take some time because it also does a lot of paperwork and manages constraints between components in the system). You can access that parameter as usual:
# In[15]:
b['hierarchy@system']
# or through any of these shortcuts:
# In[16]:
b.get_hierarchy()
# In[17]:
b.hierarchy
# This hierarchy parameter then has several methods unique to itself. You can, for instance, list the component tags of all the stars or orbits in the hierarchy:
# In[18]:
print b.hierarchy.get_stars()
# In[19]:
print b.hierarchy.get_orbits()
# Or you can ask for the component tag of the top-level item in the hierarchy
# In[20]:
print b.hierarchy.get_top()
# And request the parent, children, child, or sibling of any item in the hierarchy
# In[21]:
print b.hierarchy.get_parent_of('primary')
# In[22]:
print b.hierarchy.get_children_of('binary')
# In[23]:
print b.hierarchy.get_child_of('binary', 0) # here 0 means primary component, 1 means secondary
# In[24]:
print b.hierarchy.get_sibling_of('primary')
# We can also check whether a given component (by component tag) is the primary or secondary component in its parent orbit. Note that it is just a deliberate naming choice (not a requirement) that the component tag here is also 'secondary' - the tag itself carries no special role.
# In[25]:
print b.hierarchy.get_primary_or_secondary('secondary')
# Next
# ----------
#
# Next up: let's learn about [saving and loading](saving_and_loading.ipynb)
# In[ ]:
| gpl-3.0 |
cigroup-ol/metaopt | docs/_extensions/math_symbol_table.py | 6 | 6929 | from __future__ import print_function
symbols = [
["Lower-case Greek",
5,
r"""\alpha \beta \gamma \chi \delta \epsilon \eta \iota \kappa
\lambda \mu \nu \omega \phi \pi \psi \rho \sigma \tau \theta
\upsilon \xi \zeta \digamma \varepsilon \varkappa \varphi
\varpi \varrho \varsigma \vartheta"""],
["Upper-case Greek",
6,
r"""\Delta \Gamma \Lambda \Omega \Phi \Pi \Psi \Sigma \Theta
\Upsilon \Xi \mho \nabla"""],
["Hebrew",
4,
r"""\aleph \beth \daleth \gimel"""],
["Delimiters",
6,
r"""| \{ \lfloor / \Uparrow \llcorner \vert \} \rfloor \backslash
\uparrow \lrcorner \| \langle \lceil [ \Downarrow \ulcorner
\Vert \rangle \rceil ] \downarrow \urcorner"""],
["Big symbols",
5,
r"""\bigcap \bigcup \bigodot \bigoplus \bigotimes \biguplus
\bigvee \bigwedge \coprod \oint \prod \sum \int"""],
["Standard function names",
4,
r"""\arccos \csc \ker \min \arcsin \deg \lg \Pr \arctan \det \lim
\gcd \ln \sup \cot \hom \log \tan \coth \inf \max \tanh
\sec \arg \dim \liminf \sin \cos \exp \limsup \sinh \cosh"""],
["Binary operation and relation symbols",
3,
r"""\ast \pm \slash \cap \star \mp \cup \cdot \uplus
\triangleleft \circ \odot \sqcap \triangleright \bullet \ominus
\sqcup \bigcirc \oplus \wedge \diamond \oslash \vee
\bigtriangledown \times \otimes \dag \bigtriangleup \div \wr
\ddag \barwedge \veebar \boxplus \curlywedge \curlyvee \boxminus
\Cap \Cup \boxtimes \bot \top \dotplus \boxdot \intercal
\rightthreetimes \divideontimes \leftthreetimes \equiv \leq \geq
\perp \cong \prec \succ \mid \neq \preceq \succeq \parallel \sim
\ll \gg \bowtie \simeq \subset \supset \Join \approx \subseteq
\supseteq \ltimes \asymp \sqsubset \sqsupset \rtimes \doteq
\sqsubseteq \sqsupseteq \smile \propto \dashv \vdash \frown
\models \in \ni \notin \approxeq \leqq \geqq \lessgtr \leqslant
\geqslant \lesseqgtr \backsim \lessapprox \gtrapprox \lesseqqgtr
\backsimeq \lll \ggg \gtreqqless \triangleq \lessdot \gtrdot
\gtreqless \circeq \lesssim \gtrsim \gtrless \bumpeq \eqslantless
\eqslantgtr \backepsilon \Bumpeq \precsim \succsim \between
\doteqdot \precapprox \succapprox \pitchfork \Subset \Supset
\fallingdotseq \subseteqq \supseteqq \risingdotseq \sqsubset
\sqsupset \varpropto \preccurlyeq \succcurlyeq \Vdash \therefore
\curlyeqprec \curlyeqsucc \vDash \because \blacktriangleleft
\blacktriangleright \Vvdash \eqcirc \trianglelefteq
\trianglerighteq \neq \vartriangleleft \vartriangleright \ncong
\nleq \ngeq \nsubseteq \nmid \nsupseteq \nparallel \nless \ngtr
\nprec \nsucc \subsetneq \nsim \supsetneq \nVDash \precnapprox
\succnapprox \subsetneqq \nvDash \precnsim \succnsim \supsetneqq
\nvdash \lnapprox \gnapprox \ntriangleleft \ntrianglelefteq
\lneqq \gneqq \ntriangleright \lnsim \gnsim \ntrianglerighteq
\coloneq \eqsim \nequiv \napprox \nsupset \doublebarwedge \nVdash
\Doteq \nsubset \eqcolon \ne
"""],
["Arrow symbols",
2,
r"""\leftarrow \longleftarrow \uparrow \Leftarrow \Longleftarrow
\Uparrow \rightarrow \longrightarrow \downarrow \Rightarrow
\Longrightarrow \Downarrow \leftrightarrow \updownarrow
\longleftrightarrow \updownarrow \Leftrightarrow
\Longleftrightarrow \Updownarrow \mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow \leftharpoonup
\rightharpoonup \swarrow \leftharpoondown \rightharpoondown
\nwarrow \rightleftharpoons \leadsto \dashrightarrow
\dashleftarrow \leftleftarrows \leftrightarrows \Lleftarrow
\Rrightarrow \twoheadleftarrow \leftarrowtail \looparrowleft
\leftrightharpoons \curvearrowleft \circlearrowleft \Lsh
\upuparrows \upharpoonleft \downharpoonleft \multimap
\leftrightsquigarrow \rightrightarrows \rightleftarrows
\rightrightarrows \rightleftarrows \twoheadrightarrow
\rightarrowtail \looparrowright \rightleftharpoons
\curvearrowright \circlearrowright \Rsh \downdownarrows
\upharpoonright \downharpoonright \rightsquigarrow \nleftarrow
\nrightarrow \nLeftarrow \nRightarrow \nleftrightarrow
\nLeftrightarrow \to \Swarrow \Searrow \Nwarrow \Nearrow
\leftsquigarrow
"""],
["Miscellaneous symbols",
3,
r"""\neg \infty \forall \wp \exists \bigstar \angle \partial
\nexists \measuredangle \eth \emptyset \sphericalangle \clubsuit
\varnothing \complement \diamondsuit \imath \Finv \triangledown
\heartsuit \jmath \Game \spadesuit \ell \hbar \vartriangle \cdots
\hslash \vdots \blacksquare \ldots \blacktriangle \ddots \sharp
\prime \blacktriangledown \Im \flat \backprime \Re \natural
\circledS \P \copyright \ss \circledR \S \yen \AA \checkmark \$
\iiint \iint \iint \oiiint"""]
]
def run(state_machine):
def get_n(n, l):
part = []
for x in l:
part.append(x)
if len(part) == n:
yield part
part = []
yield part
lines = []
for category, columns, syms in symbols:
syms = syms.split()
syms.sort()
lines.append("**%s**" % category)
lines.append('')
max_width = 0
for sym in syms:
max_width = max(max_width, len(sym))
max_width = max_width * 2 + 16
header = " " + (('=' * max_width) + ' ') * columns
format = '%%%ds' % max_width
for chunk in get_n(20, get_n(columns, syms)):
lines.append(header)
for part in chunk:
line = []
for sym in part:
line.append(format % (":math:`%s` ``%s``" % (sym, sym)))
lines.append(" " + " ".join(line))
lines.append(header)
lines.append('')
state_machine.insert_input(lines, "Symbol table")
return []
def math_symbol_table_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(state_machine)
def setup(app):
app.add_directive(
'math_symbol_table', math_symbol_table_directive,
False, (0, 1, 0))
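# Usage sketch (assumptions about the surrounding Sphinx project): conf.py adds
# this directory to sys.path, lists 'math_symbol_table' in its extensions list,
# and an .rst page then invokes the directive as ".. math_symbol_table::".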
if __name__ == "__main__":
# Do some verification of the tables
from matplotlib import _mathtext_data
print("SYMBOLS NOT IN STIX:")
all_symbols = {}
for category, columns, syms in symbols:
if category == "Standard function names":
continue
syms = syms.split()
for sym in syms:
if len(sym) > 1:
all_symbols[sym[1:]] = None
if sym[1:] not in _mathtext_data.tex2uni:
print(sym)
print("SYMBOLS NOT IN TABLE:")
for sym in _mathtext_data.tex2uni:
if sym not in all_symbols:
print(sym)
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/feature_selection/__init__.py | 33 | 1159 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
from .from_model import SelectFromModel
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'SelectFromModel']
| bsd-3-clause |
tornadozou/tensorflow | tensorflow/contrib/timeseries/examples/predict_test.py | 80 | 2487 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the prediction example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
from tensorflow.contrib.timeseries.examples import predict
from tensorflow.python.platform import test
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/period_trend.csv")
class PeriodTrendExampleTest(test.TestCase):
def test_shapes_and_variance_structural(self):
(times, observed, all_times, mean, upper_limit, lower_limit
) = predict.structural_ensemble_train_and_predict(_DATA_FILE)
# Just check that plotting will probably be OK. We can't actually run the
# plotting code since we don't want to pull in matplotlib as a dependency
# for this test.
self.assertAllEqual([500], times.shape)
self.assertAllEqual([500], observed.shape)
self.assertAllEqual([700], all_times.shape)
self.assertAllEqual([700], mean.shape)
self.assertAllEqual([700], upper_limit.shape)
self.assertAllEqual([700], lower_limit.shape)
# Check that variance hasn't blown up too much. This is a relatively good
# indication that training was successful.
self.assertLess(upper_limit[-1] - lower_limit[-1],
1.5 * (upper_limit[0] - lower_limit[0]))
def test_ar(self):
(times, observed, all_times, mean,
upper_limit, lower_limit) = predict.ar_train_and_predict(_DATA_FILE)
self.assertAllEqual(times.shape, observed.shape)
self.assertAllEqual(all_times.shape, mean.shape)
self.assertAllEqual(all_times.shape, upper_limit.shape)
self.assertAllEqual(all_times.shape, lower_limit.shape)
self.assertLess((upper_limit - lower_limit).mean(), 4.)
if __name__ == "__main__":
test.main()
| apache-2.0 |
rebeccabilbro/orlo | evaluation/valcurve.py | 1 | 2828 | #!/usr/bin/env python
# valcurve.py
# Title: Plot validation curves for tour
# Author: Rebecca Bilbro
# Date: 3/23/16
# Organization: District Data Labs
"""
Plot Validation Curves for classified datasets.
"""
#####################################################################
# Imports
#####################################################################
import csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction import DictVectorizer
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
IMG_STORE = "../orlo/valgallery/"
#####################################################################
# Helpers
#####################################################################
def openFile(fname):
"""
Opens data file.
"""
with open(fname, 'rb') as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read(), delimiters=';,\t')
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
data = list(reader)
return data
#####################################################################
# Plotter
#####################################################################
dataset = openFile("data/tic-tac-toe.data")
target = [row[-1] for row in dataset]
data = []
for row in dataset:
row.remove(row[-1])
data.append(row)
label_enc = LabelEncoder()
encoded_labels = label_enc.fit_transform(target)
mapping = []
for instance in range(len(data)):
D = dict()
for f in range(len(data[instance])):
D[f] = data[instance][f]
mapping.append(D)
data_enc = DictVectorizer(sparse=False)
encoded_data = data_enc.fit_transform(mapping)
X, y = encoded_data, encoded_labels
param_range = np.logspace(-7, 3, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range, #Gamma is the kernel coefficient for SVM
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.savefig("valgallery/tictactoe.png")
| mit |
nmayorov/scikit-learn | examples/mixture/plot_gmm_selection.py | 36 | 3271 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
shirtsgroup/checkensemble | examples/harmonic_mu.py | 1 | 8561 | #!/usr/bin/python
import numpy
import numpy.random
import matplotlib
import matplotlib.pyplot as plt
import scipy.integrate
from scipy.integrate import quad
from scipy.stats import geom
import checkensemble
import optparse, sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-t", "--temperature", nargs = 2, dest="T_k", type="float",default=[0.5,1.5],
help="low and high temperatures, [default = %default]")
parser.add_option("-m", "--mu", nargs = 2, dest="mu_k", type="float",default=[0.6,1.4],
help="low and high chemical potentials, [default = %default]")
parser.add_option("-c", "--cuttails", dest="cuttails", type="float",default=0.01,
help="fraction of the tails to omit from the analysis to avoid small sample errors in binning [default = %default]")
parser.add_option("-b", "--nboots", dest="nboots", type="int",default=200,
help="number of bootstrap samples performed [default = %default]")
parser.add_option("-r", "--nreps", dest="nreps", type="int",default=0,
help="number of independent repetitions of the sampling [default = %default]")
parser.add_option("-i", "--nbins", dest="nbins",type = "int", default=30,
help="number of bins for bootstrapping [default = %default]")
parser.add_option("-e", "--energytype", dest="type", default="jointEN",
help="the type of energy that is being analyzed [default = %default]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",default=False,
help="more verbosity")
parser.add_option("-N", "--number", nargs = 2, dest="N_k", type="int",default=[50000,50000],
help="number of samples from the two states, [default = %default]")
parser.add_option("-K", "--force", dest="K", type="float",default=1.0,
help="spring force constant prefactor [default = %default]")
parser.add_option("-g", "--figureprefix", dest="figname", default='harmonic_mu',
help="name prefix for the figure")
parser.add_option("-s", "--seed", dest="seed", type = "int", default=None,
help="random seed for generating independet or bootstrap samples")
(options, args) = parser.parse_args()
# For a 1D harmonic oscillator, the potential is given by
#   V(x;K) = (K/2) * (x-x_0)**2
# where K denotes the spring constant.
#
# The equilibrium distribution is given analytically by
# p(x;beta,N) = sqrt[(beta K) / (2 pi)] exp[-beta K (x-x_0)**2 / 2]
# The dimensionless free energy is therefore
# q(beta,K) = sqrt(2 pi / beta K)
# f(beta,K) = - (1/2) * ln[ (2 pi) / (beta K) ]
# f_2 - f_1 = -(1/2) (ln [ 1/ beta_2 K_2] - ln [ 1/beta_1 K_1])
# f_2 - f_1 = (1/2) ln [beta_2/K_2 / beta_1/K_1]
# f_2 - f_1 = (1/2) ln [K_1 T_1/ K_2 T_2]
# for T_1 = 0.9, T_2 = 1.1, K_1 = K_2, df = 0.5*ln(1.1/0.9) = 0.10034
# for T_1 = 0.8, T_2 = 1.2, K_1 = K_2, df = 0.5*ln(1.2/0.8) = 0.2027
# for T_1 = 0.5, T_2 = 1.5, K_1 = K_2, df = 0.5*ln(1.5/0.5) = 0.5493
# Now add number of particles. Use the model of distinguishable noninteracting particles.
# In this case, Q(beta,N) = Q^N
# Xi(x,N) = \sum_N Q(beta,N) exp(beta mu N)
# = \sum_N (2 pi /beta K)^(N/2) exp(beta mu N)
# = \sum_N sqrt(2 pi /beta K)^N exp(beta mu)^N
# = \sum_N [sqrt(2 pi /beta K)^N exp(beta mu)]^N
# for code simplicity, we assume N=1 is the minimum
# set x = (sqrt(2 pi / beta K)*exp(beta mu))
# = ((1-x)^-1)-1 = 1/(1-x) - (1-x)/(1-x) = x/(1-x)
#
# P(x,N) propto exp(-\beta (E - mu N))
# P(x,N) = exp(-\sum_i \beta K/2 x_i^2 + \beta mu N) / Xi,  with Xi = x/(1-x) as above
#
# If there are two temperatures and the same chemical potential, we just bin the A_i = E_i - \mu N_i
# So we want to sample from two temperatures and two mu.
# P_1 / P_2 propto exp((B_2 - B_1) E - (B_2 mu_2 - B_1 mu_1) N)
# analytical variance: depends on the method.
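# --- Hedged numeric check (not part of the original script): verify the analytic
# df = 0.5*ln(T_2/T_1) values quoted in the comments above for K_1 = K_2.
# The temperature pairs below are illustrative and independent of the command-line options.
for _T1, _T2, _expected in [(0.9, 1.1, 0.10034), (0.8, 1.2, 0.2027), (0.5, 1.5, 0.5493)]:
    assert abs(0.5 * numpy.log(_T2 / _T1) - _expected) < 1e-3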
title='harmonic oscillators with number'
if (options.nreps > 0):
reptype = 'independent'
nreps = options.nreps
if (options.nboots > 0):
reptype = 'bootstrap'
nreps = options.nboots
if (options.nboots > 0 and options.nreps > 0):
print "Can't do both bootstrap sampling and independence sampling: defaulting to bootstrap sampling"
if (options.seed):
numpy.random.seed(options.seed) # setting the seed for independent sampling
print "setting random number seed for generating samples as %d" % (options.seed)
kB = 1.0
a_k = options.K*numpy.array([1,1])
T_k = numpy.array(options.T_k) #T_k = numpy.array([0.5,1.5]) # temperatures
mu_k = numpy.array(options.mu_k) #T_k = numpy.array([0.5,1.5]) # chemical potentials
beta_k = 1.0/(kB*T_k) # kB = 1
N_k = numpy.array(options.N_k) #N_k number of samples
if (options.type == 'helmholtz'):
analysis_type = 'dbeta-constmu'
if (mu_k[0] != mu_k[1]):
print "Chemical potentials are not equal: can't test the ethalpy distribution"
if (T_k[0] == T_k[1]):
print "Temperatures are equal: can sometimes result in numerical instability"
elif (options.type == 'number'):
analysis_type = 'dmu-constB'
if (T_k[0] != T_k[1]):
print "Temperatures are not equal: can't test the volume distribution"
if (mu_k[0] == mu_k[1]):
print "Chemical potentials are equal: can sometimes result in numerical instability"
elif (options.type == 'jointEN'):
analysis_type = 'dbeta-dmu'
if (T_k[0] == T_k[1]):
print "Temperatures are equal: can sometimes result in numerical instability"
if (mu_k[0] == mu_k[1]):
print "Chemical potentials are equal: can sometimes result in numerical instability"
else:
print "analysis type %s is not defined!" % (options.type)
N_max = numpy.max(N_k)
gtau = 1
genfreq = 1000
K = len(beta_k)
K_k = numpy.array([1,1]) # the spring constants
O_k = numpy.array([0,0]) # the locations
sigma_k = (beta_k * K_k)**(-0.5)
N_kn = numpy.zeros([K,N_max], float) # N_kn[k,n] is the number of oscillators in sample n from state k
U_kn = numpy.zeros([K,N_max], float) # U_kn[k,n] is the potential energy of sample n from state k
# Grand potential, consistent with the code below:
#   x  = sqrt(2 pi / (beta K)) * exp(beta mu)
#   Xi = sum_{N>=1} x^N = ((1-x)^(-1)) - 1 = x/(1-x)
#   f  = -log Xi = -log[x/(1-x)],   df = f[1] - f[0]
xv = numpy.sqrt(2*numpy.pi/(beta_k*K_k))*numpy.exp(beta_k*mu_k)
f = -numpy.log(xv/(1-xv))
df = f[1]-f[0]
print "Analytical df = %.8f" % (df)
if (options.nreps > 0):
print "Now sampling %d sets of data . . . could also take a bit" % (nreps)
reps = []
if (reptype == 'independent'):
ncount = nreps
elif (reptype == 'bootstrap'):
ncount = 1
for n in range(ncount):
if (n%10 == 0 and n>0):
print "Finished generating %d data sets . . ." % (n)
# generate independent samples from (x,N).
# Pick samples from N, and then from x given N
    # P(N) propto [Q exp(\beta mu)]^N = [sqrt(2 pi / (beta K)) exp(beta mu)]^N = p^N
    # i.e. P(N) is geometric in N (N >= 1): scipy.stats.geom(q) has
    # pmf(N) = (1-q)^(N-1) q propto (1-q)^N, so N is drawn with geom.rvs(1-p) below.
for k in range(K):
for n in range(N_k[k]): # gibbs sampling to get x,N samples:
if (n%10000) == 0:
print "Set %d: Generated up through sample %d" % (k,n)
            # first sample the number of oscillators N
            p = numpy.exp(beta_k[k]*mu_k[k])*numpy.sqrt(2*numpy.pi/(beta_k[k]*K_k[k]))
N_kn[k,n] = scipy.stats.geom.rvs(1-p,size=1)
# now generate random distances
x_i = numpy.random.normal(O_k[k], sigma_k[k], N_kn[k,n])
# compute potential energy of all samples in all potentials
U_kn[k,n] = 0.5*(K_k[k] * numpy.sum(x_i**2))
addrep = [U_kn.copy(),N_kn.copy()]
reps.append(addrep)
checkensemble.ProbabilityAnalysis(N_k,type=analysis_type,T_k=T_k,mu_k=mu_k,U_kn=U_kn,N_kn=N_kn,kB=1.0,title=title,figname=options.figname,nbins=options.nbins, reptype=reptype, nboots=options.nboots, reps=reps, cuttails=options.cuttails, eunits='kT',seed=options.seed)
| gpl-2.0 |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 4 | 23390 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import csv_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import example_parser
from tensorflow.contrib.learn.python.learn.dataframe.transforms import hashes
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source
from tensorflow.contrib.learn.python.learn.dataframe.transforms import sparsify
from tensorflow.python.client import session as sess
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner as qr
def _expand_file_names(filepatterns):
"""Takes a list of file patterns and returns a list of resolved file names."""
if not isinstance(filepatterns, (list, tuple, set)):
filepatterns = [filepatterns]
filenames = set()
for filepattern in filepatterns:
names = set(gfile.Glob(filepattern))
filenames |= names
return list(filenames)
def _dtype_to_nan(dtype):
if dtype is dtypes.string:
return b""
elif dtype.is_integer:
return np.nan
elif dtype.is_floating:
return np.nan
elif dtype is dtypes.bool:
return np.nan
else:
raise ValueError("Can't parse type without NaN into sparse tensor: %s" %
dtype)
def _get_default_value(feature_spec):
if isinstance(feature_spec, parsing_ops.FixedLenFeature):
return feature_spec.default_value
else:
return _dtype_to_nan(feature_spec.dtype)
class TensorFlowDataFrame(df.DataFrame):
"""TensorFlowDataFrame implements convenience functions using TensorFlow."""
def run(self,
num_batches=None,
graph=None,
session=None,
start_queues=True,
initialize_variables=True,
**kwargs):
"""Builds and runs the columns of the `DataFrame` and yields batches.
This is a generator that yields a dictionary mapping column names to
evaluated columns.
Args:
num_batches: the maximum number of batches to produce. If none specified,
the returned value will iterate through infinite batches.
graph: the `Graph` in which the `DataFrame` should be built.
session: the `Session` in which to run the columns of the `DataFrame`.
start_queues: if true, queues will be started before running and halted
        after producing `num_batches` batches.
initialize_variables: if true, variables will be initialized.
**kwargs: Additional keyword arguments e.g. `num_epochs`.
Yields:
A dictionary, mapping column names to the values resulting from running
each column for a single batch.
"""
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
if session is None:
session = sess.Session()
self_built = self.build(**kwargs)
keys = list(self_built.keys())
cols = list(self_built.values())
if initialize_variables:
if variables.local_variables():
session.run(variables.initialize_local_variables())
if variables.all_variables():
session.run(variables.initialize_all_variables())
if start_queues:
coord = coordinator.Coordinator()
threads = qr.start_queue_runners(sess=session, coord=coord)
i = 0
while num_batches is None or i < num_batches:
i += 1
try:
values = session.run(cols)
yield collections.OrderedDict(zip(keys, values))
except errors.OutOfRangeError:
break
if start_queues:
coord.request_stop()
coord.join(threads)
def select_rows(self, boolean_series):
"""Returns a `DataFrame` with only the rows indicated by `boolean_series`.
Note that batches may no longer have consistent size after calling
`select_rows`, so the new `DataFrame` may need to be rebatched.
For example:
'''
filtered_df = df.select_rows(df["country"] == "jp").batch(64)
'''
Args:
boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
rows where `boolean_series` evaluated to `True`.
"""
result = type(self)()
for key, col in self._columns.items():
try:
result[key] = col.select_rows(boolean_series)
except AttributeError as e:
raise NotImplementedError((
"The select_rows method is not implemented for Series type {}. "
"Original error: {}").format(type(col), e))
return result
def split(self, index_series, proportion, batch_size=None):
"""Deterministically split a `DataFrame` into two `DataFrame`s.
Note this split is only as deterministic as the underlying hash function;
see `tf.string_to_hash_bucket_fast`. The hash function is deterministic
for a given binary, but may change occasionally. The only way to achieve
an absolute guarantee that the split `DataFrame`s do not change across runs
is to materialize them.
Note too that the allocation of a row to one partition or the
other is evaluated independently for each row, so the exact number of rows
in each partition is binomially distributed.
Args:
index_series: a `Series` of unique strings, whose hash will determine the
partitioning; or the name in this `DataFrame` of such a `Series`.
(This `Series` must contain strings because TensorFlow provides hash
ops only for strings, and there are no number-to-string converter ops.)
proportion: The proportion of the rows to select for the 'left'
partition; the remaining (1 - proportion) rows form the 'right'
partition.
batch_size: the batch size to use when rebatching the left and right
`DataFrame`s. If None (default), the `DataFrame`s are not rebatched;
thus their batches will have variable sizes, according to which rows
are selected from each batch of the original `DataFrame`.
Returns:
Two `DataFrame`s containing the partitioned rows.
"""
# TODO(soergel): allow seed?
if isinstance(index_series, str):
index_series = self[index_series]
num_buckets = 1000000 # close enough for simple splits
hashed_input, = hashes.HashFast(num_buckets)(index_series)
threshold = int(num_buckets * proportion)
left = hashed_input < threshold
right = ~left
left_rows = self.select_rows(left)
right_rows = self.select_rows(right)
if batch_size:
left_rows = left_rows.batch(batch_size=batch_size, shuffle=False)
right_rows = right_rows.batch(batch_size=batch_size, shuffle=False)
return left_rows, right_rows
def run_once(self):
"""Creates a new 'Graph` and `Session` and runs a single batch.
Returns:
A dictionary mapping column names to numpy arrays that contain a single
batch of the `DataFrame`.
"""
return list(self.run(num_batches=1))[0]
def batch(self,
batch_size,
shuffle=False,
num_threads=1,
queue_capacity=None,
min_after_dequeue=None,
seed=None):
"""Resize the batches in the `DataFrame` to the given `batch_size`.
Args:
batch_size: desired batch size.
      shuffle: whether records should be shuffled. Defaults to false.
num_threads: the number of enqueueing threads.
queue_capacity: capacity of the queue that will hold new batches.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` with `batch_size` rows.
"""
column_names = list(self._columns.keys())
if shuffle:
batcher = batch.ShuffleBatch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity,
min_after_dequeue=min_after_dequeue,
seed=seed)
else:
batcher = batch.Batch(batch_size,
output_names=column_names,
num_threads=num_threads,
queue_capacity=queue_capacity)
batched_series = batcher(list(self._columns.values()))
dataframe = type(self)()
dataframe.assign(**(dict(zip(column_names, batched_series))))
return dataframe
@classmethod
def _from_csv_base(cls, filepatterns, get_default_values, has_header,
column_names, num_threads, enqueue_size,
batch_size, queue_capacity, min_after_dequeue, shuffle,
seed):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
get_default_values: a function that produces a list of default values for
each column, given the column names.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if column_names is None:
if not has_header:
raise ValueError("If column_names is None, has_header must be true.")
with gfile.GFile(filenames[0]) as f:
column_names = csv.DictReader(f).fieldnames
if "index" in column_names:
raise ValueError(
"'index' is reserved and can not be used for a column name.")
default_values = get_default_values(column_names)
reader_kwargs = {"skip_header_lines": (1 if has_header else 0)}
index, value = reader_source.TextFileSource(
filenames,
reader_kwargs=reader_kwargs,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = csv_parser.CSVParser(column_names, default_values)
parsed = parser(value)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_csv(cls,
filepatterns,
default_values,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
default_values: a list of default values for each column.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
# pylint: disable=unused-argument
return default_values
return cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
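  # A hedged usage sketch for `from_csv` (the file pattern and default values below
  # are illustrative, not taken from this module):
  #   df = TensorFlowDataFrame.from_csv("data/train-*.csv",
  #                                     default_values=[0.0, 0.0, ""],
  #                                     batch_size=64)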
@classmethod
def from_csv_with_feature_spec(cls,
filepatterns,
feature_spec,
has_header=True,
column_names=None,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from CSV files, given a feature_spec.
If `has_header` is false, then `column_names` must be specified. If
`has_header` is true and `column_names` are specified, then `column_names`
overrides the names in the header.
Args:
filepatterns: a list of file patterns that resolve to CSV files.
feature_spec: a dict mapping column names to `FixedLenFeature` or
`VarLenFeature`.
has_header: whether or not the CSV files have headers.
column_names: a list of names for the columns in the CSV files.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed lines.
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with examples from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
def get_default_values(column_names):
return [_get_default_value(feature_spec[name]) for name in column_names]
dataframe = cls._from_csv_base(filepatterns, get_default_values, has_header,
column_names, num_threads,
enqueue_size, batch_size, queue_capacity,
min_after_dequeue, shuffle, seed)
# replace the dense columns with sparse ones in place in the dataframe
for name in dataframe.columns():
if name != "index" and isinstance(feature_spec[name],
parsing_ops.VarLenFeature):
strip_value = _get_default_value(feature_spec[name])
(dataframe[name],) = sparsify.Sparsify(strip_value)(dataframe[name])
return dataframe
@classmethod
def from_examples(cls,
filepatterns,
features,
reader_cls=io_ops.TFRecordReader,
num_threads=1,
enqueue_size=None,
batch_size=32,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None):
"""Create a `DataFrame` from `tensorflow.Example`s.
Args:
filepatterns: a list of file patterns containing `tensorflow.Example`s.
features: a dict mapping feature names to `VarLenFeature` or
`FixedLenFeature`.
reader_cls: a subclass of `tensorflow.ReaderBase` that will be used to
read the `Example`s.
num_threads: the number of readers that will work in parallel.
enqueue_size: block size for each read operation.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
Returns:
A `DataFrame` that has columns corresponding to `features` and is filled
with `Example`s from `filepatterns`.
Raises:
ValueError: no files match `filepatterns`.
ValueError: `features` contains the reserved name 'index'.
"""
filenames = _expand_file_names(filepatterns)
if not filenames:
raise ValueError("No matching file names.")
if "index" in features:
raise ValueError(
"'index' is reserved and can not be used for a feature name.")
index, record = reader_source.ReaderSource(
reader_cls,
filenames,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
seed=seed)()
parser = example_parser.ExampleParser(features)
parsed = parser(record)
column_dict = parsed._asdict()
column_dict["index"] = index
dataframe = cls()
dataframe.assign(**column_dict)
return dataframe
@classmethod
def from_pandas(cls,
pandas_dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="pandas_data"):
"""Create a `tf.learn.DataFrame` from a `pandas.DataFrame`.
Args:
pandas_dataframe: `pandas.DataFrame` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
`pandas_dataframe`.
"""
pandas_source = in_memory_source.PandasSource(
pandas_dataframe,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(pandas_source()._asdict()))
return dataframe
@classmethod
def from_numpy(cls,
numpy_array,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
min_after_dequeue=None,
shuffle=True,
seed=None,
data_name="numpy_data"):
"""Creates a `tf.learn.DataFrame` from a `numpy.ndarray`.
The returned `DataFrame` contains two columns: 'index' and 'value'. The
'value' column contains a row from the array. The 'index' column contains
the corresponding row number.
Args:
numpy_array: `numpy.ndarray` that serves as a data source.
num_threads: the number of threads to use for enqueueing.
enqueue_size: the number of rows to enqueue per step.
batch_size: desired batch size.
queue_capacity: capacity of the queue that will store parsed `Example`s
min_after_dequeue: minimum number of elements that can be left by a
dequeue operation. Only used if `shuffle` is true.
shuffle: whether records should be shuffled. Defaults to true.
seed: passed to random shuffle operations. Only used if `shuffle` is true.
data_name: a scope name identifying the data.
Returns:
A `tf.learn.DataFrame` that contains batches drawn from the given
array.
"""
numpy_source = in_memory_source.NumpySource(
numpy_array,
num_threads=num_threads,
enqueue_size=enqueue_size,
batch_size=batch_size,
queue_capacity=queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
seed=seed,
data_name=data_name)
dataframe = cls()
dataframe.assign(**(numpy_source()._asdict()))
return dataframe
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/preprocessing/data.py | 9 | 42619 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import warn_if_not_float
from ..utils.extmath import row_norms
from ..utils.fixes import (combinations_with_replacement as combinations_w_r,
bincount)
from ..utils.fixes import isclose
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis)
from ..utils.validation import check_is_fitted
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
warn_if_not_float(X, estimator='The scale function')
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var[var == 0.0] = 1.0
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False)
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
if isinstance(data_range, np.ndarray):
data_range[data_range == 0.0] = 1.0
elif data_range == 0.:
data_range = 1.
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy)
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = row_norms(X)
norms[norms == 0.0] = 1.0
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
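# A hypothetical usage sketch (not part of scikit-learn) for the binarize() helper above:
# entries strictly greater than the threshold become 1, all other entries become 0.
def _example_binarize_usage():
    import numpy as np
    X = np.array([[1.5, -0.5, 0.0],
                  [0.0, 2.0, -3.0]])
    return binarize(X, threshold=0.0)   # [[1., 0., 0.], [0., 1., 0.]]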
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
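# A hypothetical sketch (not part of scikit-learn) illustrating the equivalence stated in
# the KernelCenterer docstring: for a linear kernel (phi(x) = x), centering the kernel
# matrix gives the same result as computing the kernel on explicitly mean-centered data.
def _example_kernel_centering():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)                       # linear kernel matrix
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)          # centering phi(x) directly
    assert np.allclose(K_centered, np.dot(X_centered, X_centered.T))
    return K_centered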
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
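# A hypothetical sketch (not part of scikit-learn) showing that add_dummy_feature() above
# also accepts sparse input and prepends the dummy column while staying sparse.
def _example_add_dummy_feature_sparse():
    import numpy as np
    from scipy import sparse as sp
    X = sp.csr_matrix(np.array([[0.0, 1.0], [1.0, 0.0]]))
    X_aug = add_dummy_feature(X, value=1.0)
    return X_aug.toarray()   # [[1., 0., 1.], [1., 1., 0.]]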
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
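# A hypothetical sketch (not part of scikit-learn) of the private helper above: the
# transform is applied only to the selected columns and the remaining columns are
# stacked, unchanged, to the right of the result.
def _example_transform_selected():
    import numpy as np
    X = np.array([[1.0, 4.0, 9.0],
                  [16.0, 25.0, 36.0]])
    return _transform_selected(X, np.sqrt, selected=[0, 2])   # sqrt of columns 0 and 2, then column 1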
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
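# A hypothetical usage sketch (not part of scikit-learn) for the OneHotEncoder above with
# a subset of categorical columns: column 0 is one-hot encoded while the continuous second
# column is passed through and stacked on the right, as described in the docstring.
def _example_onehot_subset():
    import numpy as np
    X = np.array([[0, 10.5],
                  [1, 7.2],
                  [2, 3.3]])
    enc = OneHotEncoder(categorical_features=[0], sparse=False)
    return enc.fit_transform(X)   # shape (3, 4): three indicator columns + the raw column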
| mit |
oysstu/pyopencl-in-action | ch6/simple_image.py | 1 | 2353 | '''
Listing 6.1: Simple image processing
'''
import numpy as np
import pyopencl as cl
import matplotlib.pyplot as plt
import utility
kernel_src = '''
__constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |
CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
__kernel void simple_image(read_only image2d_t src_image,
write_only image2d_t dst_image) {
/* Compute value to be subtracted from each pixel */
uint offset = get_global_id(1) * 0x4000 + get_global_id(0) * 0x1000;
/* Read pixel value */
int2 coord = (int2)(get_global_id(0), get_global_id(1));
uint4 pixel = read_imageui(src_image, sampler, coord);
/* Subtract offset from pixel */
pixel.x -= offset;
/* Write new pixel value to output */
write_imageui(dst_image, coord, pixel);
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev])
queue = cl.CommandQueue(context, dev)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev])
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and buffers
im_src = np.full(shape=(4, 4), fill_value=np.iinfo(np.uint16).max, dtype=np.uint16)
im_dst = np.empty_like(im_src, dtype=np.uint16)
src_buff = cl.image_from_array(context, im_src, mode='r')
dst_buff = cl.image_from_array(context, im_dst, mode='w')
# Enqueue kernel (with arguments specified directly)
# Note: global indices are reversed because OpenCL uses column-major order when reading images
global_size = im_src.shape[::-1]
local_size = None
# __call__(queue, global_size, local_size, *args, global_offset=None, wait_for=None, g_times_l=False)
prog.simple_image(queue, global_size, local_size, src_buff, dst_buff)
# Enqueue command to copy from buffers to host memory
# Note: region indices are reversed because OpenCL uses column-major order when reading images
cl.enqueue_copy(queue, dest=im_dst, src=dst_buff, is_blocking=True, origin=(0, 0), region=im_src.shape[::-1])
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(im_src, cmap='gray', vmin=0, vmax=np.iinfo(np.uint16).max)
ax2.imshow(im_dst, cmap='gray', vmin=0, vmax=np.iinfo(np.uint16).max, interpolation='nearest')
plt.show()
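# A hypothetical host-side check (not part of the original listing): for the 4x4 uint16
# image filled with 65535, the kernel subtracts row * 0x4000 + col * 0x1000 from each
# pixel, so im_dst is expected to match this NumPy computation (no wrap-around occurs here).
def _expected_result(shape=(4, 4)):
    rows, cols = np.indices(shape, dtype=np.uint32)
    return (np.iinfo(np.uint16).max - (rows * 0x4000 + cols * 0x1000)).astype(np.uint16)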
| mit |
antgonza/qiita | qiita_db/test/test_meta_util.py | 1 | 21955 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
import numpy.testing as npt
from tarfile import open as topen
from os import remove
from os.path import exists, join
import pandas as pd
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
@qiita_test_checker()
class MetaUtilTests(TestCase):
def setUp(self):
self.old_portal = qiita_config.portal
self.files_to_remove = []
def tearDown(self):
qiita_config.portal = self.old_portal
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
def _set_artifact_private(self):
id_status = qdb.util.convert_to_id('private', 'visibility')
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.artifact SET visibility_id = %d" % id_status)
def _set_artifact_public(self):
id_status = qdb.util.convert_to_id('public', 'visibility')
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.artifact SET visibility_id = %d" % id_status)
def test_validate_filepath_access_by_user(self):
self._set_artifact_private()
# shared has access to all study files and analysis files
user = qdb.user.User('[email protected]')
for i in [1, 2, 3, 4, 5, 9, 12, 15, 16, 17, 18, 19, 20, 21]:
self.assertTrue(qdb.meta_util.validate_filepath_access_by_user(
user, i))
# Now shared should not have access to the study files
qdb.study.Study(1).unshare(user)
for i in [1, 2, 3, 4, 5, 9, 12, 17, 18, 19, 20, 21]:
self.assertFalse(qdb.meta_util.validate_filepath_access_by_user(
user, i))
# Note that 15 is the biom from the analysis and 16 is the
# analysis mapping file and here we are testing access
for i in [15, 16]:
self.assertTrue(qdb.meta_util.validate_filepath_access_by_user(
user, i))
# Now shared should not have access to any files
qdb.analysis.Analysis(1).unshare(user)
for i in [1, 2, 3, 4, 5, 9, 12, 15, 16, 17, 18, 19, 20, 21]:
self.assertFalse(qdb.meta_util.validate_filepath_access_by_user(
user, i))
# Now the Analysis is public so the user should have access again. Note
# that we are not using the internal Analysis methods to skip
# validation; thus simplifying the test code
for a in qdb.analysis.Analysis(1).artifacts:
a.visibility = 'public'
# Note that 15 is the biom from the analysis and 16 is the
# analysis mapping file and here we are testing access
for i in [15, 16]:
self.assertTrue(qdb.meta_util.validate_filepath_access_by_user(
user, i))
# returning to private
for a in qdb.analysis.Analysis(1).artifacts:
a.visibility = 'private'
# Now shared has access to public study files
self._set_artifact_public()
for i in [1, 2, 3, 4, 5, 9, 12, 17, 18, 19, 20, 21]:
obs = qdb.meta_util.validate_filepath_access_by_user(user, i)
if i < 3:
self.assertFalse(obs)
else:
self.assertTrue(obs)
# testing that if study.public_raw_download is true we get access
qdb.study.Study(1).public_raw_download = True
for i in [1, 2, 3]:
obs = qdb.meta_util.validate_filepath_access_by_user(user, i)
self.assertTrue(obs)
qdb.study.Study(1).public_raw_download = False
# Test that it doesn't break: if the SampleTemplate hasn't been added
info = {
"timeseries_type_id": 1,
"metadata_complete": True,
"mixs_compliant": True,
"study_alias": "TestStudy",
"study_description": "Description of a test study",
"study_abstract": "No abstract right now...",
"principal_investigator_id": 1,
"lab_person_id": 1
}
study = qdb.study.Study.create(
qdb.user.User('[email protected]'), "Test study", info)
for i in [1, 2, 3, 4, 5, 9, 12, 17, 18, 19, 20, 21]:
obs = qdb.meta_util.validate_filepath_access_by_user(user, i)
if i < 3:
self.assertFalse(obs)
else:
self.assertTrue(obs)
# test in case there is a prep template that failed
qdb.sql_connection.perform_as_transaction(
"INSERT INTO qiita.prep_template (data_type_id) VALUES (2)")
for i in [1, 2, 3, 4, 5, 9, 12, 17, 18, 19, 20, 21]:
obs = qdb.meta_util.validate_filepath_access_by_user(user, i)
if i < 3:
self.assertFalse(obs)
else:
self.assertTrue(obs)
# admin should have access to everything
admin = qdb.user.User('[email protected]')
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT filepath_id FROM qiita.filepath")
fids = qdb.sql_connection.TRN.execute_fetchflatten()
for i in fids:
self.assertTrue(qdb.meta_util.validate_filepath_access_by_user(
admin, i))
# testing access to a prep info file without artifacts
# returning artifacts to private
self._set_artifact_private()
PT = qdb.metadata_template.prep_template.PrepTemplate
md_dict = {
'SKB8.640193': {'center_name': 'ANL',
'center_project_name': 'Test Project',
'ebi_submission_accession': None,
'linkerprimersequence': 'GTGCCAGCMGCCGCGGTAA',
'barcodesequence': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'Illumina',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}
}
md = pd.DataFrame.from_dict(md_dict, orient='index', dtype=str)
# creating prep info on Study(1), which is our default Study
pt = npt.assert_warns(qdb.exceptions.QiitaDBWarning, PT.create, md,
qdb.study.Study(1), "18S")
for idx, _ in pt.get_filepaths():
self.assertFalse(qdb.meta_util.validate_filepath_access_by_user(
user, idx))
# returning to original sharing
PT.delete(pt.id)
qdb.study.Study(1).share(user)
qdb.analysis.Analysis(1).share(user)
qdb.study.Study.delete(study.id)
def test_get_lat_longs(self):
# no public studies should return an empty array
obs = qdb.meta_util.get_lat_longs()
self.assertCountEqual(obs, [])
old_visibility = {}
for pt in qdb.study.Study(1).prep_templates():
old_visibility[pt] = pt.artifact.visibility
pt.artifact.visibility = 'public'
exp = [
[1, 74.0894932572, 65.3283470202],
[1, 57.571893782, 32.5563076447],
[1, 13.089194595, 92.5274472082],
[1, 12.7065957714, 84.9722975792],
[1, 44.9725384282, 66.1920014699],
[1, 10.6655599093, 70.784770579],
[1, 29.1499460692, 82.1270418227],
[1, 35.2374368957, 68.5041623253],
[1, 53.5050692395, 31.6056761814],
[1, 60.1102854322, 74.7123248382],
[1, 4.59216095574, 63.5115213108],
[1, 68.0991287718, 34.8360987059],
[1, 84.0030227585, 66.8954849864],
[1, 3.21190859967, 26.8138925876],
[1, 82.8302905615, 86.3615778099],
[1, 12.6245524972, 96.0693176066],
[1, 85.4121476399, 15.6526750776],
[1, 23.1218032799, 42.838497795],
[1, 43.9614715197, 82.8516734159],
[1, 68.51099627, 2.35063674718],
[1, 0.291867635913, 68.5945325743],
[1, 40.8623799474, 6.66444220187],
[1, 95.2060749748, 27.3592668624],
[1, 78.3634273709, 74.423907894],
[1, 38.2627021402, 3.48274264219]]
obs = qdb.meta_util.get_lat_longs()
self.assertCountEqual(obs, exp)
for k, v in old_visibility.items():
k.artifact.visibility = v
def test_get_lat_longs_EMP_portal(self):
info = {
'timeseries_type_id': 1,
'lab_person_id': None,
'principal_investigator_id': 3,
'metadata_complete': False,
'mixs_compliant': True,
'study_description': 'desc',
'study_alias': 'alias',
'study_abstract': 'abstract'}
study = qdb.study.Study.create(
qdb.user.User('[email protected]'), 'test_study_1', info=info)
qdb.portal.Portal('EMP').add_studies([study.id])
md = {
'my.sample': {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': True,
'dna_extracted': True,
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:51',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'str_column': 'Value for sample 4',
'int_column': 4,
'latitude': 42.42,
'longitude': 41.41,
'taxon_id': 9606,
'scientific_name': 'homo sapiens'}
}
md_ext = pd.DataFrame.from_dict(md, orient='index', dtype=str)
st = qdb.metadata_template.sample_template.SampleTemplate.create(
md_ext, study)
qiita_config.portal = 'EMP'
obs = qdb.meta_util.get_lat_longs()
exp = []
self.assertCountEqual(obs, exp)
qdb.metadata_template.sample_template.SampleTemplate.delete(st.id)
qdb.study.Study.delete(study.id)
def test_update_redis_stats(self):
# helper function to get the values in the stats_daily table
def _get_daily_stats():
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add('SELECT * FROM qiita.stats_daily')
return qdb.sql_connection.TRN.execute_fetchindex()
# checking empty status of stats in DB
self.assertEqual([], _get_daily_stats())
# generate daily stats
qdb.meta_util.update_redis_stats()
portal = qiita_config.portal
# let's first test the dictionaries
vals = [
('number_studies', {b'sandbox': b'0', b'public': b'0',
b'private': b'1'}, r_client.hgetall),
('number_of_samples', {b'sandbox': b'0', b'public': b'0',
b'private': b'27'}, r_client.hgetall),
('per_data_type_stats', {b'No data': b'0'}, r_client.hgetall)]
for k, exp, f in vals:
redis_key = '%s:stats:%s' % (portal, k)
self.assertDictEqual(f(redis_key), exp)
# then the unique values
vals = [
('num_users', b'4', r_client.get),
('lat_longs', b'[]', r_client.get),
('num_studies_ebi', b'1', r_client.get),
('num_samples_ebi', b'27', r_client.get),
('number_samples_ebi_prep', b'54', r_client.get),
('num_processing_jobs', b'14', r_client.get)
# not testing img/time for simplicity
# ('img', r_client.get),
# ('time', r_client.get)
]
        # checking the stats now stored in the DB
db_stats = _get_daily_stats()
# there should be only one set of values
self.assertEqual(1, len(db_stats))
db_stats = dict(db_stats[0])
for k, exp, f in vals:
redis_key = '%s:stats:%s' % (portal, k)
# checking redis values
self.assertEqual(f(redis_key), exp)
# checking DB values; note that redis stores all values as bytes,
# thus we have to convert what's in the DB to bytes
self.assertEqual(
f(redis_key), str.encode(str(db_stats['stats'][k])))
# regenerating stats to make sure that we have 2 rows in the DB
qdb.meta_util.update_redis_stats()
db_stats = _get_daily_stats()
        # now there should be two sets of values
self.assertEqual(2, len(db_stats))
def test_generate_biom_and_metadata_release(self):
level = 'private'
qdb.meta_util.generate_biom_and_metadata_release(level)
portal = qiita_config.portal
working_dir = qiita_config.working_dir
vals = [
('filepath', r_client.get),
('md5sum', r_client.get),
('time', r_client.get)]
# we are storing the [0] filepath, [1] md5sum and [2] time but we are
# only going to check the filepath contents so ignoring the others
tgz = vals[0][1]('%s:release:%s:%s' % (portal, level, vals[0][0]))
tgz = join(working_dir, tgz.decode('ascii'))
self.files_to_remove.extend([tgz])
tmp = topen(tgz, "r:gz")
tgz_obs = [ti.name for ti in tmp]
tmp.close()
        # file names might change due to updates and patches so just check
        # that the prefix exists.
fn = 'processed_data/1_study_1001_closed_reference_otu_table.biom'
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# yes, this file is there twice
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# let's check the next biom
fn = ('processed_data/1_study_1001_closed_reference_otu_table_Silva.'
'biom')
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# now let's check prep info files based on their suffix, just take
        # the first one and check/rm the occurrences of that file
fn_prep = [f for f in tgz_obs
if f.startswith('templates/1_prep_1_')][0]
# 3 times
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
fn_sample = [f for f in tgz_obs if f.startswith('templates/1_')][0]
# 3 times
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
# now we should only have the text file
txt = tgz_obs.pop()
# now it should be empty
self.assertEqual(tgz_obs, [])
tmp = topen(tgz, "r:gz")
fhd = tmp.extractfile(txt)
txt_obs = [line.decode('ascii') for line in fhd.readlines()]
tmp.close()
txt_exp = [
'biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t'
'target gene\tmerging scheme\tartifact software\t'
'parent software\n',
'processed_data/1_study_1001_closed_reference_otu_table.biom\t'
'%s\t%s\t4\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ\t'
'QIIME v1.9.1\tQIIME v1.9.1\n' % (fn_sample, fn_prep),
'processed_data/1_study_1001_closed_reference_otu_table.biom\t'
'%s\t%s\t5\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ\t'
'QIIME v1.9.1\tQIIME v1.9.1\n' % (fn_sample, fn_prep),
'processed_data/1_study_1001_closed_reference_otu_table_Silva.bio'
'm\t%s\t%s\t6\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ\t'
'QIIME v1.9.1\tQIIME v1.9.1' % (fn_sample, fn_prep)]
self.assertEqual(txt_obs, txt_exp)
        # whatever the configuration was, we will change the settings so we can
        # test the other option when dealing with the trailing '/'
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT base_data_dir FROM settings")
obdr = qdb.sql_connection.TRN.execute_fetchlast()
if obdr[-1] == '/':
bdr = obdr[:-1]
else:
bdr = obdr + '/'
qdb.sql_connection.TRN.add(
"UPDATE settings SET base_data_dir = '%s'" % bdr)
bdr = qdb.sql_connection.TRN.execute()
qdb.meta_util.generate_biom_and_metadata_release(level)
# we are storing the [0] filepath, [1] md5sum and [2] time but we are
# only going to check the filepath contents so ignoring the others
tgz = vals[0][1]('%s:release:%s:%s' % (portal, level, vals[0][0]))
tgz = join(working_dir, tgz.decode('ascii'))
tmp = topen(tgz, "r:gz")
tgz_obs = [ti.name for ti in tmp]
tmp.close()
        # file names might change due to updates and patches so just check
        # that the prefix exists.
fn = 'processed_data/1_study_1001_closed_reference_otu_table.biom'
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# yes, this file is there twice
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# let's check the next biom
fn = ('processed_data/1_study_1001_closed_reference_otu_table_Silva.'
'biom')
self.assertTrue(fn in tgz_obs)
tgz_obs.remove(fn)
# now let's check prep info files based on their suffix, just take
        # the first one and check/rm the occurrences of that file
fn_prep = [f for f in tgz_obs
if f.startswith('templates/1_prep_1_')][0]
# 3 times
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
self.assertTrue(fn_prep in tgz_obs)
tgz_obs.remove(fn_prep)
fn_sample = [f for f in tgz_obs if f.startswith('templates/1_')][0]
# 3 times
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
self.assertTrue(fn_sample in tgz_obs)
tgz_obs.remove(fn_sample)
# now we should only have the text file
txt = tgz_obs.pop()
# now it should be empty
self.assertEqual(tgz_obs, [])
tmp = topen(tgz, "r:gz")
fhd = tmp.extractfile(txt)
txt_obs = [line.decode('ascii') for line in fhd.readlines()]
tmp.close()
txt_exp = [
'biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t'
'target gene\tmerging scheme\tartifact software\t'
'parent software\n',
'processed_data/1_study_1001_closed_reference_otu_table.biom\t'
'%s\t%s\t4\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ\t'
'QIIME v1.9.1\tQIIME v1.9.1\n' % (fn_sample, fn_prep),
'processed_data/1_study_1001_closed_reference_otu_table.biom\t'
'%s\t%s\t5\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ\t'
'QIIME v1.9.1\tQIIME v1.9.1\n' % (fn_sample, fn_prep),
'processed_data/1_study_1001_closed_reference_otu_table_Silva.bio'
'm\t%s\t%s\t6\tIllumina\t16S rRNA\t'
'Pick closed-reference OTUs | Split libraries FASTQ'
'\tQIIME v1.9.1\tQIIME v1.9.1' % (fn_sample, fn_prep)]
self.assertEqual(txt_obs, txt_exp)
# returning configuration
qdb.sql_connection.perform_as_transaction(
"UPDATE settings SET base_data_dir = '%s'" % obdr)
# testing public/default release
qdb.meta_util.generate_biom_and_metadata_release()
# we are storing the [0] filepath, [1] md5sum and [2] time but we are
# only going to check the filepath contents so ignoring the others
tgz = vals[0][1]('%s:release:%s:%s' % (portal, 'public', vals[0][0]))
tgz = join(working_dir, tgz.decode('ascii'))
tmp = topen(tgz, "r:gz")
tgz_obs = [ti.name for ti in tmp]
tmp.close()
# the public release should only have the txt file
self.assertEqual(len(tgz_obs), 1)
txt = tgz_obs.pop()
tmp = topen(tgz, "r:gz")
fhd = tmp.extractfile(txt)
txt_obs = [line.decode('ascii') for line in fhd.readlines()]
tmp.close()
# we should only get the header
txt_exp = [
'biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t'
'target gene\tmerging scheme\tartifact software\t'
'parent software']
self.assertEqual(txt_obs, txt_exp)
def test_generate_plugin_releases(self):
qdb.meta_util.generate_plugin_releases()
working_dir = qiita_config.working_dir
tgz = r_client.get('release-archive:filepath')
with topen(join(working_dir, tgz.decode('ascii')), "r:gz") as tmp:
tgz_obs = [ti.name for ti in tmp]
# the expected folder/file in the tgz should be named as the time
# when it was created so let's test that
time = r_client.get('release-archive:time').decode('ascii').replace(
'-', '').replace(':', '').replace(' ', '-')
self.assertEqual(tgz_obs, [time])
if __name__ == '__main__':
main()
| bsd-3-clause |
o-kei/design-computing-aij | ch5/plot.py | 1 | 1879 | import numpy as np # モジュールnumpyをnpという名前で読み込み
import csv # モジュールcsvの読み込み
import matplotlib.pyplot as plt # モジュールmatplotlibのpyplot関数をplt
# という名前で読み込み
reader = csv.reader(open('out.csv', 'r')) # 先ほど出力したoutput.csvの読み込み
f_history = [] # 目的関数の履歴
x1_history, x2_history = [], [] # 設計変数の履歴
for row in reader: # 1行目はラベル行なので読み飛ばし
break
for row in reader:
f_history.append(float(row[1])) # 目的関数の読み込み
x1_history.append(float(row[2])) # 設計変数の読み込み
x2_history.append(float(row[3]))
plt.figure(figsize=(15, 8)) # グラフ描画キャンバスを横縦比15:8で生成
x1 = np.arange(1.25, 4.75, 0.1) # 1.25〜4.75まで0.1刻みのベクトル
x2 = np.arange(0.25, 3.75, 0.1) # 0.25〜3.75まで0.1刻みのベクトル
X1, X2 = np.meshgrid(x1, x2) # x1,x2を組み合わせた行列
f = np.vectorize(lambda x1, x2: 0.50 * (x1 - 3.0) **
2 + (x2 - 2.0)**2) # x1,x2を引数として
# 目的関数を返す関数
plt.subplot(1, 2, 1) # 1行目の2列の並びの1列目にグラフを生成
plt.xlabel('x1') # 水平方向のラベル
plt.ylabel('x2') # 鉛直方向のラベル
C = plt.contour(X1, X2, f(X1, X2), 20, colors='black') # 等高線データ生成
plt.clabel(C, inline=1, fontsize=10) # 等高線図生成
plt.plot(x1_history, x2_history) # 目的関数の探索経路生成
plt.subplot(1, 2, 2) # 1行目の2列の並びの2列目にグラフを生成
plt.xlabel('step') # 水平方向のラベル
plt.ylabel('f(x)') # 鉛直方向のラベル
plt.plot(f_history) # 目的関数の履歴図の生成
# (x成分を省略すれば自動的に横軸はstep数となる)
plt.show() # グラフを画面に表示する
| mit |
Tong-Chen/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 8 | 1721 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import pylab as pl
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
pl.legend()
pl.title('LDA of IRIS dataset')
pl.show()
| bsd-3-clause |
thesuperzapper/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
    # Add a ReLU for non-linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
fengzhyuan/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
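# A hypothetical usage sketch (not part of scikit-learn) for load_files() above: it builds
# the expected two-level folder layout in a temporary directory and loads it back, with the
# folder names ('neg', 'pos') becoming the target classes.
def _example_load_files():
    import tempfile
    root = tempfile.mkdtemp()
    for label in ('neg', 'pos'):
        folder = join(root, label)
        makedirs(folder)
        with open(join(folder, 'doc1.txt'), 'w') as f:
            f.write('sample text for the %s class' % label)
    bunch = load_files(root, encoding='utf-8')
    return bunch.target_names, bunch.data, bunch.target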
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
aceofwings/Evt-Gateway | applications/vechicleIDS/controllers/maincontroller.py | 1 | 3214 | from gateway.can.controllers.base import BaseController
from gateway.core.service import Service
from gateway.core.service import Service
from threading import Thread
from collections import deque
from matplotlib import pyplot, animation
from lib import changepoint
import numpy as np
import time
pyplot.ion()
windowsize = 50
SUPPOSED_MESSAGE_FREQUENCY = 50  # hz
THRESHOLD_LOW = 45  # hz
THRESHOLD_HIGH = 55  # hz
XMAX = 1000
class MainController(BaseController):
allotment = [0] * 3000
runningavg= [0] * int((XMAX/windowsize))
runningvariance = [0] * int((XMAX/windowsize))
runningavgx= [i for i in range(0,XMAX,windowsize)]
cps = [0] * 3000
lastmessage = None
running_avg = 0
current = None
last = None
num_of_message = 0
n = 0
n2 = 0
xyline = 0
def __init__(self):
print("intialize MainController")
self.firstMessage = True
self.secondMessage = True
self.figure = pyplot.figure()
self.graph = self.figure.add_subplot(111)
self.plot , = self.graph.plot(self.allotment)
self.runningplot , = self.graph.plot(self.runningavg)
self.animation = animation.FuncAnimation(self.figure,self.update)
pyplot.ylim(0,10)
pyplot.xlim(0,200)
pyplot.title('50 message windows', fontsize=18)
pyplot.xlabel('Samples',fontsize=18)
pyplot.ylabel('# of Messages', fontsize=18)
#service = Service(target=evtcurses.startCurses,clean_up=self.end)
# pyplot.show()
@BaseController.handleEvt(0x81)
def handleMessage(self,message):
message.Cell_no0()
if self.last is None:
self.last = time.time()
if self.current is None:
self.current = time.time()
self.num_of_message += 1
if self.current - self.last > .05:
self.last = self.current
self.allotment[self.n] = self.num_of_message
self.num_of_message = 0
self.simple_move_average()
self.n += 1
self.current = time.time()
def simple_move_average(self):
if self.n % windowsize == 0:
window = self.allotment[(self.n - windowsize): self.n ]
if len(window) != 0:
self.runningavg[self.n2] = sum(window) / len(window)
# self.runningvariance[self.n2] = sum([(xi - self.runningavg[self.n2]) ** 2 for xi in window]) / len(window)
changepoints = changepoint.pelt(changepoint.normal_mean(window,2), len(window))
if len(changepoints) > 1:
self.cps[self.n2] = self.runningavgx[self.n2]
self.n2 += 1
def update(self,void):
self.plot.set_ydata(self.allotment)
self.runningplot.set_ydata(self.runningavg)
self.runningplot.set_xdata(self.runningavgx)
self.graph.vlines(self.cps,0,20)
# def running_avg_linear_weight(self):
# """take the arithmatic average """
# sum = 0
# for i in range(len(self.allotment)):
# sum += self.allotment[i]
# self.running_avg = (sum / len(self.allotment))
# #print(self.running_avg)
def end(self):
print("ended")
| mit |
apmoore1/semeval | svrs/feature_extractors/Tokeniser.py | 1 | 1328 | from semeval import helper as helper
from sklearn.base import TransformerMixin
from sklearn.base import BaseEstimator
class Tokeniser(BaseEstimator, TransformerMixin):
def __init__(self, ngram_range=(1,1), tokeniser_func=helper.unitok_tokens):
self.ngram_range = ngram_range
self.tokeniser_func = tokeniser_func
def fit(self, texts, y=None):
        '''Kept for consistency with the TransformerMixin'''
return self
def fit_transform(self, texts, y=None):
'''See self.transform'''
return self.transform(texts)
def transform(self, texts):
        '''Given a list of texts, return a list of lists of strings, where
        each inner list contains the tokens of the corresponding text. The
        indices are aligned: the tokens of texts[0] are in output[0].
        The tokeniser is defined by self.tokeniser_func; stopwords can be
        applied by setting self.stopword and the text can be normalised
        through self.normalise. All of this is an interface to:
        SentimentPipeline.pre_processing.process method tokenise'''
tokens = [self.tokeniser_func(text) for text in texts]
n_gram_tokens = helper.ngrams(tokens, self.ngram_range)
return n_gram_tokens
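# Hedged usage sketch (added for illustration; the toy texts below are
# assumptions and `Tokeniser` only relies on the helper functions imported
# above):
def _tokeniser_example():
    texts = ['The film was great', 'The plot was thin']
    tokeniser = Tokeniser(ngram_range=(1, 2))
    # One list of uni- and bi-gram tokens per input text, index-aligned.
    return tokeniser.fit_transform(texts)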
| gpl-3.0 |
paladin74/neural-network-animation | matplotlib/bezier.py | 10 | 15695 | """
A module providing some utility functions regarding bezier path manipulation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.path import Path
from operator import xor
import warnings
class NonIntersectingPathException(ValueError):
pass
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a * d - b * c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_ * line1_rhs + b_ * line2_rhs
y = c_ * line1_rhs + d_ * line2_rhs
return x, y
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    the locations of the two points located along its perpendicular line at
    a distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length * cos_t1 + cx, length * sin_t1 + cy
x2, y2 = length * cos_t2 + cx, length * sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1 - t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
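# Hedged example (not in the upstream module): split a quadratic bezier given
# by three arbitrary control points at t = 0.5. The two halves share the split
# point, so the last control point of the left half equals the first control
# point of the right half.
def _split_de_casteljau_example():
    beta = [(0., 0.), (1., 2.), (2., 0.)]
    left, right = split_de_casteljau(beta, 0.5)
    assert np.allclose(left[-1], right[0])
    return left, right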
# FIXME spelling mistake in the name of the parameter ``tolerence``
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise NonIntersectingPathException(
"the segment does not seem to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0] - end[0]) ** 2 + \
(start[1] - end[1]) ** 2 < tolerence ** 2:
return t0, t1
# calculate the middle point
middle_t = 0.5 * (t0 + t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
class BezierSegment(object):
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier lines can be supported by simply adding
    # corresponding values.
_binom_coeff = {1: np.array([1., 1.]),
2: np.array([1., 2., 1.]),
3: np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <= n <= 3
        is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:, 0]
yy = _control_points[:, 1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1. - t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
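# Hedged example (not in the upstream module; the control points are
# arbitrary): a quadratic BezierSegment evaluated at t = 0 and t = 1 returns
# its first and last control points, and t = 0.5 gives a point halfway along
# the parameterization.
def _bezier_segment_example():
    bz = BezierSegment([(0., 0.), (1., 2.), (2., 0.)])
    return bz.point_at_t(0.), bz.point_at_t(0.5), bz.point_at_t(1.)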
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside
the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0 + t1) / 2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
which it intersect with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t * r + cx, sin_t * r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax,
tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = next(path_iter)
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold = 0
i = 1
for ctl_points, command in path_iter:
iold = i
i += len(ctl_points) // 2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = list(zip(bezier_path[::2], bezier_path[1::2]))
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r ** 2
def _f(xy):
x, y = xy
return (x - cx) ** 2 + (y - cy) ** 2 < r2
return _f
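# Hedged example (not in the upstream module): bracket the parameter at which
# the straight segment (0, 0) -> (3, 0) leaves a unit circle centred at the
# origin; the returned t0, t1 converge around t = 1/3.
def _inside_circle_example():
    bz = BezierSegment([(0., 0.), (3., 0.)])
    return find_bezier_t_intersecting_with_closedpath(
        bz.point_at_t, inside_circle(0., 0., 1.))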
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1 - x0, y1 - y0
d = (dx * dx + dy * dy) ** .5
return dx / d, dy / d
def check_if_parallel(dx1, dy1, dx2, dy2, tolerence=1.e-5):
""" returns
    * 1 if the two lines are parallel in the same direction
    * -1 if the two lines are parallel in opposite directions
    * False (i.e. 0) otherwise
"""
theta1 = np.arctan2(dx1, dy1)
theta2 = np.arctan2(dx2, dy2)
dtheta = np.abs(theta1 - theta2)
if dtheta < tolerence:
return 1
elif np.abs(dtheta - np.pi) < tolerence:
return -1
else:
return False
def get_parallels(bezier2, width):
"""
Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    # c1 and c2 are control points representing the beginning and end of the
    # bezier line.
    # cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
parallel_test = check_if_parallel(c1x - cmx, c1y - cmy,
cmx - c2x, cmy - c2y)
if parallel_test == -1:
warnings.warn(
"Lines do not intersect. A straight line is used instead.")
#cmx, cmy = 0.5*(c1x+c2x), 0.5*(c1y+c2y)
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, c2x, c2y)
cos_t2, sin_t2 = cos_t1, sin_t1
else:
        # t1 and t2 are the angles between (c1, cm) and (cm, c2). They are
        # also the angles of the tangential lines of the path at c1 and c2
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
    # find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
    # bezier path at a distance of width. Same thing for c2_left and
    # c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersecting point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
    # t2. Same with cm_right.
if parallel_test != 0:
        # a special case for a straight line, i.e., the angle between the two
        # lines is smaller than some (arbitrary) value.
cmx_left, cmy_left = \
0.5 * (c1x_left + c2x_left), 0.5 * (c1y_left + c2y_left)
cmx_right, cmy_right = \
0.5 * (c1x_right + c2x_right), 0.5 * (c1y_right + c2y_right)
else:
cmx_left, cmy_left = \
get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = \
get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left),
(cmx_left, cmy_left),
(c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right),
(cmx_right, cmy_right),
(c2x_right, c2y_right)]
return path_left, path_right
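# Hedged example (not in the upstream module; the control points are
# arbitrary): offset a quadratic bezier by a width of 0.1, obtaining the
# control points of two curves lying roughly parallel to it on either side.
def _get_parallels_example():
    bezier2 = [(0., 0.), (1., 1.), (2., 0.)]
    left, right = get_parallels(bezier2, 0.1)
    return left, right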
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parametric value 0, 0.5, and 1.
"""
cmx = .5 * (4 * mmx - (c1x + c2x))
cmy = .5 * (4 * mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Similar to get_parallels, returns the control points of two quadratic
    bezier lines having a width roughly parallel to the given one, separated
    by *width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between (c1, cm) and (cm, c3).
    # They are also the angles of the tangential lines of the path at c1 and c3
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangential lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width * w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width * w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and
# c12-c23
c12x, c12y = (c1x + cmx) * .5, (c1y + cmy) * .5
c23x, c23y = (cmx + c3x) * .5, (cmy + c3y) * .5
c123x, c123y = (c12x + c23x) * .5, (c12y + c23y) * .5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width * wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
def make_path_regular(p):
"""
fill in the codes if None.
"""
c = p.codes
if c is None:
c = np.empty(p.vertices.shape[:1], "i")
c.fill(Path.LINETO)
c[0] = Path.MOVETO
return Path(p.vertices, c)
else:
return p
def concatenate_paths(paths):
"""
concatenate list of paths into a single path.
"""
vertices = []
codes = []
for p in paths:
p = make_path_regular(p)
vertices.append(p.vertices)
codes.append(p.codes)
_path = Path(np.concatenate(vertices),
np.concatenate(codes))
return _path
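# Hedged example (not in the upstream module): two simple two-point paths are
# concatenated into one; make_path_regular fills in the missing codes so each
# piece starts with a MOVETO followed by LINETOs.
def _concatenate_paths_example():
    p1 = Path([(0., 0.), (1., 0.)])
    p2 = Path([(1., 0.), (1., 1.)])
    return concatenate_paths([p1, p2])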
| mit |
dimitri-yatsenko/pipeline | python/pipeline/utils/eye_tracking.py | 1 | 45937 | from collections import defaultdict
from itertools import count
from operator import attrgetter
from os import path as op
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
import pickle
from ..exceptions import PipelineException
try:
import cv2
except ImportError:
print("Could not find cv2. You won't be able to use the pupil tracker.")
ANALOG_PACKET_LEN = 2000
class CVROIGrabber:
start = None
end = None
roi = None
def __init__(self, img):
self.img = img
self.draw_img = np.asarray(img / img.max(), dtype=float)
self.mask = 1 + 0 * img
self.exit = False
self.r = 40
self.X, self.Y = np.mgrid[:img.shape[0], :img.shape[1]]
def grab(self):
print('Contrast (std)', np.std(self.img))
img = np.asarray(self.img / self.img.max(), dtype=float)
cv2.namedWindow('real image')
cv2.setMouseCallback('real image', self, 0)
while not self.exit:
cv2.imshow('real image', img)
if (cv2.waitKey(0) & 0xFF) == ord('q'):
cv2.waitKey(1)
cv2.destroyAllWindows()
break
cv2.waitKey(2)
def __call__(self, event, x, y, flags, params):
# img = np.asarray(self.img , dtype=np.uint8)[...,None] * np.ones((1,1,3), dtype=np.uint8)
img = np.asarray(self.img / self.img.max(), dtype=float)
cv2.imshow('real image', self.draw_img)
if event == cv2.EVENT_LBUTTONDOWN:
print('Start Mouse Position: ' + str(x) + ', ' + str(y))
self.start = np.asarray([x, y])
elif event == cv2.EVENT_LBUTTONUP:
self.end = np.asarray([x, y])
x = np.vstack((self.start, self.end))
tmp = np.hstack((x.min(axis=0), x.max(axis=0)))
roi = np.asarray([[tmp[1], tmp[3]], [tmp[0], tmp[2]]], dtype=int) + 1
crop = img[roi[0, 0]:roi[0, 1], roi[1, 0]:roi[1, 1]]
crop = np.asarray(crop / crop.max(), dtype=float)
self.roi = roi
cv2.imshow('crop', crop)
# m = (img * self.mask).copy() # needed for a weird reason
self.draw_img = (img * self.mask).copy()
cv2.rectangle(self.draw_img, tuple(self.start), tuple(self.end), (0, 255, 0), 2)
cv2.imshow('real image', self.draw_img)
key = (cv2.waitKey(0) & 0xFF)
if key == ord('q'):
cv2.destroyAllWindows()
self.exit = True
elif key == ord('c'):
self.mask = 0 * self.mask + 1
elif event == cv2.EVENT_MBUTTONDOWN:
self.mask[(self.X - y) ** 2 + (self.Y - x) ** 2 < self.r ** 2] = 0.
self.draw_img[(self.X - y) ** 2 + (self.Y - x) ** 2 < self.r ** 2] = 0.
cv2.imshow('real image', self.draw_img)
key = (cv2.waitKey(0) & 0xFF)
if key == ord('q'):
cv2.destroyAllWindows()
self.exit = True
elif key == ord('c'):
self.mask = 0 * self.mask + 1
import math
class Point:
""" A point in a 2-d figure. """
def __init__(self, x=None, y=None):
self.x = x
self.y = y
def is_near(self, x, y, thresh=4):
distance = math.sqrt((self.x - x)**2 + (self.y - y)**2)
return distance < thresh
def __repr__(self):
return 'Point({}, {})'.format(self.x, self.y)
class ROISelector:
""" Matplotlib interface to select an ROI from an image
Arguments:
image (np.array): A 2-d image to use for background.
Usage:
roi_selector = ROISelector(img) # opens a window that lets you select an ROI
(x1, y1), (x2, y2) = roi_selector.roi # P1 is always the upper left corner and P2 is the lower right one
"""
def __init__(self, image):
self.image = image
self.point1 = None
self.point2 = None
self.current = None
# Create figure
fig = plt.figure()
plt.imshow(image)
plt.gca().set_aspect('equal')
plt.gray()
plt.title('Click and drag to select ROI. Press <ENTER> to save.')
# Bind events
fig.canvas.mpl_connect('button_press_event', self.on_click)
fig.canvas.mpl_connect('motion_notify_event', self.on_move)
fig.canvas.mpl_connect('button_release_event', self.on_release)
fig.canvas.mpl_connect('key_press_event', self.on_press)
plt.show(block=True)
@property
def roi(self):
if self.point1 is None or self.point2 is None:
raise ValueError('No ROI was drawn')
else:
points = np.sort([[self.point1.x, self.point1.y],
[self.point2.x, self.point2.y]], axis=0) + 0.5
# we add 0.5 to have the upper corner be (0, 0) rather than (-0.5, -0.5)
return tuple(points[0]), tuple(points[1])
def on_click(self, event):
""" Start a new ROI or modify a previously drawn ROI"""
if event.xdata is not None and event.ydata is not None:
first_click = self.point1 is None or self.point2 is None
if (first_click or not (self.point1.is_near(event.xdata, event.ydata) or
self.point2.is_near(event.xdata, event.ydata))):
self.point1 = Point(event.xdata, event.ydata)
self.point2 = Point(event.xdata, event.ydata)
self.current = self.point2
else: # click is close to a previous point
self.current = (self.point2 if self.point2.is_near(event.xdata, event.ydata)
else self.point1)
self.current.x = event.xdata
self.current.y = event.ydata
self.redraw()
def on_move(self, event):
""" Update the current point if it is being dragged. """
if (self.current is not None and event.xdata is not None and
event.ydata is not None):
self.current.x = event.xdata
self.current.y = event.ydata
self.redraw()
def on_release(self, event):
""" Release the current point."""
self.current = None
self.redraw()
def on_press(self, event):
""" Close window if <ENTER> is pressed."""
if event.key == 'enter':
plt.close()
def redraw(self):
""" Draw points and a rectangle between them"""
plt.gca().clear()
plt.title('Click and drag to select ROI. Press <ENTER> to save.')
plt.imshow(self.image)
self.draw_rectangle(self.point1, self.point2, color=('dodgerblue' if self.current
else 'lime'))
plt.draw()
def draw_rectangle(self, p1, p2, color='dodgerblue'):
low_x, high_x = (p1.x, p2.x) if p1.x <= p2.x else (p2.x, p1.x)
low_y, high_y = (p1.y, p2.y) if p1.y <= p2.y else (p2.y, p1.y)
plt.plot([low_x, low_x], [low_y, high_y], color=color, lw=2)
plt.plot([high_x, high_x], [low_y, high_y], color=color, lw=2)
plt.plot([low_x, high_x], [low_y, low_y], color=color, lw=2)
plt.plot([low_x, high_x], [high_y, high_y], color=color, lw=2)
plt.plot(p1.x, p1.y, 'ok', mfc='gold')
plt.plot(p2.x, p2.y, 'ok', mfc='deeppink')
class PointLabeler:
""" Matplotlib interface to label points in an image.
Arguments:
image (np.array): A 2-d image to use for background.
percentile (float): Higher percentile used to clip the image to improve contrast.
Usage:
point_labeler = PointLabeler(img) # opens a window that lets you select an ROI
[[p1.x, p1.y], [p2.x, p2.y], ...] = point_labeler.points
"""
def __init__(self, image, percentile=100):
self.image = image
self._points = []
self.current = None
self.percentile = percentile
self._vmax = np.percentile(image, percentile) # vmax send to plt.imshow()
# Create figure
fig = plt.figure(figsize=(12, 12))
plt.imshow(image, vmax=self._vmax)
plt.gca().set_aspect('equal')
plt.gray()
plt.title('Click/drag points. Press d to delete last point, <ENTER> to save.')
# Bind events
fig.canvas.mpl_connect('button_press_event', self.on_click)
fig.canvas.mpl_connect('button_release_event', self.on_release)
fig.canvas.mpl_connect('key_press_event', self.on_press)
plt.show(block=True)
@property
def points(self):
return [[p.x + 0.5, p.y + 0.5] for p in self._points] # 0.5 to have the upper corner be (0, 0) rather than (-0.5, -0.5)
def on_click(self, event):
""" Create a new point or select a previous point. """
if event.xdata is not None and event.ydata is not None:
nearby_point = [p.is_near(event.xdata, event.ydata) for p in self._points]
if len(self._points) == 0 or not any(nearby_point):
new_point = Point()
self._points.append(new_point)
self.current = new_point
else:
self.current = self._points[nearby_point.index(True)]
def on_release(self, event):
""" Save point and release."""
if (self.current is not None and event.xdata is not None and
event.ydata is not None):
self.current.x = event.xdata
self.current.y = event.ydata
self.current = None
self.redraw()
def on_press(self, event):
""" Close window if <ENTER> is pressed."""
if event.key == 'enter':
plt.close()
if event.key == 'd':
if len(self._points) > 0:
self._points.pop()
self.redraw()
if event.key == '=' or event.key == '-':
self.percentile += (1 if event.key == '-' else -1)
self.percentile = np.clip(self.percentile, 0, 100)
self._vmax = np.percentile(self.image, self.percentile)
self.redraw()
def redraw(self):
""" Draw the points and lines between them. """
plt.gca().clear()
plt.title('Click/drag points. Press d to delete last point, <ENTER> to save.')
plt.imshow(self.image, vmax=self._vmax)
for i, p in enumerate(self._points):
plt.plot(p.x, p.y, 'ok', mfc='C{}'.format(i%10))
for p1, p2 in zip(self._points[:-1], self._points[1:]):
plt.plot([p1.x, p2.x], [p1.y, p2.y], color='lime', lw=1.5)
plt.draw()
class PupilTracker:
"""
Parameters:
perc_high : float # upper percentile for bright pixels
perc_low : float # lower percentile for dark pixels
perc_weight : float # threshold will be perc_weight*perc_low + (1- perc_weight)*perc_high
relative_area_threshold : float # enclosing rotating rectangle has to have at least that amount of area
ratio_threshold : float # ratio of major and minor radius cannot be larger than this
error_threshold : float # threshold on the RMSE of the ellipse fit
min_contour_len : int # minimal required contour length (must be at least 5)
margin : float # relative margin the pupil center should not be in
contrast_threshold : float # contrast below that threshold are considered dark
speed_threshold : float # eye center can at most move that fraction of the roi between frames
dr_threshold : float # maximally allow relative change in radius
"""
def __init__(self, param, mask=None):
self._params = param
self._center = None
self._radius = None
self._mask = mask
self._last_detection = 1
self._last_ellipse = None
@staticmethod
def goodness_of_fit(contour, ellipse):
center, size, angle = ellipse
angle *= np.pi / 180
err = 0
for coord in contour.squeeze().astype(np.float):
posx = (coord[0] - center[0]) * np.cos(-angle) - (coord[1] - center[1]) * np.sin(-angle)
posy = (coord[0] - center[0]) * np.sin(-angle) + (coord[1] - center[1]) * np.cos(-angle)
err += ((posx / size[0]) ** 2 + (posy / size[1]) ** 2 - 0.25) ** 2
return np.sqrt(err / len(contour))
@staticmethod
def restrict_to_long_axis(contour, ellipse, corridor):
center, size, angle = ellipse
angle *= np.pi / 180
R = np.asarray([[np.cos(-angle), - np.sin(-angle)], [np.sin(-angle), np.cos(-angle)]])
contour = np.dot(contour.squeeze() - center, R.T)
contour = contour[np.abs(contour[:, 0]) < corridor * ellipse[1][1] / 2]
return (np.dot(contour, R) + center).astype(np.int32)
def get_pupil_from_contours(self, contours, small_gray, mask, show_matching=5):
ratio_thres = self._params['ratio_threshold']
area_threshold = self._params['relative_area_threshold']
error_threshold = self._params['error_threshold']
min_contour = self._params['min_contour_len']
margin = self._params['margin']
speed_thres = self._params['speed_threshold']
dr_thres = self._params['dr_threshold']
err = np.inf
best_ellipse = None
best_contour = None
kernel = np.ones((3, 3))
results, cond = defaultdict(list), defaultdict(list)
for j, cnt in enumerate(contours):
mask2 = cv2.erode(mask, kernel, iterations=1)
idx = mask2[cnt[..., 1], cnt[..., 0]] > 0
cnt = cnt[idx]
if len(cnt) < min_contour: # otherwise fitEllipse won't work
continue
ellipse = cv2.fitEllipse(cnt)
((x, y), axes, angle) = ellipse
if min(axes) == 0: # otherwise ratio won't work
continue
ratio = max(axes) / min(axes)
area = np.prod(ellipse[1]) / np.prod(small_gray.shape)
curr_err = self.goodness_of_fit(cnt, ellipse)
results['ratio'].append(ratio)
results['area'].append(area)
results['rmse'].append(curr_err)
results['x coord'].append(x / small_gray.shape[1])
results['y coord'].append(y / small_gray.shape[0])
center = np.array([x / small_gray.shape[1], y / small_gray.shape[0]])
r = max(axes)
dr = 0 if self._radius is None else np.abs(r - self._radius) / self._radius
dx = 0 if self._center is None else np.sqrt(np.sum((center - self._center) ** 2))
results['dx'].append(dx)
results['dr/r'].append(dr)
matching_conditions = 1 * (ratio <= ratio_thres) + 1 * (area >= area_threshold) \
+ 1 * (curr_err < error_threshold) \
+ 1 * (margin < center[0] < 1 - margin) \
+ 1 * (margin < center[1] < 1 - margin) \
+ 1 * (dx < speed_thres * self._last_detection) \
+ 1 * (dr < dr_thres * self._last_detection)
cond['ratio'].append(ratio <= ratio_thres)
cond['area'].append(area >= area_threshold)
cond['rmse'].append(curr_err < error_threshold)
cond['x coord'].append(margin < center[0] < 1 - margin)
cond['y coord'].append(margin < center[1] < 1 - margin)
cond['dx'].append(dx < speed_thres * self._last_detection)
cond['dr/r'].append(dr < dr_thres * self._last_detection)
results['conditions'] = matching_conditions
cond['conditions'].append(True)
if curr_err < err and matching_conditions == 7:
best_ellipse = ellipse
best_contour = cnt
err = curr_err
cv2.ellipse(small_gray, ellipse, (0, 0, 255), 2)
elif matching_conditions >= show_matching:
cv2.ellipse(small_gray, ellipse, (255, 0, 0), 2)
if best_ellipse is None:
df = pd.DataFrame(results)
df2 = pd.DataFrame(cond)
print('-', end="", flush=True)
if 'conditions' in df.columns and np.any(df['conditions'] >= show_matching):
idx = df['conditions'] >= show_matching
df = df[idx]
df2 = df2[idx]
df[df2] = np.nan
print("\n", df, flush=True)
self._last_detection += 1
else:
self._last_detection = 1
return best_contour, best_ellipse
_running_avg = None
def preprocess_image(self, frame, eye_roi):
h = int(self._params['gaussian_blur'])
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_std = np.std(gray)
small_gray = gray[slice(*eye_roi[0]), slice(*eye_roi[1])]
        # Manual meso settings
if 'extreme_meso' in self._params and self._params['extreme_meso']:
c = self._params['running_avg']
p = self._params['exponent']
if self._running_avg is None:
self._running_avg = np.array(small_gray / 255) ** p * 255
else:
self._running_avg = c * np.array(small_gray / 255) ** p * 255 + (1 - c) * self._running_avg
small_gray = self._running_avg.astype(np.uint8)
cv2.imshow('power', small_gray)
# small_gray += self._running_avg.astype(np.uint8) - small_gray # big hack
# --- mesosetting end
blur = cv2.GaussianBlur(small_gray, (2 * h + 1, 2 * h + 1), 0) # play with blur
_, thres = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return gray, small_gray, img_std, thres, blur
@staticmethod
def display(gray, blur, thres, eye_roi, fr_count, n_frames, ncontours=0, contour=None, ellipse=None,
eye_center=None,
font=None):
if font is None:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.imshow('blur', blur)
cv2.imshow('threshold', thres)
cv2.putText(gray, "Frames {fr_count}/{frames} | Found contours {ncontours}".format(fr_count=fr_count,
frames=n_frames,
ncontours=ncontours),
(10, 30), font, 1, (255, 255, 255), 2)
# cv.drawContours(mask, contours, -1, (255), 1)
if contour is not None and ellipse is not None and eye_center is not None:
ellipse = list(ellipse)
ellipse[0] = tuple(eye_center)
ellipse = tuple(ellipse)
cv2.drawContours(gray, [contour], 0, (255, 0, 0), 1, offset=tuple(eye_roi[::-1, 0]))
cv2.ellipse(gray, ellipse, (0, 0, 255), 2)
epy, epx = np.round(eye_center).astype(int)
gray[epx - 3:epx + 3, epy - 3:epy + 3] = 0
cv2.imshow('frame', gray)
def track(self, videofile, eye_roi, display=False):
contrast_low = self._params['contrast_threshold']
mask_kernel = np.ones((3, 3))
print("Tracking videofile", videofile)
cap = cv2.VideoCapture(videofile)
traces = []
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fr_count = 0
if self._mask is not None:
small_mask = self._mask[slice(*eye_roi[0]), slice(*eye_roi[1])].squeeze()
else:
small_mask = np.ones(np.diff(eye_roi, axis=1).squeeze().astype(int), dtype=np.uint8)
while cap.isOpened():
if fr_count >= n_frames:
print("Reached end of videofile ", videofile)
break
# --- read frame
ret, frame = cap.read()
fr_count += 1
# --- if we don't get a frame, don't add any tracking results
if not ret:
traces.append(dict(frame_id=fr_count))
continue
# --- print out if there's not display
if fr_count % 500 == 0:
print("\tframe ({}/{})".format(fr_count, n_frames))
            # --- preprocess and threshold images
gray, small_gray, img_std, thres, blur = self.preprocess_image(frame, eye_roi)
# --- if contrast is too low, skip it
if img_std < contrast_low:
traces.append(dict(frame_id=fr_count,
frame_intensity=img_std))
print('_', end="", flush=True)
if display:
self.display(gray, blur, thres, eye_roi, fr_count, n_frames)
continue
# --- detect contours
ellipse, eye_center, contour = None, None, None
if self._last_ellipse is not None:
mask = np.zeros(small_mask.shape, dtype=np.uint8)
cv2.ellipse(mask, tuple(self._last_ellipse), (255), thickness=cv2.FILLED)
# cv2.drawContours(mask, [self._last_contour], -1, (255), thickness=cv2.FILLED)
mask = cv2.dilate(mask, mask_kernel, iterations=self.dilation_iter.value)
thres *= mask
thres *= small_mask
_, contours, hierarchy1 = cv2.findContours(thres.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour, ellipse = self.get_pupil_from_contours(contours, blur, small_mask)
self._last_ellipse = ellipse
if contour is None:
traces.append(dict(frame_id=fr_count, frame_intensity=img_std))
else:
eye_center = eye_roi[::-1, 0] + np.asarray(ellipse[0])
self._center = np.asarray(ellipse[0]) / np.asarray(small_gray.shape[::-1])
self._radius = max(ellipse[1])
traces.append(dict(center=eye_center,
major_r=np.max(ellipse[1]),
rotated_rect=np.hstack(ellipse),
contour=contour.astype(np.int16),
frame_id=fr_count,
frame_intensity=img_std
))
if display:
self.display(self._mask * gray if self._mask is not None else gray, blur, thres, eye_roi,
fr_count, n_frames, ellipse=ellipse,
eye_center=eye_center, contour=contour, ncontours=len(contours))
if (cv2.waitKey(1) & 0xFF == ord('q')):
raise PipelineException('Tracking aborted')
cap.release()
cv2.destroyAllWindows()
return traces
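# Hedged configuration sketch (added; the numeric values are illustrative
# assumptions, not tuned pipeline defaults). The keys mirror the parameter
# names documented in the PupilTracker docstring plus the 'gaussian_blur'
# key read in preprocess_image.
def _pupil_tracker_config_example():
    params = dict(relative_area_threshold=0.002, ratio_threshold=1.5,
                  error_threshold=0.1, min_contour_len=5, margin=0.02,
                  contrast_threshold=20., speed_threshold=0.2,
                  dr_threshold=0.2, gaussian_blur=5)
    return PupilTracker(params)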
def adjust_gamma(image, gamma=1.0):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
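# Hedged example (added; the synthetic frame is an assumption): gamma
# correction of an 8-bit image through the 256-entry lookup table built above.
def _adjust_gamma_example():
    frame = np.full((4, 4), 100, dtype=np.uint8)   # mid-gray test patch
    return adjust_gamma(frame, gamma=1.5)          # brightened copy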
def identity(x):
return x
def div10(x):
return x/10
class Parameter:
def __init__(self, name, value, min=None, max=None, log_size=None,
set_transform=None, get_transform=None):
self._value = value
self.name = name
self.min = min
self.max = max
self.log_size = log_size
self.set_transform = set_transform if set_transform is not None else identity
self.get_transform = get_transform if get_transform is not None else identity
self.flush_log()
@property
def value(self):
return self.get_transform(self._value)
def set(self, val):
self._value = self.set_transform(val)
if self.min is not None:
self._value = max(self.min, self._value)
if self.max is not None:
self._value = min(self._value, self.max)
print(self.name, 'new value:', self._value)
def log(self, i):
self._log[i] = self.value
@property
def logtrace(self):
return np.array(self._log)
def flush_log(self):
if self.log_size is not None:
self._log = [None] * self.log_size
else:
self._log = None
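# Hedged example (added; the values are illustrative): a clamped Parameter
# whose setter divides the raw trackbar value by 10, mirroring how
# ManualTracker wires up its mixing constant.
def _parameter_example():
    p = Parameter(name='mixing', value=1.0, min=0.1, max=1.0,
                  set_transform=div10)
    p.set(7)           # raw trackbar value 7 -> 0.7 after div10 and clamping
    return p.value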
class ManualTracker:
MAIN_WINDOW = "Main Window"
ROI_WINDOW = "ROI"
THRESHOLDED_WINDOW = "Thresholded"
PROGRESS_WINDOW = "Progress"
GRAPH_WINDOW = "Area"
MERGE = 1
BLOCK = 0
DEBUG = True
def add_track_bar(self, description, parameter, window=None):
cv2.createTrackbar(description,
window if window is not None else self.MAIN_WINDOW,
parameter.value, parameter.max, parameter.set)
@staticmethod
def from_backup(file):
trk = pickle.load(open(file, 'rb'))
trk.register_callbacks()
return trk
def __init__(self, videofile):
self.reset()
self.videofile = videofile
self.register_callbacks()
self.update_frame = True # must be true to ensure correct starting conditions
self.contours_detected = None
self.contours = None
self.area = None
self._mixing_log = None
self._progress_len = 1600
self._progress_height = 100
self._width = 800
self.window_size = 2000
self.dilation_factor = 1.3
def register_callbacks(self):
cv2.namedWindow(self.MAIN_WINDOW)
cv2.namedWindow(self.GRAPH_WINDOW)
self.add_track_bar("mask brush size", self.brush)
self.add_track_bar("frame tolerance", self.frame_tolerance)
self.add_track_bar("Gaussian blur filter half width", self.blur)
self.add_track_bar("Exponent", self.power)
self.add_track_bar("erosion/dilation iterations", self.dilation_iter)
self.add_track_bar("min contour length", self.min_contour_len)
cv2.createTrackbar("10x weight of current frame in running avg.", self.MAIN_WINDOW,
int(self.mixing_constant.value*10), 10, self.mixing_constant.set)
cv2.setMouseCallback(self.MAIN_WINDOW, self.mouse_callback)
cv2.setMouseCallback(self.GRAPH_WINDOW, self.graph_mouse_callback)
def recompute_area(self):
print('Recomputing areas')
assert self.contours is not None, 'contours must not be None'
assert self.contours_detected is not None, 'contours_detected must not be None'
self.area = np.zeros(len(self.contours))
_, frame = self.read_frame()
area = np.zeros(frame.shape[:2], dtype=np.uint8)
for i, c, ok in tqdm(zip(count(), self.contours, self.contours_detected), total=len(self.area)):
if c is None:
self.contours_detected[i] = False
self.area[i] = 0
else:
area = cv2.drawContours(area, [c], -1, (255), thickness=cv2.FILLED)
self.area[i] = (area > 0).sum()
area *= 0
self.plot_area()
def reset(self):
self.pause = False
self.step = 50
self._cap = None
self._frame_number = None
self._n_frames = None
self._last_frame = None
self._mask = None
self._merge_mask = None
self.mask_mode = self.BLOCK
# Parameters
self.brush = Parameter(name='mask_brush_size', value=20, min=1, max=100)
self.roi = Parameter(name='roi', value=None)
self.blur = Parameter(name='gauss_blur', value=3, min=1, max=20, get_transform=int)
self.power = Parameter(name='exponent', value=3, min=1, max=10)
self.dilation_iter = Parameter(name='dilation_iter', value=7, min=1, max=20)
self.min_contour_len = Parameter(name='min_contour_len', value=10, min=5, max=50)
self.mixing_constant = Parameter(name='running_avg_mix', value=1., min=.1, max=1.,
set_transform=div10)
self.frame_tolerance = Parameter(name='frame_tolerance', value=0, min=0, max=5)
self._skipped_frames = 0
self.roi_start = None
self.roi_end = None
self.t0 = 0
self.t1 = None
self.scroll_window = False
self.dilation_kernel = np.ones((3, 3))
self.histogram_equalize = False
self.skip = False
self.help = True
self._scale_factor = None
self.dsize = None
self._running_mean = None
self.backup_interval = 1000
self.backup_file = '/tmp/tracker.pkl'
self._drag = False
self.parameters = []
for e in self.__dict__.values():
if isinstance(e, Parameter):
self.parameters.append(e)
def set_log_size(self, n):
for p in self.parameters:
p.log_size = n
def flush_parameter_log(self):
for p in self.parameters:
p.flush_log()
def log_parameters(self, i):
for p in self.parameters:
p.log(i)
def mouse_callback(self, event, x, y, flags, param):
if self._scale_factor is not None:
x, y = map(int, (i / self._scale_factor for i in (x, y)))
if event == cv2.EVENT_LBUTTONDOWN:
mask = self._mask if self.mask_mode == self.BLOCK else self._merge_mask
color = (0, 0, 0) if self.mask_mode == self.BLOCK else (255, 255, 255)
if mask is not None:
cv2.circle(mask, (x, y), self.brush.value, color, -1)
elif event == cv2.EVENT_RBUTTONDOWN:
mask = self._mask if self.mask_mode == self.BLOCK else self._merge_mask
color = (0, 0, 0) if self.mask_mode == self.MERGE else (255, 255, 255)
if mask is not None:
cv2.circle(mask, (x, y), self.brush.value, color, -1)
elif event == cv2.EVENT_MBUTTONDOWN:
self.roi_start = (x, y)
self._drag = True
elif event == cv2.EVENT_MOUSEMOVE and self._drag:
self.roi_end = (x, y)
elif event == cv2.EVENT_MBUTTONUP:
self.roi_end = (x, y)
self._drag = False
if self.roi_end[0] != self.roi_start[0] and self.roi_end[1] != self.roi_start[1]:
x = np.vstack((self.roi_start, self.roi_end))
tmp = np.hstack((x.min(axis=0), x.max(axis=0)))
self.roi.set(np.asarray([[tmp[1], tmp[3]], [tmp[0], tmp[2]]], dtype=int) + 1)
else:
print('ROI endpoints are not different Paul! Setting ROI to None!')
self.roi.set(None)
def graph_mouse_callback(self, event, x, y, flags, param):
t0, t1 = self.t0, self.t1
dt = t1 - t0
sanitize = lambda t: int(max(min(t, self._n_frames - 1), 0))
if event == cv2.EVENT_MBUTTONDOWN:
frame = sanitize(t0 + x / self._progress_len * dt)
self.goto_frame(frame)
elif event == cv2.EVENT_LBUTTONDOWN:
self.t0_tmp = sanitize(t0 + x / self._progress_len * dt)
elif event == cv2.EVENT_LBUTTONUP:
t1 = sanitize(t0 + x / self._progress_len * dt)
if t1 < self.t0_tmp:
self.t0, self.t1 = t1, self.t0_tmp
elif self.t0_tmp == t1:
self.t0, self.t1 = self.t0_tmp, self.t0_tmp + 1
else:
self.t0, self.t1 = self.t0_tmp, t1
elif event == cv2.EVENT_RBUTTONDOWN:
self.del_tmp = sanitize(t0 + x / self._progress_len * dt)
elif event == cv2.EVENT_RBUTTONUP:
t1 = sanitize(t0 + x / self._progress_len * dt)
if t1 < self.del_tmp:
t0, t1 = t1, self.del_tmp
else:
t0, t1 = self.del_tmp, t1
self.contours_detected[t0:t1] = False
self.contours[t0:t1] = None
def process_key(self, key):
if key == ord('q'):
return False
elif key == ord(' '):
self.pause = not self.pause
return True
elif key == ord('s'):
self.skip = not self.skip
return True
elif key == ord('a'):
self.t0, self.t1 = 0, self._n_frames
return True
elif key == ord('b'):
self.goto_frame(self._frame_number - self.step)
return True
elif key == ord('e'):
self.histogram_equalize = not self.histogram_equalize
return True
elif key == ord('r'):
self.roi_start = None
self.roi_end = None
self.roi = None
return True
elif key == ord('c'):
self._mask = np.ones_like(self._mask) * 255
self._merge_mask = np.zeros_like(self._merge_mask)
elif key == ord('h'):
self.help = not self.help
return True
elif key == ord('m'):
self.mask_mode = self.MERGE if self.mask_mode == self.BLOCK else self.BLOCK
return True
elif key == ord('t'):
self.focus_window()
return True
elif key == ord('w'):
self.scroll_window = ~self.scroll_window
self.t0 = max(0, self._frame_number - self.window_size)
self.t1 = min(self._n_frames, self.t0 + self.window_size)
return True
return True
help_text = """
KEYBOARD:
q : quits
space : (un)pause
a : reset area
s : toggle skip
    b : jump back `step` frames (default 50)
r : delete roi
c : delete mask
e : toggle histogram equalization
h : toggle help
m : toggle mask mode
t : focus window on cursor
w : toggle scrolling window
MOUSE:
middle drag : drag ROI
left click : add to mask
right click : delete from mask
middle click in area : jump to location
    left drag in area : zoom in
    right drag in area : drop frames
"""
def display_frame_number(self, img):
font = cv2.FONT_HERSHEY_SIMPLEX
fs = .6
cv2.putText(img, "[{fr_count:05d}/{frames:05d}]".format(fr_count=self._frame_number, frames=self._n_frames),
(10, 30), font, fs, (255, 144, 30), 2)
if self.contours[self._frame_number] is not None:
cv2.putText(img, "OK", (200, 30), font, fs, (0, 255, 0), 2)
else:
cv2.putText(img, "NOT OK", (200, 30), font, fs, (0, 0, 255), 2)
cv2.putText(img, "Mask Mode {}".format('MERGE' if self.mask_mode == self.MERGE else 'BLOCK'),
(500, 30), font,
fs, (0, 140, 255), 2)
cv2.putText(img, "Skipped Frames {}/{}".format(self._skipped_frames, self.frame_tolerance.value),
(700, 30), font,
fs, (127, 255, 127), 2)
if self.skip:
cv2.putText(img, "Skip", (10, 70), font, fs, (0, 0, 255), 2)
if self.help:
y0, dy = 70, 20
for i, line in enumerate(self.help_text.replace('\t', ' ').split('\n')):
y = y0 + i * dy
cv2.putText(img, line, (10, y), font, fs, (255, 144, 30), 2)
def read_frame(self):
if not self.pause or self.update_frame:
if not self.update_frame:
self._frame_number += 1
self.update_frame = False
ret, frame = self._cap.read()
self._last_frame = ret, frame
if self._mask is None:
self._mask = np.ones_like(frame) * 255
if self._merge_mask is None:
self._merge_mask = np.zeros_like(frame)
self._last_frame = ret, frame
if ret and frame is not None:
return ret, frame.copy()
else:
return ret, None
else:
ret, frame = self._last_frame
return ret, frame.copy()
def preprocess_image(self, frame):
h = self.blur.value
if self.power.value > 1:
frame = np.array(frame / 255) ** self.power.value * 255
frame = frame.astype(np.uint8)
if self.histogram_equalize:
cv2.equalizeHist(frame, frame)
if self._running_mean is None or frame.shape != self._running_mean.shape:
self._running_mean = np.array(frame)
elif not self.pause:
a = self.mixing_constant.value
self._running_mean = np.uint8(a * frame + (1 - a) * self._running_mean)
frame = np.array(self._running_mean)
blur = cv2.GaussianBlur(frame, (2 * h + 1, 2 * h + 1), 0)
_, thres = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
mask = cv2.erode(thres, self.dilation_kernel, iterations=self.dilation_iter.value)
mask = cv2.dilate(mask, self.dilation_kernel, iterations=int(self.dilation_factor * self.dilation_iter.value))
return thres, blur, mask
def find_contours(self, thres):
contours, hierarchy = cv2.findContours(thres, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE) # remove copy when cv2=3.2 is installed
if len(contours) > 1:
contours = [c for i, c in enumerate(contours) if hierarchy[0, i, 3] == -1]
contours = [cv2.convexHull(c) for c in contours]
if len(contours) > 1 and self._merge_mask is not None and np.any(self._merge_mask > 0):
small_merge_mask = self._merge_mask[slice(*self.roi.value[0]), slice(*(self.roi.value[1] + 1)), 0]
merge = []
other = []
for i in range(len(contours)):
tmp = np.zeros_like(thres)
cv2.drawContours(tmp, contours, i, (255), thickness=cv2.FILLED)
cv2.bitwise_and(tmp, small_merge_mask, dst=tmp)
if tmp.sum() > 0:
merge.append(contours[i])
else:
other.append(contours[i])
contours = ([cv2.convexHull(np.vstack(merge))] if len(merge) > 0 else []) + other
contours = [c + self.roi.value[::-1, 0][None, None, :] for c in contours if len(c) >= self.min_contour_len.value]
return contours
def focus_window(self):
self.t0 = max(self._frame_number - 250, 0)
self.t1 = min(self._frame_number + 750, self._n_frames)
def goto_frame(self, no):
self._running_mean = None
self._frame_number = min(max(no, 0), self._n_frames - 1)
self._cap.set(cv2.CAP_PROP_POS_FRAMES, self._frame_number)
self.update_frame = True
def normalize_graph(self, signal, min_zero=True):
height = self._progress_height
if not min_zero:
v = np.abs(signal).max() + 1
signal = (signal / v + 1) / 2
else:
signal = signal / (signal.max() + 1)
return (height - signal * height).astype(int)
def plot_area(self):
t0, t1 = self.t0, self.t1
dt = t1 - t0
idx = np.linspace(t0, t1, self._progress_len, endpoint=False).astype(int)
height = self._progress_height
graph = (self.contours_detected[idx].astype(np.float) * 255)[None, :, None]
graph = np.tile(graph, (height, 1, 3)).astype(np.uint8)
area = self.normalize_graph(self.area[idx])
detected = self.contours_detected[idx]
for x, y1, y2, det1, det2 in zip(count(), area[:-1], area[1:], detected[:-1], detected[1:]):
if det1 and det2:
graph = cv2.line(graph, (x, y1), (x + 1, y2), (209, 133, 4), thickness=2)
if t0 <= self._frame_number <= t1:
x = int((self._frame_number - t0) / dt * self._progress_len)
graph = cv2.line(graph, (x, 0), (x, height), (0, 255, 0), 2)
cv2.imshow(self.GRAPH_WINDOW, graph)
def parameter_names(self):
return tuple(p.name for p in self.parameters)
def parameter_iter(self):
names = self.parameter_names()
for frame_number, *params in zip(count(), *map(attrgetter('logtrace'), self.parameters)):
yield dict(zip(names, params), frame_id=frame_number)
def backup(self):
cap = self._cap
self._cap = None
print('Saving tracker to', self.backup_file)
pickle.dump(self, open(self.backup_file, 'wb'), pickle.HIGHEST_PROTOCOL)
self._cap = cap
def run(self):
iterations = 0
self._cap = cap = cv2.VideoCapture(self.videofile)
self._n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.set_log_size(self._n_frames)
self.flush_parameter_log()
self._frame_number = 0
self.update_frame = True # ensure correct starting conditions
self.t0 = 0
self.t1 = self._n_frames
        if self.contours_detected is not None and self.contours is not None:
self.recompute_area()
self.pause = True
else:
self.area = np.zeros(self._n_frames)
self.contours_detected = np.zeros(self._n_frames, dtype=bool)
self.contours = np.zeros(self._n_frames, dtype=object)
self.contours[:] = None
while cap.isOpened():
if not self.pause:
iterations += 1
if iterations % self.backup_interval == self.backup_interval - 1:
self.backup()
if self._frame_number >= self._n_frames - 1:
if not self.pause:
print("Reached end of videofile. Press Q to exit. Or go back to fix stuff.", self.videofile)
self.pause = True
self.log_parameters(self._frame_number)
ret, frame = self.read_frame()
if self.scroll_window and not self.pause:
self.t0 = min(self.t0 + 1, self._n_frames - self.scroll_window)
self.t1 = min(self.t1 + 1, self._n_frames)
if ret and self.roi_start is not None and self.roi_end is not None:
cv2.rectangle(frame, self.roi_start, self.roi_end, (0, 255, 255), 2)
if ret and not self.skip and self.roi.value is not None:
small_gray = cv2.cvtColor(frame[slice(*self.roi.value[0]), slice(*self.roi.value[1]), :],
cv2.COLOR_BGR2GRAY)
try:
thres, small_gray, dilation_mask = self.preprocess_image(small_gray)
except Exception as e:
                    print('Problems with processing; reverting to frame', self._frame_number - 10, '- please redraw the ROI')
print('Error message is', str(e))
self.goto_frame(self._frame_number - 10)
self.roi_start = self.roi_end = self.roi = None
self.pause = True
if self.DEBUG:
raise
else:
if self._mask is not None:
small_mask = self._mask[slice(*self.roi.value[0]), slice(*(self.roi.value[1] + 1)), 0]
cv2.bitwise_and(thres, small_mask, dst=thres)
cv2.bitwise_and(thres, dilation_mask, dst=thres)
contours = self.find_contours(thres)
cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
cv2.drawContours(small_gray, contours, -1, (127, 127, 127), 3,
offset=tuple(-self.roi.value[::-1, 0]))
if len(contours) > 1:
if not self.pause:
self._skipped_frames += 1
if self._skipped_frames > self.frame_tolerance.value:
self.pause = True
elif len(contours) == 1:
self._skipped_frames = 0
area = np.zeros_like(small_gray)
area = cv2.drawContours(area, contours, -1, (255), thickness=cv2.FILLED,
offset=tuple(-self.roi.value[::-1, 0]))
self.area[self._frame_number] = (area > 0).sum()
self.contours_detected[self._frame_number] = True
self.contours[self._frame_number] = contours[0]
else:
self._skipped_frames = 0
cv2.imshow(self.ROI_WINDOW, small_gray)
cv2.imshow(self.THRESHOLDED_WINDOW, thres)
# --- plotting
if self._merge_mask is not None:
if np.any(self._merge_mask > 0):
tm = cv2.cvtColor(self._merge_mask, cv2.COLOR_BGR2GRAY)
_, tm = cv2.threshold(tm, 127, 255, cv2.THRESH_BINARY)
_, mcontours, _ = cv2.findContours(tm, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(frame, mcontours, -1, (0, 140, 255), thickness=3)
self.display_frame_number(frame)
cv2.bitwise_and(frame, self._mask, dst=frame)
if self._scale_factor is None:
self._scale_factor = self._width / frame.shape[1]
self.dsize = tuple(int(self._scale_factor * s) for s in frame.shape[:2])[::-1]
frame = cv2.resize(frame, self.dsize)
cv2.imshow(self.MAIN_WINDOW, frame)
self.plot_area()
if not self.process_key(cv2.waitKey(5) & 0xFF):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
tracker = ManualTracker('video2.mp4')
tracker.run()
| lgpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/widgets.py | 10 | 56160 | """
GUI Neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine an :class:`matplotlib.axes.Axes`
instance and pass that as the first arg. matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import numpy as np
from .mlab import dist
from .patches import Circle, Rectangle
from .lines import Line2D
from .transforms import blended_transform_factory
class LockDraw:
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
    drawing. Use ``matplotlib.widgets.lock(someobj)`` to prevent other
    widgets from drawing while you are interacting with the canvas.
    """
def __init__(self):
self._owner = None
def __call__(self, o):
"""reserve the lock for *o*"""
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
"""release the lock"""
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
"""drawing is available to *o*"""
return not self.locked() or self.isowner(o)
def isowner(self, o):
"""Return True if *o* owns this lock"""
return self._owner is o
def locked(self):
"""Return True if the lock is currently held by an owner"""
return self._owner is not None
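# Hedged sketch (not part of the original module): the locking protocol in
# miniature -- reserve the lock for one object, check availability for
# another, then release it.
def _lockdraw_example():
    lock = LockDraw()
    owner, other = object(), object()
    lock(owner)                         # reserve the lock for `owner`
    available = lock.available(other)   # False while `owner` holds the lock
    lock.release(owner)
    return available, lock.locked()     # (False, False)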
class Widget(object):
"""
Abstract base class for GUI neutral widgets
"""
drawon = True
eventson = True
class AxesWidget(Widget):
"""Widget that is connected to a single
:class:`~matplotlib.axes.Axes`.
To guarantee that the widget remains responsive and not garbage-collected,
a reference to the object should be maintained by the user.
This is necessary because the callback registry
maintains only weak-refs to the functions, which are member
functions of the widget. If there are no references to the widget
object it may be garbage collected which will disconnect the
callbacks.
Attributes:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget
*canvas* : :class:`~matplotlib.backend_bases.FigureCanvasBase` subclass
The parent figure canvas for the widget.
*active* : bool
If False, the widget does not respond to events.
"""
def __init__(self, ax):
self.ax = ax
self.canvas = ax.figure.canvas
self.cids = []
self.active = True
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores call back ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
class Button(AxesWidget):
"""
A GUI neutral button.
For the button to remain responsive
you must keep a reference to it.
The following attributes are accessible
*ax*
The :class:`matplotlib.axes.Axes` the button renders into.
*label*
A :class:`matplotlib.text.Text` instance.
*color*
The color of the button when not hovering.
*hovercolor*
The color of the button when hovering.
Call :meth:`on_clicked` to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
Parameters
----------
ax : matplotlib.axes.Axes
The :class:`matplotlib.axes.Axes` instance the button
will be placed into.
label : str
The button text. Accepts string.
image : array, mpl image, PIL image
The image to place in the button, if not *None*.
Can be any legal arg to imshow (numpy array,
matplotlib Image instance, or PIL image).
color : color
The color of the button when not activated
hovercolor : color
The color of the button when the mouse is over it
"""
AxesWidget.__init__(self, ax)
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if self.ignore(event):
return
if event.inaxes != self.ax:
return
if not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
def _release(self, event):
if self.ignore(event):
return
if event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
if not self.eventson:
return
if event.inaxes != self.ax:
return
for cid, func in six.iteritems(self.observers):
func(event)
def _motion(self, event):
if self.ignore(event):
return
if event.inaxes == self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon:
self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this *func* with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
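# Illustrative usage sketch (not part of the original module): wiring a Button
# to a callback with `on_clicked`, as the class docstring above describes.
# The axes rectangle and the callback body are arbitrary example choices, and
# an interactive matplotlib backend is assumed.
def _example_button_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)
    button_ax = fig.add_axes([0.7, 0.05, 0.2, 0.075])
    button = Button(button_ax, 'Press me')
    def on_press(event):
        print('button pressed')
    button.on_clicked(on_press)
    plt.show()
    return button  # keep a reference so the widget stays responsive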
class Slider(AxesWidget):
"""
A slider representing a floating point range.
For the slider
to remain responsive you must maintain a reference to it.
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None,
slidermax=None, dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*.
additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` which draws the slider
        knob. See the :class:`matplotlib.patches.Rectangle` documentation for
valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...)
Parameters
----------
ax : Axes
The Axes to put the slider in
label : str
Slider label
valmin : float
The minimum value of the slider
valmax : float
The maximum value of the slider
valinit : float
The slider initial position
        valfmt : str
            Used to format the slider value, a printf-style format string
closedmin : bool
Indicate whether the slider interval is closed on the bottom
closedmax : bool
Indicate whether the slider interval is closed on the top
slidermin : Slider or None
Do not allow the current slider to have a value less than
`slidermin`
slidermax : Slider or None
Do not allow the current slider to have a value greater than
`slidermax`
dragging : bool
if the slider can be dragged by the mouse
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin, valinit, 0, 1, **kwargs)
self.vline = ax.axvline(valinit, 0, 1, color='r', lw=1)
self.valfmt = valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt % valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
"""update the slider position"""
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event') or
(event.name == 'button_press_event' and
event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.xdata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % val)
if self.drawon:
self.ax.figure.canvas.draw_idle()
self.val = val
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
def reset(self):
"""reset the slider to the initial value if needed"""
if (self.val != self.valinit):
self.set_val(self.valinit)
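# Illustrative usage sketch (not part of the original module): a Slider driving
# a callback through `on_changed`, as the class docstring above describes.  The
# value range, format string and callback body are arbitrary example choices.
def _example_slider_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.25)
    slider_ax = fig.add_axes([0.15, 0.1, 0.7, 0.05])
    freq = Slider(slider_ax, 'freq', 0.1, 10.0, valinit=1.0, valfmt='%1.2f')
    def update(val):
        print('slider moved to %1.2f' % val)
    freq.on_changed(update)
    plt.show()
    return freq  # keep a reference so the widget stays responsive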
class CheckButtons(AxesWidget):
"""
    A GUI neutral set of check buttons.
    For the check buttons to remain responsive you must keep a
    reference to this object.
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are
located in
*labels*
List of :class:`matplotlib.text.Text` instances
*lines*
List of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have ``set_visible(False)``
when its box is not checked.
*rectangles*
List of :class:`matplotlib.patches.Rectangle` instances
Connect to the CheckButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*actives*
A len(buttons) list of booleans indicating whether
the button is active
"""
AxesWidget.__init__(self, ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels) > 1:
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color': 'k', 'linewidth': 1.25,
'transform': ax.transAxes, 'solid_capstyle': 'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy / 2., dy / 2.
x, y = 0.05, y - h / 2.
p = Rectangle(xy=(x, y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x + w], [y + h, y], **lineparams)
l2 = Line2D([x, x + w], [y, y + h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1, l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
for p, t, lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y)):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
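# Illustrative usage sketch (not part of the original module): CheckButtons
# reporting toggles through `on_clicked`, which is passed the label of the box
# that changed.  Labels and initial states are arbitrary example values.
def _example_checkbuttons_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.3)
    check_ax = fig.add_axes([0.02, 0.4, 0.2, 0.2])
    checks = CheckButtons(check_ax, ('red', 'green', 'blue'), (True, False, True))
    def toggled(label):
        print('%s was toggled' % label)
    checks.on_clicked(toggled)
    plt.show()
    return checks  # keep a reference so the widget stays responsive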
class RadioButtons(AxesWidget):
"""
A GUI neutral radio button
For the buttons to remain responsive
    you must keep a reference to this object.
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are in
*activecolor*
The color of the button when clicked
*labels*
A list of :class:`matplotlib.text.Text` instances
*circles*
A list of :class:`matplotlib.patches.Circle` instances
Connect to the RadioButtons with the :meth:`on_clicked` method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*active*
The index into labels for the button that is active
*activecolor*
The color of the button when clicked
"""
AxesWidget.__init__(self, ax)
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1. / (len(labels) + 1)
ys = np.linspace(1 - dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt == active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button != 1:
return
if event.inaxes != self.ax:
return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p, t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else:
return
for p in self.circles:
if p == inp:
color = self.activecolor
else:
color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon:
self.ax.figure.canvas.draw()
if not self.eventson:
return
for cid, func in six.iteritems(self.observers):
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
"""remove the observer with connection id *cid*"""
try:
del self.observers[cid]
except KeyError:
pass
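# Illustrative usage sketch (not part of the original module): RadioButtons
# reporting the selected label through `on_clicked`.  Labels are arbitrary
# example values.
def _example_radiobuttons_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    fig.subplots_adjust(left=0.3)
    radio_ax = fig.add_axes([0.02, 0.4, 0.2, 0.2])
    radio = RadioButtons(radio_ax, ('2 Hz', '4 Hz', '8 Hz'), active=0)
    def selected(label):
        print('selected %s' % label)
    radio.on_clicked(selected)
    plt.show()
    return radio  # keep a reference so the widget stays responsive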
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`
"""
def __init__(self, targetfig, toolfig):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
None, a default figure will be created. If you are using
this from the GUI
"""
# FIXME: The docstring seems to just abruptly end without...
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s' % (self.slider.label.get_text(),
self.slider.valfmt)
return fmt % x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left',
0, 1, targetfig.subplotpars.left,
closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom,
'bottom', 0, 1,
targetfig.subplotpars.bottom,
closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1,
targetfig.subplotpars.right,
closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1,
targetfig.subplotpars.top,
closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace',
0, 1, targetfig.subplotpars.wspace,
closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace',
0, 1, targetfig.subplotpars.hspace,
closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace,)
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon:
self.targetfig.canvas.draw()
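# Illustrative usage sketch (not part of the original module): embedding a
# SubplotTool in a second figure to adjust the subplot parameters of a target
# figure interactively (pyplot's `subplot_tool` helper wraps this same
# pattern).  The plotted data are arbitrary example values.
def _example_subplottool_usage():
    import matplotlib.pyplot as plt
    target_fig, ax = plt.subplots()
    ax.plot(range(10))
    tool_fig = plt.figure(figsize=(6, 3))
    tool = SubplotTool(target_fig, tool_fig)
    plt.show()
    return tool  # keep a reference so the sliders stay responsive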
class Cursor(AxesWidget):
"""
    A horizontal and vertical line that span the axes and move with
    the pointer.  You can turn off the hline or vline respectively with
    the attributes
    *horizOn*
      Controls the visibility of the horizontal line
    *vertOn*
      Controls the visibility of the vertical line
and the visibility of the cursor itself with the *visible* attribute.
    For the cursor to remain responsive you must keep a reference to
it.
"""
def __init__(self, ax, horizOn=True, vertOn=True, useblit=False,
**lineprops):
"""
Add a cursor to *ax*. If ``useblit=True``, use the backend-
dependent blitting features for faster updates (GTKAgg
only for now). *lineprops* is a dictionary of line properties.
.. plot :: mpl_examples/widgets/cursor.py
"""
# TODO: Is the GTKAgg limitation still true?
AxesWidget.__init__(self, ax)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.clear)
self.visible = True
self.horizOn = horizOn
self.vertOn = vertOn
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
lineprops['animated'] = True
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
"""on mouse motion draw the cursor if visible"""
if self.ignore(event):
return
if not self.canvas.widgetlock.available(self):
return
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible:
return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
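# Illustrative usage sketch (not part of the original module): attaching a
# Cursor so crosshairs follow the mouse over an axes.  `useblit=True` and the
# line properties are arbitrary example choices; see the class docstring for
# the backend caveat on blitting.
def _example_cursor_usage():
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    ax.plot(np.random.rand(100))
    cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
    plt.show()
    return cursor  # keep a reference so the cursor stays responsive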
class MultiCursor(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes.
    For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
show()
"""
def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
xmin, xmax = axes[-1].get_xlim()
ymin, ymax = axes[-1].get_ylim()
xmid = 0.5 * (xmin + xmax)
ymid = 0.5 * (ymin + ymax)
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if self.useblit:
lineprops['animated'] = True
if vertOn:
self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
else:
self.vlines = []
if horizOn:
self.hlines = [ax.axhline(ymid, visible=False, **lineprops)
for ax in axes]
else:
self.hlines = []
self.connect()
def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw)
def clear(self, event):
"""clear the cursor"""
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
if self.horizOn:
for line in self.hlines:
line.set_ydata((event.ydata, event.ydata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector(AxesWidget):
"""
Select a min/max range of the x or y axes for a matplotlib Axes.
    For the selector to remain responsive you must keep a reference to
it.
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
*onmove_callback* is an optional callback that is called on mouse
move within the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
rectprops=None, onmove_callback=None, span_stays=False):
"""
Create a span selector in *ax*. When a selection is made, clear
the span and call *onselect* with::
onselect(vmin, vmax)
and clear the span.
*direction* must be 'horizontal' or 'vertical'
If *minspan* is not *None*, ignore events smaller than *minspan*
The span rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', alpha=0.5)
Set the visible attribute to *False* if you want to turn off
the functionality of the span selector
        If *span_stays* is True, the span stays visible after making
a valid selection.
"""
AxesWidget.__init__(self, ax)
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.visible = True
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.minspan = minspan
self.span_stays = span_stays
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
# Set useblit based on original canvas.
self.useblit = useblit and self.canvas.supports_blit
# Reset canvas so that `new_axes` connects events.
self.canvas = None
self.new_axes(ax)
def new_axes(self, ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
self.disconnect_events()
self.canvas = ax.figure.canvas
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData,
self.ax.transAxes)
w, h = 0, 1
else:
trans = blended_transform_factory(self.ax.transAxes,
self.ax.transData)
w, h = 1, 0
self.rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
if self.span_stays:
self.stay_rect = Rectangle((0, 0), w, h,
transform=trans,
visible=False,
**self.rectprops)
self.ax.add_patch(self.stay_rect)
if not self.useblit:
self.ax.add_patch(self.rect)
def update_background(self, event):
"""force an update of the background"""
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
"""return *True* if *event* should be ignored"""
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
widget_off = not self.visible or not self.active
non_event = event.inaxes != self.ax or event.button != 1
return widget_off or non_event
def press(self, event):
"""on button press event"""
if self.ignore(event):
return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.span_stays:
self.stay_rect.set_visible(False)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
"""on button release event"""
if self.ignore(event) and not self.buttonDown:
return
if self.pressv is None:
return
self.buttonDown = False
self.rect.set_visible(False)
if self.span_stays:
self.stay_rect.set_x(self.rect.get_x())
self.stay_rect.set_y(self.rect.get_y())
self.stay_rect.set_width(self.rect.get_width())
self.stay_rect.set_height(self.rect.get_height())
self.stay_rect.set_visible(True)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span < self.minspan:
return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
"""
Draw using newfangled blit or oldfangled draw depending
on *useblit*
"""
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
"""on motion notify event"""
if self.pressv is None or self.ignore(event):
return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv > maxv:
minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv - minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv - minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin > vmax:
vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
class RectangleSelector(AxesWidget):
"""
Select a rectangular region of an axes.
    For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data',
button=None):
"""
Create a selector in *ax*. When a selection is made, clear
the span and call onselect with::
onselect(pos_1, pos_2)
and clear the drawn box/line. The ``pos_1`` and ``pos_2`` are
arrays of length 2 containing the x- and y-coordinate.
If *minspanx* is not *None* then events smaller than *minspanx*
in x direction are ignored (it's the same for y).
The rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with *lineprops*; default::
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use *drawtype* if you want the mouse to draw a line,
a box or nothing between click and actual position by setting
``drawtype = 'line'``, ``drawtype='box'`` or ``drawtype = 'none'``.
*spancoords* is one of 'data' or 'pixels'. If 'data', *minspanx*
        and *minspany* will be interpreted in the same coordinates as
the x and y axis. If 'pixels', they are in pixels.
*button* is a list of integers indicating which mouse buttons should
be used for rectangle selection. You can also specify a single
integer if only a single button is desired. Default is *None*,
which does not limit which button can be used.
Note, typically:
1 = left mouse button
2 = center mouse button (scroll wheel)
3 = right mouse button
"""
AxesWidget.__init__(self, ax)
self.visible = True
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor='black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0, 0),
0, 1, visible=False, **self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth=2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0, 0], [0, 0], visible=False,
**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit and self.canvas.supports_blit
self.minspanx = minspanx
self.minspany = minspany
if button is None or isinstance(button, list):
self.validButtons = button
elif isinstance(button, int):
self.validButtons = [button]
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
"""force an update of the background"""
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
"""return *True* if *event* should be ignored"""
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# Only do rectangle selection if event was triggered
# with a desired button
if self.validButtons is not None:
if not event.button in self.validButtons:
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress is None:
return event.inaxes != self.ax
# If a button was pressed, check if the release-button is the
# same. If event is out of axis, limit the data coordinates to axes
# boundaries.
if event.button == self.eventpress.button and event.inaxes != self.ax:
(xdata, ydata) = self.ax.transData.inverted().transform_point(
(event.x, event.y))
x0, x1 = self.ax.get_xbound()
y0, y1 = self.ax.get_ybound()
xdata = max(x0, xdata)
xdata = min(x1, xdata)
ydata = max(y0, ydata)
ydata = min(y1, ydata)
event.xdata = xdata
event.ydata = ydata
return False
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes != self.ax or
event.button != self.eventpress.button)
def press(self, event):
"""on button press event"""
if self.ignore(event):
return
        # make the drawn box/line visible and get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
"""on button release event"""
if self.eventpress is None or self.ignore(event):
return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords == 'data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords == 'pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx < self.minspanx
yproblems = self.minspany is not None and spany < self.minspany
if (((self.drawtype == 'box') or (self.drawtype == 'line')) and
(xproblems or yproblems)):
# check if drawn distance (if it exists) is not too small in
# neither x nor y-direction
return
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # inital values
return False
def update(self):
"""draw using newfangled blit or oldfangled draw depending on
useblit
"""
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
"""on motion notify event if box/line is wanted"""
if self.eventpress is None or self.ignore(event):
return
x, y = event.xdata, event.ydata # actual position (with
# (button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx > maxx:
minx, maxx = maxx, minx # get them in the right order
if miny > maxy:
miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx - minx) # set width and height of box
self.to_draw.set_height(maxy - miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
"""
Use this to activate / deactivate the RectangleSelector
from your program with an boolean parameter *active*.
"""
self.active = active
def get_active(self):
""" Get status of active mode (boolean variable)"""
return self.active
class LassoSelector(AxesWidget):
"""Selection curve of an arbitrary shape.
    For the selector to remain responsive you must keep a reference to
it.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select
data points from an image.
In contrast to :class:`Lasso`, `LassoSelector` is written with an interface
similar to :class:`RectangleSelector` and :class:`SpanSelector` and will
continue to interact with the axes until disconnected.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*onselect* : function
Whenever the lasso is released, the `onselect` function is called and
passed the vertices of the selected path.
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(verts):
print verts
lasso = LassoSelector(ax, onselect)
"""
def __init__(self, ax, onselect=None, useblit=True, lineprops=None):
AxesWidget.__init__(self, ax)
self.useblit = useblit and self.canvas.supports_blit
self.onselect = onselect
self.verts = None
if lineprops is None:
lineprops = dict()
self.line = Line2D([], [], **lineprops)
self.line.set_visible(False)
self.ax.add_line(self.line)
self.connect_event('button_press_event', self.onpress)
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.update_background)
def ignore(self, event):
wrong_button = hasattr(event, 'button') and event.button != 1
return not self.active or wrong_button
def onpress(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
self.verts = [(event.xdata, event.ydata)]
self.line.set_visible(True)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
if event.inaxes == self.ax:
self.verts.append((event.xdata, event.ydata))
self.onselect(self.verts)
self.line.set_data([[], []])
self.line.set_visible(False)
self.verts = None
def onmove(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(list(zip(*self.verts)))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def update_background(self, event):
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
class Lasso(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select data points
from an image.
Unlike :class:`LassoSelector`, this must be initialized with a starting
point `xy`, and the `Lasso` events are destroyed upon release.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*xy* : array
Coordinates of the start of the lasso.
*callback* : function
Whenever the lasso is released, the `callback` function is called and
passed the vertices of the selected path.
"""
def __init__(self, ax, xy, callback=None, useblit=True):
AxesWidget.__init__(self, ax)
self.useblit = useblit and self.canvas.supports_blit
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
x, y = xy
self.verts = [(x, y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.ax.add_line(self.line)
self.callback = callback
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts) > 2:
self.callback(self.verts)
self.ax.lines.remove(self.line)
self.verts = None
self.disconnect_events()
def onmove(self, event):
if self.ignore(event):
return
if self.verts is None:
return
if event.inaxes != self.ax:
return
if event.button != 1:
return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(list(zip(*self.verts)))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
| mit |
PaulGrimal/peach | tutorial/neural-networks/radial-basis-function.py | 6 | 4556 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/radial-basis-function.py
# Radial basis functions for interpolation.
################################################################################
# Different kinds of neural network use different functions as activations for
# some neurons. The Radial Basis Network (RBFN) uses radial basis functions
# (RBFs), which are functions that are symmetric according to the origin. These
# functions offer good local approximation for a function and, for that reason,
# they are especially good for interpolation and function approximation. In
# fact, it can be proved that any continuous function can be approximated by
# this type of neural network.
#
# In this network, the first layer contains RBFs as activation functions.
# Because of this, the training is done in a different way, even though a
# gradient approach could be used. However, it is in general better to use a
# clustering algorithm, such as K-Means, to find the centers of the functions,
# and compute the width of the functions proportionally to the greatest distance
# among centers. The second layer of the network is a linear weighted combiner
# that sums the contribution of each neuron.
#
# In this tutorial, we will see how to use Peach's implementation of RBFNs to
# interpolate a function. We start, as always, by importing the necessary
# modules.
from numpy import *
import peach as p
from random import randrange
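# --- Illustrative aside (not part of the original tutorial) ------------------
# A radial basis function depends only on the distance from its center.  The
# tutorial below relies on Peach's default RBF, the Gaussian,
#
#     phi(x) = exp(-(x - c)**2 / (2*w**2))
#
# and the few lines here simply evaluate that expression directly (using the
# NumPy names imported above), so the bell-shaped activation is clear before
# the network itself is built.  The center and width are arbitrary values
# chosen only for illustration.
_c, _w = 0.0, 0.5
_xs = linspace(-2.0, 2.0, 5)
_phi = exp(-(_xs - _c)**2 / (2.0*_w**2))
# ------------------------------------------------------------------------------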
# Let's define the parameters of the simulation. We desire to interpolate a
# period of a cosine, from 20 samples of the function. The lines below create
# the training set.
x = linspace(-pi, pi, 20)
x = x.reshape((20, 1))
y = cos(x)
# Next, we create the network and the corresponding algorithms. We will use a
# 7th-order network to make the interpolation. This must suffice, but it is
# expected that small errors occur, especially near the limits of the interval.
N = 7 # Order of the simulation
# We will use the K-Means algorithm to clusterize the centers. Any other such
# algorithm (such as SOMs or Fuzzy C-Means) could be used. We initialize the
# centers with a reasonable set of values, linearly distributed through the
# interval. Upon calling the K-Means, we receive the clustered centers as a
# result.
km = p.KMeans(x, N)
km.c = linspace(-pi, pi, N)
c = km()
# Here, we initialize the Radial Basis Network. Notice that we don't need to
# pass a lot of parameters to the network -- only the centers, here represented
# by ``c``, are mandatory. The default RBF is the gaussian, and the default
# combiner is Linear, but we could specify different functions using ``phi`` and
# ``phi2`` arguments, respectively. The learning procedure for the second layer
# is the backpropagation (to take care of the situations in which ``phi2`` is
# not Linear) but this can be changed through the ``lrule`` argument.
rbf = p.RBFN(c)
# Now, we present some examples from the training set to the network. Notice
# that we already know where the centers are located, and in the instantiation
# of the RBFN algorithm the widths of the RBFs were computed. You can access the
# widths through the ``width`` property.
i = 0
error = 1.
while i < 5000 and error > 5.e-2:
# We choose randomly one point in the training set
j = randrange(20)
# And feed it to the network.
e = rbf.feed(x[j], y[j])
i = i + 1
# In the end of the training, you can inspect the weights of the second layer by
# using the ``weights`` property. We print them here, for reference:
print rbf.weights
# We will now plot the result. We apply the RBFN in 500 points in the domain
# from -pi to pi. Notice that the result of a neural network is a two-dimension
# array, so we select first line, first column
t = linspace(-pi, pi, 500)
yh = [ ]
for tj in t:
yh.append(rbf(tj)[0, 0])
# If the system has the plot package matplotlib, this tutorial tries to plot
# the training samples together with the interpolated function. The plot is
# saved in the file ``radial-basis-function.png``.
try:
import pylab
pylab.grid(True)
pylab.stem(x, y, "k-", "ko", "k-")
pylab.plot(t, yh)
pylab.xlim([ -pi, pi ])
pylab.ylim([ amin(y)-0.1, amax(y)+0.1 ])
pylab.xticks([ -pi, -pi/2, 0., pi/2, pi ])
pylab.gca().set_xticklabels([ r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$' ])
pylab.savefig("radial-basis-function.png")
except ImportError:
pass | lgpl-2.1 |
beepee14/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This does two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
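# Illustrative aside (not part of the original benchmark): both functions above
# share the same timing pattern -- collect garbage first, time fit + predict
# with datetime, then convert the timedelta to seconds.  A generic helper
# capturing that pattern could look like this sketch (the name and signature
# are invented here, not scikit-learn API):
def _time_fit_predict(estimator, X, Y):
    """Return the wall-clock seconds taken by ``estimator.fit(X, Y).predict(X)``."""
    gc.collect()
    tstart = datetime.now()
    estimator.fit(X, Y).predict(X)
    delta = datetime.now() - tstart
    return delta.seconds + delta.microseconds / mu_second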
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
AdiPersonalWorks/ATOS_GOM_SystemPrototyping | GOM_IntelligentTutoring.py | 1 | 2766 | '''
Intelligent Evaluation System
@author : TEAM
last changed: 16-01-2017
Changelogs:
1) 16-01-2017: Created
'''
# Importing required libraries
import csv
import pandas as pd
quest_list = pd.read_csv("QuestionSet_1.csv") # Change path as required
# Remove all NaN here later
# Variable initializations.
# (Change as required)
easy_quest = 0
med_quest = 5
dif_quest = 10
count = 1
correct = 0 # To keep track of the number of correct answers
# Forming the required arrays
all_questions = quest_list.Question
all_option1 = quest_list.Option1
all_option2 = quest_list.Option2
all_option3 = quest_list.Option3
all_option4 = quest_list.Option4
all_answers = quest_list.Answer
level = 1
print("Each question has only one correct answer. Please choose from options 1-4")
print("There will be a total of five questions")
print("All the best !")
for i in range(0,5):
# For Easy level questions
if level == 1:
print("\n")
print("Question "+str(count)+"-> "+all_questions[easy_quest])
print("1. "+all_option1[easy_quest])
print("2. "+all_option2[easy_quest])
print("3. "+all_option3[easy_quest])
print("4. "+all_option4[easy_quest])
val = input("-> ")
count = count + 1
if val==all_answers[easy_quest]:
print("Correct Answer!!!")
level = 2
correct = correct+1
easy_quest = easy_quest+1
# For Medium level questions
elif level == 2:
print("\n")
print("Question "+str(count)+"-> "+all_questions[med_quest])
print("1. "+all_option1[med_quest])
print("2. "+all_option2[med_quest])
print("3. "+all_option3[med_quest])
print("4. "+all_option4[med_quest])
val = input("-> ")
count = count + 1
if val==all_answers[med_quest]:
print("Correct Answer!!!")
level = 3
correct = correct+1
else:
level = 1
med_quest = med_quest+1
# For Difficult level questions
elif level == 3:
print("\n")
print("Question "+str(count)+"-> "+all_questions[dif_quest])
print("1. "+all_option1[dif_quest])
print("2. "+all_option2[dif_quest])
print("3. "+all_option3[dif_quest])
print("4. "+all_option4[dif_quest])
val = input("-> ")
count = count + 1
if val==all_answers[dif_quest]:
print("Correct Answer!!!")
level = 3
correct = correct+1
else:
level = 2
dif_quest = dif_quest+1
print("\n")
print("You have scored -> "+str(correct)+"/5")
| mit |
cyhsc/project1 | test.py | 1 | 4170 | import sys
import os
import pandas as pd
import config
from quote import Quote
from analysis import Analysis
from renko_pattern import RenkoPatterns
from finviz import Finviz
from scan import Scan
from tweets import Tweets
from yahoo import Yahoo
from wsj import WSJ
from filter import Filter
DATA_DIR = config.DATA_DIR
ANALYSIS_DIR = config.ANALYSIS_DIR
sym_file = config.TRADABLE_STOCKS
cur_sym_file = config.CUR_SYM
symbol = 'SPY'
#-------------------------------------------------
# Read in all the symbols from file
#-------------------------------------------------
def get_tradable_stocks():
symbols = []
lines = open(sym_file, 'r').read().split('\n')
for line in lines:
if len(line) > 0:
symbols.append(line.split(',')[0])
return symbols
def test1(sym):
q = Quote()
df = q.get(sym, 'nasdaq')
print df
def test2(sym):
df = pd.read_csv(DATA_DIR + sym + '.csv', index_col = 0)
a = Analysis()
a.analysis(sym, df, None)
#a.renko(df)
def test3(sym):
df = pd.read_csv(DATA_DIR + sym + '_renko.csv', index_col = 0)
p = RenkoPatterns()
found, pattern_list = p.pattern_wbw(df)
for item in pattern_list:
print item
print 'Found at last bar is', found
def test4():
fv = Finviz()
fv.get_tradable_stocks()
name, sector, industry = fv.get_classification('CSCO')
print 'Name:', name
print 'Sector:', sector
print 'Industry:', industry
def test5():
symbol_list = []
lines = open(config.TRADABLE_STOCKS, 'r').read().split('\n')
for line in lines:
if len(line) > 0:
symbol_list.append(line)
#symbol_list = ['CSCO']
q = Quote()
a = Analysis()
p = RenkoPatterns()
spy_df = q.get('spy', 'google')
for sym in symbol_list:
df = q.get(sym, 'google')
if df is not None:
a.analysis(sym, df, spy_df)
df.to_csv(DATA_DIR + sym + '.csv')
def test6(sym):
q = Quote()
q.update(sym)
def test7():
sym_list = ['CSCO']
sc = Scan()
sc.run(sym_list)
def test8():
q = Quote()
filelist = [ f for f in os.listdir(DATA_DIR) if f.endswith('.csv') and not f.endswith('_analysis.csv') and not f.endswith('_renko.csv')]
for f in filelist:
sym = f.split('.')[0]
if sym == 'SPY':
continue
print 'Analysing', sym, '....'
df = pd.read_csv(DATA_DIR + f, index_col = 0)
if df.loc['2017-07-31']['open'] == '-' or df.loc['2017-07-31']['high'] == '-' or df.loc['2017-07-31']['low'] == '-' or df.loc['2017-07-31']['close'] == '-':
ndf = q.get(sym, 'nasdaq')
print df.loc['2017-07-31']
print ndf.loc['2017-07-31']
df.replace(df.loc['2017-07-31'], ndf.loc['2017-07-31'], True)
print df.loc['2017-07-31']
print ndf.loc['2017-07-31']
df.to_csv(DATA_DIR + sym + '.csv')
def test9():
sc = Scan()
sym_list = sc.ibd_watch_list('ibd 50')
sc.run(sym_list)
def test10():
q = Quote()
filelist = [ f for f in os.listdir(DATA_DIR) if f.endswith('.csv') and not f.endswith('_analysis.csv') and not f.endswith('_renko.csv')]
for f in filelist:
sym = f.split('.')[0]
print 'Sanitizing', sym, '....'
q.sanitize(sym)
def test11():
f = Filter()
f.run()
# ==============================================================================
# Main
# ==============================================================================
def main(argv):
# --------------------------------------------------------------
# Set Pandas print option to print all rows and columns
# --------------------------------------------------------------
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 1000)
if not os.path.isdir(DATA_DIR):
os.makedirs(DATA_DIR)
#test1(symbol)
#test2('FIZZ')
#test3(symbol)
#test4()
#test5()
#test6('CSCO')
#test7()
#test8()
#test9()
#test10()
test11()
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 |
errantlinguist/tangrams-restricted | analysis/scripts/write_target_ref_utts.py | 1 | 10210 | #!/usr/bin/env python3
"""
Reads in event and utterance files for a directory of sessions and prints the utterance information alongside the target referent entity information on the same row to the standard output stream.
Processes the utterances in the following order:
1. Removes empty utterances, i.e. those only containing metalanguage tokens
2. Removes non-instructor utterances
3. Merges all utterances from a single speaker in a given round into one
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright (C) 2016-2017 Todd Shore"
__license__ = "GNU General Public License, Version 3"
import argparse
import csv
import itertools
import logging
import sys
from typing import Any, Iterable, Sequence, Tuple, TypeVar
import packaging.version
import pandas as pd
from tangrams_analysis import session_data as sd, utterances
DYAD_COL_NAME = "DYAD"
OUTPUT_FILE_DIALECT = csv.excel_tab
OUTPUT_FILE_ENCODING = "utf-8"
T = TypeVar("T")
class SessionRoundTokenTypeSetDataFrameFactory(object):
@staticmethod
def __create_summary_row(desc: str, datum: Tuple[int, ...]) -> Tuple[Any, ...]:
return tuple(itertools.chain((desc,), datum))
def __init__(self, utt_reader: utterances.UtteranceTabularDataReader):
self.utt_reader = utt_reader
self.orig_utt_df_shape_union = (0, 0)
self.nonempty_utt_df_shape_union = (0, 0)
self.instructor_utt_df_shape_union = (0, 0)
self.speaker_utt_df_shape_union = (0, 0)
self.unmerged_utt_df_shape_union = (0, 0)
def __call__(self, session_data: sd.SessionData) -> pd.DataFrame:
session_name = session_data.name
print("Reading events and utterances for \"{}\".".format(session_name), file=sys.stderr)
events_df = session_data.read_events()
events_df = remove_nonreferent_entity_utts(events_df)
events_df[DYAD_COL_NAME] = session_name
utts_df = self.__read_utts(session_data.utts)
utts_df = self.__remove_empty_utts(utts_df)
utts_df = self.__remove_noninstructor_utts(utts_df)
utts_df = self.__merge_speaker_utts(utts_df)
# Do an inner merge with the events dataframe so that utterances without events (e.g. utterances in the pre-game round "0") are not included
result = events_df.merge(utts_df, how="inner", left_on=sd.EventDataColumn.ROUND_ID.value,
right_on=utterances.UtteranceTabularDataColumn.ROUND_ID.value)
unmerged_rounds = frozenset(utts_df[utterances.UtteranceTabularDataColumn.ROUND_ID.value].unique()) - frozenset(
result[sd.EventDataColumn.ROUND_ID.value].unique())
logging.debug("Merged events and utterance dataframes. Rounds with utts which were not merged: %s",
", ".join(sorted(str(round_id) for round_id in unmerged_rounds)))
unmerged_utt_rows = utts_df.loc[
utts_df[utterances.UtteranceTabularDataColumn.ROUND_ID.value].isin(unmerged_rounds)]
assert not unmerged_utt_rows.empty == bool(unmerged_rounds)
self.unmerged_utt_df_shape_union = shape_union(self.unmerged_utt_df_shape_union, unmerged_utt_rows.shape)
assert result.loc[result[sd.EventDataColumn.ROUND_ID.value] < 1].empty
return result
def create_summary_rows(self):
result = (
("DESC", "ROWS", "COLS"),
self.__create_summary_row("Original DF", self.orig_utt_df_shape_union),
self.__create_summary_row("After removing empty utterances", self.nonempty_utt_df_shape_union),
self.__create_summary_row("After removing non-instructor utterances", self.instructor_utt_df_shape_union),
self.__create_summary_row("After merging speaker utterances", self.speaker_utt_df_shape_union),
self.__create_summary_row("Unmerged utterance DF", self.unmerged_utt_df_shape_union),
)
assert len(frozenset(len(row) for row in result)) == 1
return result
def __merge_speaker_utts(self, utts_df: pd.DataFrame) -> pd.DataFrame:
logging.debug("Merging speaker utterances.")
old_shape = utts_df.shape
result = utterances.merge_speaker_utts(utts_df)
new_shape = result.shape
logging.debug("Row count after merging speaker utterances: %d; Diff: %d", new_shape[0],
old_shape[0] - new_shape[0])
self.speaker_utt_df_shape_union = shape_union(self.speaker_utt_df_shape_union, new_shape)
return result
def __read_utts(self, infile_path: str):
logging.debug("Reading utterance data from \"%s\".", infile_path)
result = self.utt_reader(infile_path)
shape = result.shape
logging.debug("Read utterances with shape %s.", shape)
self.orig_utt_df_shape_union = shape_union(self.orig_utt_df_shape_union, shape)
return result
def __remove_empty_utts(self, utts_df: pd.DataFrame) -> pd.DataFrame:
logging.debug("Removing empty utterances from dataframe.")
old_shape = utts_df.shape
result = utts_df.loc[
utts_df[utterances.UtteranceTabularDataColumn.TOKEN_SEQ.value].str.len() > 0]
new_shape = result.shape
logging.debug("Row count after removing empty utterances: %d; Diff: %d", new_shape[0],
old_shape[0] - new_shape[0])
self.nonempty_utt_df_shape_union = shape_union(self.nonempty_utt_df_shape_union, new_shape)
return result
def __remove_noninstructor_utts(self, utts_df: pd.DataFrame) -> pd.DataFrame:
logging.debug("Removing non-instructor utterances from dataframe.")
old_shape = utts_df.shape
result = utts_df.loc[
utts_df[utterances.UtteranceTabularDataColumn.DIALOGUE_ROLE.value] == "INSTRUCTOR"]
new_shape = result.shape
logging.debug("Row count after removing non-instructor utterances: %d; Diff: %d", new_shape[0],
old_shape[0] - new_shape[0])
self.instructor_utt_df_shape_union = shape_union(self.instructor_utt_df_shape_union, new_shape)
return result
def remove_nonreferent_entity_utts(events_df: pd.DataFrame) -> pd.DataFrame:
logging.debug("Removing non-referent, non-new-turn-request rows.")
orig_shape = events_df.shape
result = events_df.loc[(events_df[sd.EventDataColumn.REFERENT_ENTITY.value] == True) & (
events_df[sd.EventDataColumn.EVENT_NAME.value] == "nextturn.request")]
new_shape = result.shape
logging.debug("Removed %d non-referent, non new-turn-request entity rows; New shape is %s.",
orig_shape[0] - new_shape[0], new_shape)
return result
def shape_union(s1: Tuple[int, int], s2: Tuple[int, int]) -> Tuple[int, int]:
assert len(s1) == 2
assert len(s2) == 2
return s1[0] + s2[0], max(s1[1], s2[1])
def sort_cols(df: pd.DataFrame) -> pd.DataFrame:
partial_ordering = (
DYAD_COL_NAME, sd.EventDataColumn.ROUND_ID.value, sd.EventDataColumn.SCORE.value,
sd.EventDataColumn.EVENT_ID.value,
sd.EventDataColumn.EVENT_NAME.value, sd.EventDataColumn.EVENT_TIME.value, sd.EventDataColumn.SUBMITTER.value,
sd.EventDataColumn.ENTITY_ID.value, utterances.UtteranceTabularDataColumn.SPEAKER_ID.value,
utterances.UtteranceTabularDataColumn.DIALOGUE_ROLE.value,
utterances.UtteranceTabularDataColumn.START_TIME.value,
utterances.UtteranceTabularDataColumn.END_TIME.value,
utterances.UtteranceTabularDataColumn.TOKEN_SEQ.value)
return __reindex_cols(df, partial_ordering)
def __create_str_repr(token_seq: Iterable[str]) -> str:
return " ".join(token_seq)
def __element_order(elem: T, ordering: Sequence[T]) -> int:
try:
result = ordering.index(elem)
except ValueError:
result = len(ordering)
return result
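# Note: column names missing from the partial ordering sort to the end, since
# __element_order falls back to len(ordering) for unknown names.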
def __reindex_cols_old_api(df: pd.DataFrame, partial_ordering: Sequence[str]) -> pd.DataFrame:
return df.reindex_axis(sorted(df.columns, key=lambda col_name: __element_order(col_name, partial_ordering)), axis=1,
copy=False)
def __reindex_cols_new_api(df: pd.DataFrame, partial_ordering: Sequence[str]) -> pd.DataFrame:
return df.reindex(sorted(df.columns, key=lambda col_name: __element_order(col_name, partial_ordering)), axis=1,
copy=False)
__PANDAS_API_BREAKING_VERSION = "0.21.0"
if packaging.version.parse(pd.__version__) < packaging.version.parse(__PANDAS_API_BREAKING_VERSION):
logging.warning("Version of installed pandas (\"%s\") is older than expected (\"%s\").", pd.__version__,
__PANDAS_API_BREAKING_VERSION)
__reindex_cols = __reindex_cols_old_api
else:
__reindex_cols = __reindex_cols_new_api
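# The shim above keeps compatibility across the pandas 0.21.0 API break:
# DataFrame.reindex_axis (old API) versus DataFrame.reindex with an explicit
# axis argument (new API).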
def __create_argparser() -> argparse.ArgumentParser:
result = argparse.ArgumentParser(
description="Reads in event and utterance files for a directory of sessions and prints the utterance information alongside the target referent entity information on the same row to the standard output stream.")
result.add_argument("inpaths", metavar="INPATH", nargs='+',
help="The paths to search for sessions to process.")
return result
def __main(args):
print("Will remove non-instructor utterances and concatenate speaker utterances.", file=sys.stderr)
df_factory = SessionRoundTokenTypeSetDataFrameFactory(utterances.UtteranceTabularDataReader())
inpaths = args.inpaths
print("Looking for session data underneath {}.".format(inpaths), file=sys.stderr)
session_utt_df = pd.concat(df_factory(session_data) for _, session_data in sd.walk_session_data(inpaths))
print("DF shape is {}; {} unique dyad(s).".format(session_utt_df.shape,
session_utt_df[
DYAD_COL_NAME].nunique()),
file=sys.stderr)
print("Utterance dataframe summary:", file=sys.stderr)
# https://pythonconquerstheuniverse.wordpress.com/2011/05/08/newline-conversion-in-python-3/
summary_writer = csv.writer(sys.stderr, dialect=OUTPUT_FILE_DIALECT, lineterminator="\n")
summary_writer.writerows(df_factory.create_summary_rows())
session_utt_df[utterances.UtteranceTabularDataColumn.TOKEN_SEQ.value] = session_utt_df[
utterances.UtteranceTabularDataColumn.TOKEN_SEQ.value].transform(__create_str_repr)
    # Ensure that rows are sorted by dyad and round, then chronologically within each round, with dialogue role as the final tiebreaker
session_utt_df.sort_values(
by=[DYAD_COL_NAME, sd.EventDataColumn.ROUND_ID.value, utterances.UtteranceTabularDataColumn.START_TIME.value,
utterances.UtteranceTabularDataColumn.END_TIME.value,
utterances.UtteranceTabularDataColumn.DIALOGUE_ROLE.value], inplace=True)
session_utt_df = sort_cols(session_utt_df)
session_utt_df.to_csv(sys.stdout, sep=OUTPUT_FILE_DIALECT.delimiter, encoding=OUTPUT_FILE_ENCODING, index=False)
if __name__ == "__main__":
__main(__create_argparser().parse_args())
| gpl-3.0 |
empireryan/director | src/python/ddapp/segmentation.py | 4 | 160980 | import os
import sys
import math
import vtk
import colorsys
import time
import functools
import traceback
import PythonQt
from PythonQt import QtCore, QtGui
import ddapp.applogic as app
from ddapp import objectmodel as om
from ddapp import perception
from ddapp import lcmUtils
from ddapp import roboturdf
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp.transformUtils import getTransformFromAxes
from ddapp.timercallback import TimerCallback
from ddapp import mapsregistrar
from ddapp import affordancemanager
from ddapp.affordanceitems import *
from ddapp.visualization import *
from ddapp.filterUtils import *
from ddapp.fieldcontainer import FieldContainer
from ddapp.segmentationroutines import *
from ddapp import cameraview
import numpy as np
import vtkNumpy
from debugVis import DebugData
from shallowCopy import shallowCopy
import ioUtils
from ddapp.uuidutil import newUUID
import drc as lcmdrc
import bot_core as lcmbotcore
import vs as lcmvs
from ddapp import lcmUtils
DRILL_TRIANGLE_BOTTOM_LEFT = 'bottom left'
DRILL_TRIANGLE_BOTTOM_RIGHT = 'bottom right'
DRILL_TRIANGLE_TOP_LEFT = 'top left'
DRILL_TRIANGLE_TOP_RIGHT = 'top right'
# using drc plane segmentation instead of PCL
planeSegmentationFilter = vtk.vtkPlaneSegmentation
#planeSegmentationFilter = vtk.vtkPCLSACSegmentationPlane
_defaultSegmentationView = None
def getSegmentationView():
return _defaultSegmentationView or app.getViewManager().findView('Segmentation View')
def getDRCView():
return app.getDRCView()
def switchToView(viewName):
app.getViewManager().switchToView(viewName)
def getCurrentView():
return app.getCurrentRenderView()
def initAffordanceManager(view):
'''
Normally the affordance manager is initialized by the application.
This function can be called from scripts and tests to initialize the manager.
'''
global affordanceManager
affordanceManager = affordancemanager.AffordanceObjectModelManager(view)
def cropToLineSegment(polyData, point1, point2):
line = np.array(point2) - np.array(point1)
length = np.linalg.norm(line)
axis = line / length
polyData = labelPointDistanceAlongAxis(polyData, axis, origin=point1, resultArrayName='dist_along_line')
return thresholdPoints(polyData, 'dist_along_line', [0.0, length])
'''
icp programmable filter
import vtkFiltersGeneralPython as filtersGeneral
points = inputs[0]
block = inputs[1]
print points.GetNumberOfPoints()
print block.GetNumberOfPoints()
if points.GetNumberOfPoints() < block.GetNumberOfPoints():
block, points = points, block
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(points.VTKObject)
icp.SetTarget(block.VTKObject)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = filtersGeneral.vtkTransformPolyDataFilter()
t.SetInput(points.VTKObject)
t.SetTransform(icp)
t.Update()
output.ShallowCopy(t.GetOutput())
'''
def lockAffordanceToHand(aff, hand='l_hand'):
linkFrame = getLinkFrame(hand)
affT = aff.actor.GetUserTransform()
if not hasattr(aff, 'handToAffT') or not aff.handToAffT:
aff.handToAffT = computeAToB(linkFrame, affT)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(aff.handToAffT)
t.Concatenate(linkFrame)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
handAffUpdater = None
def lockToHandOn():
aff = getDefaultAffordanceObject()
if not aff:
return
global handAffUpdater
if handAffUpdater is None:
handAffUpdater = TimerCallback()
handAffUpdater.targetFps = 30
handAffUpdater.callback = functools.partial(lockAffordanceToHand, aff)
handAffUpdater.start()
def lockToHandOff():
aff = getDefaultAffordanceObject()
if not aff:
return
handAffUpdater.stop()
aff.handToAffT = None
class DisparityPointCloudItem(vis.PolyDataItem):
def __init__(self, name, imagesChannel, cameraName, imageManager):
vis.PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view=None)
self.addProperty('Channel', imagesChannel)
self.addProperty('Camera name', cameraName)
self.addProperty('Decimation', 0, attributes=om.PropertyAttributes(enumNames=['1', '2', '4', '8', '16']))
self.addProperty('Remove Size', 1000, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=100000.0, singleStep=1000))
self.addProperty('Target FPS', 1.0, attributes=om.PropertyAttributes(decimals=1, minimum=0.1, maximum=30.0, singleStep=0.1))
self.timer = TimerCallback()
self.timer.callback = self.update
self.lastUtime = 0
self.imageManager = imageManager
self.cameraName = cameraName
self.setProperty('Visible', False)
def _onPropertyChanged(self, propertySet, propertyName):
vis.PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
if self.getProperty(propertyName):
self.timer.start()
else:
self.timer.stop()
elif propertyName in ('Decimation', 'Remove outliers'):
self.lastUtime = 0
def onRemoveFromObjectModel(self):
vis.PolyDataItem.onRemoveFromObjectModel(self)
self.timer.stop()
def update(self):
utime = self.imageManager.queue.getCurrentImageTime(self.cameraName)
if utime == self.lastUtime:
return
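        # Throttle updates: duplicate frames are skipped above; frames arriving
        # faster than the 'Target FPS' property allows are skipped below
        # (utime is in microseconds, hence the 1E6 factor).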
if (utime < self.lastUtime ):
            pass # utime went backwards (e.g. log playback restarted); fall through and reprocess
elif (utime - self.lastUtime < 1E6/self.getProperty('Target FPS')):
return
decimation = int(self.properties.getPropertyEnumValue('Decimation'))
removeSize = int(self.properties.getProperty('Remove Size'))
polyData = getDisparityPointCloud(decimation, imagesChannel=self.getProperty('Channel'), cameraName=self.getProperty('Camera name'),
removeOutliers=False, removeSize=removeSize)
self.setPolyData(polyData)
if not self.lastUtime:
self.setProperty('Color By', 'rgb_colors')
self.lastUtime = utime
def getRandomColor():
'''
Return a random color as a list of RGB values between 0.0 and 1.0.
'''
return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9)
def extractLargestCluster(polyData, minClusterSize=100):
polyData = applyEuclideanClustering(polyData, minClusterSize=minClusterSize)
return thresholdPoints(polyData, 'cluster_labels', [1, 1])
def segmentGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
''' A More complex ground removal algorithm. Works when plane isn't
preceisely flat. First clusters on z to find approx ground height, then fits a plane there
'''
searchRegionThickness = 0.5
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
groundHeight = np.percentile(zvalues, 5)
vtkNumpy.addNumpyToVtk(polyData, zvalues.copy(), 'z')
searchRegion = thresholdPoints(polyData, 'z', [groundHeight - searchRegionThickness/2.0, groundHeight + searchRegionThickness/2.0])
updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
return origin, normal, groundPoints, scenePoints
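# Minimal usage sketch for segmentGround: split a scan into ground and scene
# points (the display names used here are arbitrary example values).
def _exampleSegmentGround(polyData):
    origin, normal, groundPoints, scenePoints = segmentGround(polyData)
    showPolyData(groundPoints, 'example ground points', visible=False)
    showPolyData(scenePoints, 'example scene points', visible=False)
    return origin, normal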
def segmentGroundPlane():
inputObj = om.findObjectByName('pointcloud snapshot')
inputObj.setProperty('Visible', False)
polyData = shallowCopy(inputObj.polyData)
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
groundHeight = np.percentile(zvalues, 5)
searchRegion = thresholdPoints(polyData, 'z', [groundHeight - 0.3, groundHeight + 0.3])
updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [0.05, 10])
updatePolyData(groundPoints, 'ground points', alpha=0.3)
updatePolyData(scenePoints, 'scene points', alpha=0.3)
#scenePoints = applyEuclideanClustering(scenePoints, clusterTolerance=0.10, minClusterSize=100, maxClusterSize=1e6)
#updatePolyData(scenePoints, 'scene points', colorByName='cluster_labels')
def applyLocalPlaneFit(polyData, searchPoint, searchRadius, searchRadiusEnd=None, removeGroundFirst=True):
useVoxelGrid = True
voxelGridSize = 0.03
distanceToPlaneThreshold = 0.02
if useVoxelGrid:
polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
if removeGroundFirst:
_, polyData = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.04)
cropped = cropToSphere(polyData, searchPoint, searchRadius)
updatePolyData(cropped, 'crop to sphere', visible=False, colorByName='distance_to_point')
polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=searchPoint, searchRadius=searchRadius)
if searchRadiusEnd is not None:
polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, perpendicularAxis=normal, angleEpsilon=math.radians(30), searchOrigin=searchPoint, searchRadius=searchRadiusEnd)
fitPoints = thresholdPoints(polyData, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
updatePolyData(fitPoints, 'fitPoints', visible=False)
fitPoints = labelDistanceToPoint(fitPoints, searchPoint)
clusters = extractClusters(fitPoints, clusterTolerance=0.05, minClusterSize=3)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
fitPoints = clusters[0]
return fitPoints, normal
normalEstimationSearchRadius = 0.065
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(polyData)
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotPlaneNormal = np.abs(np.dot(normals, normal))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotPlaneNormal, 'normals_dot_plane_normal')
showPolyData(scenePoints, 'scene_with_normals', parent=getDebugFolder(), colorByName='normals_dot_plane_normal')
surfaces = thresholdPoints(scenePoints, 'normals_dot_plane_normal', [0.95, 1.0])
clusters = extractClusters(surfaces, clusterTolerance=0.1, minClusterSize=5)
clusters = clusters[:10]
for i, cluster in enumerate(clusters):
showPolyData(cluster, 'plane cluster %i' % i, parent=getDebugFolder(), visible=False)
return fitPoints
def orientToMajorPlane(polyData, pickedPoint):
'''
Find the largest plane and transform the cloud to align that plane
Use the given point as the origin
'''
distanceToPlaneThreshold=0.02
searchRadius = 0.5
planePoints, origin, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=pickedPoint, searchRadius=searchRadius, returnOrigin=True)
vis.updatePolyData(planePoints, 'local plane fit', color=[0,1,0], parent=getDebugFolder(), visible=False)
planeFrame = transformUtils.getTransformFromOriginAndNormal(pickedPoint, normal)
vis.updateFrame(planeFrame, 'plane frame', scale=0.15, parent=getDebugFolder(), visible=False)
polyData = transformPolyData(polyData, planeFrame.GetLinearInverse() )
# if the mean point is below the horizontal plane, flip the cloud
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
midCloudHeight = np.mean(zvalues)
if (midCloudHeight < 0):
flipTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,180,0])
polyData = transformPolyData(polyData, flipTransform )
return polyData, planeFrame
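# Note: after orientToMajorPlane the cloud is expressed in the fitted plane's
# frame with the picked point at the origin, and the flip above ensures the bulk
# of the cloud (by mean height) ends up above the plane.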
def getMajorPlanes(polyData, useVoxelGrid=True):
voxelGridSize = 0.01
distanceToPlaneThreshold = 0.02
if useVoxelGrid:
polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
polyDataList = []
minClusterSize = 100
while len(polyDataList) < 25:
f = planeSegmentationFilter()
f.SetInput(polyData)
f.SetDistanceThreshold(distanceToPlaneThreshold)
f.Update()
polyData = shallowCopy(f.GetOutput())
outliers = thresholdPoints(polyData, 'ransac_labels', [0, 0])
inliers = thresholdPoints(polyData, 'ransac_labels', [1, 1])
largestCluster = extractLargestCluster(inliers)
#i = len(polyDataList)
#showPolyData(inliers, 'inliers %d' % i, color=getRandomColor(), parent='major planes')
#showPolyData(outliers, 'outliers %d' % i, color=getRandomColor(), parent='major planes')
#showPolyData(largestCluster, 'cluster %d' % i, color=getRandomColor(), parent='major planes')
if largestCluster.GetNumberOfPoints() > minClusterSize:
polyDataList.append(largestCluster)
polyData = outliers
else:
break
return polyDataList
def showMajorPlanes(polyData=None):
if not polyData:
inputObj = om.findObjectByName('pointcloud snapshot')
inputObj.setProperty('Visible', False)
polyData = inputObj.polyData
om.removeFromObjectModel(om.findObjectByName('major planes'))
folderObj = om.findObjectByName('segmentation')
folderObj = om.getOrCreateContainer('major planes', folderObj)
origin = SegmentationContext.getGlobalInstance().getViewFrame().GetPosition()
polyData = labelDistanceToPoint(polyData, origin)
polyData = thresholdPoints(polyData, 'distance_to_point', [1, 4])
polyDataList = getMajorPlanes(polyData)
for i, polyData in enumerate(polyDataList):
obj = showPolyData(polyData, 'plane %d' % i, color=getRandomColor(), visible=True, parent='major planes')
obj.setProperty('Point Size', 3)
def cropToBox(polyData, transform, dimensions):
'''
dimensions is length 3 describing box dimensions
'''
origin = np.array(transform.GetPosition())
axes = transformUtils.getAxesFromTransform(transform)
for axis, length in zip(axes, dimensions):
cropAxis = np.array(axis)*(length/2.0)
polyData = cropToLineSegment(polyData, origin - cropAxis, origin + cropAxis)
return polyData
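# Minimal usage sketch for cropToBox (the frame position and box size here are
# arbitrary example values):
def _exampleCropToBox(polyData):
    t = transformUtils.frameFromPositionAndRPY([1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
    return cropToBox(polyData, t, [0.4, 0.4, 0.4])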
def cropToBounds(polyData, transform, bounds):
'''
bounds is a 2x3 containing the min/max values along the transform axes to use for cropping
'''
origin = np.array(transform.GetPosition())
axes = transformUtils.getAxesFromTransform(transform)
for axis, bound in zip(axes, bounds):
axis = np.array(axis)/np.linalg.norm(axis)
polyData = cropToLineSegment(polyData, origin + axis*bound[0], origin + axis*bound[1])
return polyData
def cropToSphere(polyData, origin, radius):
polyData = labelDistanceToPoint(polyData, origin)
return thresholdPoints(polyData, 'distance_to_point', [0, radius])
def applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=None, perpendicularAxis=None, angleEpsilon=0.2, returnOrigin=False, searchOrigin=None, searchRadius=None):
expectedNormal = expectedNormal if expectedNormal is not None else [-1,0,0]
fitInput = polyData
if searchOrigin is not None:
assert searchRadius
fitInput = cropToSphere(fitInput, searchOrigin, searchRadius)
# perform plane segmentation
f = planeSegmentationFilter()
f.SetInput(fitInput)
f.SetDistanceThreshold(distanceThreshold)
if perpendicularAxis is not None:
f.SetPerpendicularConstraintEnabled(True)
f.SetPerpendicularAxis(perpendicularAxis)
f.SetAngleEpsilon(angleEpsilon)
f.Update()
origin = f.GetPlaneOrigin()
normal = np.array(f.GetPlaneNormal())
# flip the normal if needed
if np.dot(normal, expectedNormal) < 0:
normal = -normal
# for each point, compute signed distance to plane
polyData = shallowCopy(polyData)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
if returnOrigin:
return polyData, origin, normal
else:
return polyData, normal
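# Minimal usage sketch for applyPlaneFit: fit a plane near a picked point and
# keep only points within 1cm of it (the search radius is an arbitrary choice).
def _examplePlaneFitNearPoint(polyData, searchPoint):
    labeled, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.01,
                                            searchOrigin=searchPoint, searchRadius=0.3,
                                            returnOrigin=True)
    fitPoints = thresholdPoints(labeled, 'dist_to_plane', [-0.01, 0.01])
    return fitPoints, origin, normal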
def flipNormalsWithViewDirection(polyData, viewDirection):
normals = vnp.getNumpyFromVtk(polyData, 'normals')
normals[np.dot(normals, viewDirection) > 0] *= -1
def normalEstimation(dataObj, searchCloud=None, searchRadius=0.05, useVoxelGrid=False, voxelGridLeafSize=0.05):
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(searchRadius)
f.SetInput(dataObj)
if searchCloud:
f.SetInput(1, searchCloud)
elif useVoxelGrid:
f.SetInput(1, applyVoxelGrid(dataObj, voxelGridLeafSize))
f.Update()
dataObj = shallowCopy(f.GetOutput())
dataObj.GetPointData().SetNormals(dataObj.GetPointData().GetArray('normals'))
return dataObj
def addCoordArraysToPolyData(polyData):
polyData = shallowCopy(polyData)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
viewOrigin = viewFrame.TransformPoint([0.0, 0.0, 0.0])
viewX = viewFrame.TransformVector([1.0, 0.0, 0.0])
viewY = viewFrame.TransformVector([0.0, 1.0, 0.0])
viewZ = viewFrame.TransformVector([0.0, 0.0, 1.0])
polyData = labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_view_x')
polyData = labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_view_y')
polyData = labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_view_z')
return polyData
def getDebugRevolutionData():
#dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../drc-data'))
#filename = os.path.join(dataDir, 'valve_wall.vtp')
#filename = os.path.join(dataDir, 'bungie_valve.vtp')
#filename = os.path.join(dataDir, 'cinder-blocks.vtp')
#filename = os.path.join(dataDir, 'cylinder_table.vtp')
#filename = os.path.join(dataDir, 'firehose.vtp')
#filename = os.path.join(dataDir, 'debris.vtp')
#filename = os.path.join(dataDir, 'rev1.vtp')
#filename = os.path.join(dataDir, 'drill-in-hand.vtp')
filename = os.path.expanduser('~/Desktop/scans/debris-scan.vtp')
return addCoordArraysToPolyData(ioUtils.readPolyData(filename))
def getCurrentScanBundle():
obj = om.findObjectByName('SCANS_HALF_SWEEP')
if not obj:
return None
revPolyData = obj.polyData
if not revPolyData or not revPolyData.GetNumberOfPoints():
return None
if useVoxelGrid:
revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
return addCoordArraysToPolyData(revPolyData)
def getCurrentRevolutionData():
revPolyData = perception._multisenseItem.model.revPolyData
if not revPolyData or not revPolyData.GetNumberOfPoints():
return getCurrentScanBundle()
if useVoxelGrid:
revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
return addCoordArraysToPolyData(revPolyData)
def getDisparityPointCloud(decimation=4, removeOutliers=True, removeSize=0, imagesChannel='CAMERA', cameraName='CAMERA_LEFT'):
p = cameraview.getStereoPointCloud(decimation, imagesChannel=imagesChannel, cameraName=cameraName, removeSize=removeSize)
if not p:
return None
if removeOutliers:
# attempt to scale outlier filtering, best tuned for decimation of 2 or 4
scaling = (10*16)/(decimation*decimation)
p = labelOutliers(p, searchRadius=0.06, neighborsInSearchRadius=scaling)
p = thresholdPoints(p, 'is_outlier', [0.0, 0.0])
return p
def getCurrentMapServerData():
mapServer = om.findObjectByName('Map Server')
polyData = None
if mapServer and mapServer.getProperty('Visible'):
polyData = mapServer.source.polyData
if not polyData or not polyData.GetNumberOfPoints():
return None
return addCoordArraysToPolyData(polyData)
useVoxelGrid = False
def segmentGroundPlanes():
objs = []
for obj in om.getObjects():
name = obj.getProperty('Name')
if name.startswith('pointcloud snapshot'):
objs.append(obj)
objs = sorted(objs, key=lambda x: x.getProperty('Name'))
d = DebugData()
prevHeadAxis = None
for obj in objs:
name = obj.getProperty('Name')
print '----- %s---------' % name
print 'head axis:', obj.headAxis
origin, normal, groundPoints, _ = segmentGround(obj.polyData)
print 'ground normal:', normal
showPolyData(groundPoints, name + ' ground points', visible=False)
a = np.array([0,0,1])
b = np.array(normal)
diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
if diff > 90:
print 180 - diff
else:
print diff
if prevHeadAxis is not None:
a = prevHeadAxis
b = np.array(obj.headAxis)
diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
if diff > 90:
print 180 - diff
else:
print diff
prevHeadAxis = np.array(obj.headAxis)
d.addLine([0,0,0], normal)
updatePolyData(d.getPolyData(), 'normals')
def extractCircle(polyData, distanceThreshold=0.04, radiusLimit=None):
circleFit = vtk.vtkPCLSACSegmentationCircle()
circleFit.SetDistanceThreshold(distanceThreshold)
circleFit.SetInput(polyData)
if radiusLimit is not None:
circleFit.SetRadiusLimit(radiusLimit)
circleFit.SetRadiusConstraintEnabled(True)
circleFit.Update()
polyData = thresholdPoints(circleFit.GetOutput(), 'ransac_labels', [1.0, 1.0])
return polyData, circleFit
def removeMajorPlane(polyData, distanceThreshold=0.02):
# perform plane segmentation
f = planeSegmentationFilter()
f.SetInput(polyData)
f.SetDistanceThreshold(distanceThreshold)
f.Update()
polyData = thresholdPoints(f.GetOutput(), 'ransac_labels', [0.0, 0.0])
return polyData, f
def removeGroundSimple(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
''' Simple ground plane removal algorithm. Uses ground height
and does simple z distance filtering.
Suitable for noisy data e.g. kinect/stereo camera
    (Default args should be relaxed, filtering simplified)
'''
groundHeight = SegmentationContext.getGlobalInstance().getGroundHeight()
origin = [0, 0, groundHeight]
normal = [0, 0, 1]
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
return groundPoints, scenePoints
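# Note: removeGroundSimple only thresholds against the ground height cached in
# SegmentationContext, while removeGround (below) re-fits the ground plane via
# segmentGround, so the simple variant is cheaper but assumes level ground.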
def removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
origin, normal, groundPoints, scenePoints = segmentGround(polyData, groundThickness, sceneHeightFromGround)
return groundPoints, scenePoints
def generateFeetForValve():
aff = om.findObjectByName('valve affordance')
assert aff
params = aff.params
origin = np.array(params['origin'])
origin[2] = 0.0
xaxis = -params['axis']
zaxis = np.array([0,0,1])
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
stanceWidth = 0.2
stanceRotation = 25.0
stanceOffset = [-1.0, -0.5, 0.0]
valveFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
valveFrame.PostMultiply()
valveFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(valveFrame, stanceWidth, stanceRotation, stanceOffset)
    showFrame(valveFrame, 'valve ground frame', parent=aff, scale=0.15, visible=False)
showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
#d = DebugData()
#d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
#updatePolyData(d.getPolyData(), 'stance debug')
#publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForDebris():
aff = om.findObjectByName('board A')
if not aff:
return
params = aff.params
origin = np.array(params['origin'])
origin = origin + params['zaxis']*params['zwidth']/2.0 - params['xaxis']*params['xwidth']/2.0
origin[2] = 0.0
yaxis = params['zaxis']
zaxis = np.array([0,0,1])
xaxis = np.cross(yaxis, zaxis)
stanceWidth = 0.35
stanceRotation = 0.0
stanceOffset = [-0.48, -0.08, 0]
boardFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
boardFrame.PostMultiply()
boardFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(boardFrame, stanceWidth, stanceRotation, stanceOffset)
showFrame(boardFrame, 'board ground frame', parent=aff, scale=0.15, visible=False)
lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
for obj in [lfoot, rfoot]:
obj.addToView(app.getDRCView())
#d = DebugData()
#d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
#updatePolyData(d.getPolyData(), 'stance debug')
#publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForWye():
aff = om.findObjectByName('wye points')
if not aff:
return
params = aff.params
origin = np.array(params['origin'])
origin[2] = 0.0
yaxis = params['xaxis']
xaxis = -params['zaxis']
zaxis = np.cross(xaxis, yaxis)
stanceWidth = 0.20
stanceRotation = 0.0
stanceOffset = [-0.48, -0.08, 0]
affGroundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
affGroundFrame.PostMultiply()
affGroundFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(affGroundFrame, stanceWidth, stanceRotation, stanceOffset)
showFrame(affGroundFrame, 'affordance ground frame', parent=aff, scale=0.15, visible=False)
lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
for obj in [lfoot, rfoot]:
obj.addToView(app.getDRCView())
def getFootFramesFromReferenceFrame(referenceFrame, stanceWidth, stanceRotation, stanceOffset):
footHeight=0.0745342
ref = vtk.vtkTransform()
ref.SetMatrix(referenceFrame.GetMatrix())
stanceFrame = vtk.vtkTransform()
stanceFrame.PostMultiply()
stanceFrame.RotateZ(stanceRotation)
stanceFrame.Translate(stanceOffset)
stanceFrame.Concatenate(ref)
lfootFrame = vtk.vtkTransform()
lfootFrame.PostMultiply()
lfootFrame.Translate(0, stanceWidth/2.0, footHeight)
lfootFrame.Concatenate(stanceFrame)
rfootFrame = vtk.vtkTransform()
rfootFrame.PostMultiply()
rfootFrame.Translate(0, -stanceWidth/2.0, footHeight)
rfootFrame.Concatenate(stanceFrame)
return stanceFrame, lfootFrame, rfootFrame
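# Minimal usage sketch for getFootFramesFromReferenceFrame: a stance half a metre
# behind a world-aligned reference frame (all parameter values here are arbitrary).
def _exampleFootFrames():
    ref = transformUtils.frameFromPositionAndRPY([1.0, 0.0, 0.0], [0.0, 0.0, 0.0])
    return getFootFramesFromReferenceFrame(ref, stanceWidth=0.25,
                                           stanceRotation=0.0,
                                           stanceOffset=[-0.5, 0.0, 0.0])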
def poseFromFrame(frame):
trans = lcmdrc.vector_3d_t()
trans.x, trans.y, trans.z = frame.GetPosition()
wxyz = range(4)
perception.drc.vtkMultisenseSource.GetBotQuaternion(frame, wxyz)
quat = lcmdrc.quaternion_t()
quat.w, quat.x, quat.y, quat.z = wxyz
pose = lcmdrc.position_3d_t()
pose.translation = trans
pose.rotation = quat
return pose
def cropToPlane(polyData, origin, normal, threshold):
polyData = shallowCopy(polyData)
normal = normal/np.linalg.norm(normal)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
cropped = thresholdPoints(polyData, 'dist_to_plane', threshold)
return cropped, polyData
def createLine(blockDimensions, p1, p2):
sliceWidth = np.array(blockDimensions).max()/2.0 + 0.02
sliceThreshold = [-sliceWidth, sliceWidth]
# require p1 to be point on left
if p1[0] > p2[0]:
p1, p2 = p2, p1
_, worldPt1 = getRayFromDisplayPoint(app.getCurrentRenderView(), p1)
_, worldPt2 = getRayFromDisplayPoint(app.getCurrentRenderView(), p2)
cameraPt = np.array(app.getCurrentRenderView().camera().GetPosition())
leftRay = worldPt1 - cameraPt
rightRay = worldPt2 - cameraPt
middleRay = (leftRay + rightRay) / 2.0
d = DebugData()
d.addLine(cameraPt, worldPt1)
d.addLine(cameraPt, worldPt2)
d.addLine(worldPt1, worldPt2)
d.addLine(cameraPt, cameraPt + middleRay)
updatePolyData(d.getPolyData(), 'line annotation', parent=getDebugFolder(), visible=False)
inputObj = om.findObjectByName('pointcloud snapshot')
if inputObj:
polyData = shallowCopy(inputObj.polyData)
else:
polyData = getCurrentRevolutionData()
origin = cameraPt
normal = np.cross(rightRay, leftRay)
leftNormal = np.cross(normal, leftRay)
rightNormal = np.cross(rightRay, normal)
normal /= np.linalg.norm(normal)
leftNormal /= np.linalg.norm(leftNormal)
rightNormal /= np.linalg.norm(rightNormal)
middleRay /= np.linalg.norm(middleRay)
cropped, polyData = cropToPlane(polyData, origin, normal, sliceThreshold)
updatePolyData(polyData, 'slice dist', parent=getDebugFolder(), colorByName='dist_to_plane', colorByRange=[-0.5, 0.5], visible=False)
updatePolyData(cropped, 'slice', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
cropped, _ = cropToPlane(cropped, origin, leftNormal, [-1e6, 0])
cropped, _ = cropToPlane(cropped, origin, rightNormal, [-1e6, 0])
updatePolyData(cropped, 'slice segment', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=middleRay, angleEpsilon=math.radians(60))
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
updatePolyData(planePoints, 'board segmentation', parent=getDebugFolder(), color=getRandomColor(), visible=False)
'''
names = ['board A', 'board B', 'board C', 'board D', 'board E', 'board F', 'board G', 'board H', 'board I']
for name in names:
if not om.findObjectByName(name):
break
else:
name = 'board'
'''
name = 'board'
segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=-middleRay, expectedXAxis=middleRay, edgeSign=-1, name=name)
def updateBlockAffordances(polyData=None):
for obj in om.getObjects():
if isinstance(obj, BoxAffordanceItem):
if 'refit' in obj.getProperty('Name'):
om.removeFromObjectModel(obj)
for obj in om.getObjects():
if isinstance(obj, BoxAffordanceItem):
updateBlockFit(obj, polyData)
def updateBlockFit(affordanceObj, polyData=None):
affordanceObj.updateParamsFromActorTransform()
name = affordanceObj.getProperty('Name') + ' refit'
origin = affordanceObj.params['origin']
normal = affordanceObj.params['yaxis']
edgePerpAxis = affordanceObj.params['xaxis']
blockDimensions = [affordanceObj.params['xwidth'], affordanceObj.params['ywidth']]
if polyData is None:
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
cropThreshold = 0.1
cropped = polyData
cropped, _ = cropToPlane(cropped, origin, normal, [-cropThreshold, cropThreshold])
cropped, _ = cropToPlane(cropped, origin, edgePerpAxis, [-cropThreshold, cropThreshold])
updatePolyData(cropped, 'refit search region', parent=getDebugFolder(), visible=False)
cropped = extractLargestCluster(cropped)
planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=normal, angleEpsilon=math.radians(10))
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
updatePolyData(planePoints, 'refit board segmentation', parent=getDebugFolder(), visible=False)
refitObj = segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=normal, expectedXAxis=edgePerpAxis, edgeSign=-1, name=name)
refitOrigin = np.array(refitObj.params['origin'])
refitLength = refitObj.params['zwidth']
refitZAxis = refitObj.params['zaxis']
refitEndPoint1 = refitOrigin + refitZAxis*refitLength/2.0
originalLength = affordanceObj.params['zwidth']
correctedOrigin = refitEndPoint1 - refitZAxis*originalLength/2.0
originDelta = correctedOrigin - refitOrigin
refitObj.params['zwidth'] = originalLength
refitObj.polyData.DeepCopy(affordanceObj.polyData)
refitObj.actor.GetUserTransform().Translate(originDelta)
refitObj.updateParamsFromActorTransform()
def startInteractiveLineDraw(blockDimensions):
picker = LineDraw(app.getCurrentRenderView())
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(createLine, blockDimensions)
def startLeverValveSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentLeverValve)
def refitValveAffordance(aff, point1, origin, normal):
xaxis = aff.params['xaxis']
yaxis = aff.params['yaxis']
zaxis = aff.params['zaxis']
origin = aff.params['origin']
zaxis = normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
aff.updateParamsFromActorTransform()
def segmentValve(expectedValveRadius, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, _, wallNormal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
polyData, _, _ = applyPlaneFit(polyData, expectedNormal=wallNormal, searchOrigin=point2, searchRadius=expectedValveRadius, angleEpsilon=0.2, returnOrigin=True)
valveCluster = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
valveCluster = cropToSphere(valveCluster, point2, expectedValveRadius*2)
valveCluster = extractLargestCluster(valveCluster, minClusterSize=1)
updatePolyData(valveCluster, 'valve cluster', parent=getDebugFolder(), visible=False)
origin = np.average(vtkNumpy.getNumpyFromVtk(valveCluster, 'Points') , axis=0)
zaxis = wallNormal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
zwidth = 0.03
radius = expectedValveRadius
d = DebugData()
d.addLine(np.array([0,0,-zwidth/2.0]), np.array([0,0,zwidth/2.0]), radius=radius)
name = 'valve affordance'
obj = showPolyData(d.getPolyData(), name, cls=FrameAffordanceItem, parent='affordances', color=[0,1,0])
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
refitWallCallbacks.append(functools.partial(refitValveAffordance, obj))
params = dict(axis=zaxis, radius=radius, length=zwidth, origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
xwidth=radius, ywidth=radius, zwidth=zwidth,
otdf_type='steering_cyl', friendly_name='valve')
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, scale=radius, visible=False)
frameObj.addToView(app.getDRCView())
def segmentValveByBoundingBox(polyData, searchPoint):
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData = cropToSphere(polyData, searchPoint, radius=0.6)
polyData = applyVoxelGrid(polyData, leafSize=0.015)
# extract tube search region
polyData = labelDistanceToLine(polyData, searchPoint, np.array(searchPoint) + np.array([0,0,1]))
searchRegion = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.2])
updatePolyData(searchRegion, 'valve tube search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
# guess valve plane
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.01, perpendicularAxis=viewDirection, angleEpsilon=math.radians(30), expectedNormal=-viewDirection, returnOrigin=True)
# extract plane search region
polyData = labelPointDistanceAlongAxis(polyData, normal, origin)
searchRegion = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
updatePolyData(searchRegion, 'valve plane search region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
valvePoints = extractLargestCluster(searchRegion, minClusterSize=1)
updatePolyData(valvePoints, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
valvePoints, _ = applyPlaneFit(valvePoints, expectedNormal=normal, perpendicularAxis=normal, distanceThreshold=0.01)
valveFit = thresholdPoints(valvePoints, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
zvalues = points[:,2].copy()
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
tubeRadius = 0.017
radius = float((maxZ - minZ) / 2.0) - tubeRadius
fields = makePolyDataFields(valveFit)
origin = np.array(fields.frame.GetPosition())
#origin = computeCentroid(valveFit)
zaxis = [0,0,1]
xaxis = normal
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=radius, Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
return obj
def segmentDoorPlane(polyData, doorPoint, stanceFrame):
doorPoint = np.array(doorPoint)
doorBand = 1.5
polyData = cropToLineSegment(polyData, doorPoint + [0.0,0.0,doorBand/2], doorPoint - [0.0,0.0,doorBand/2])
fitPoints, normal = applyLocalPlaneFit(polyData, doorPoint, searchRadius=0.2, searchRadiusEnd=1.0, removeGroundFirst=False)
updatePolyData(fitPoints, 'door points', visible=False, color=[0,1,0])
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
if np.dot(normal, viewDirection) > 0:
normal = -normal
origin = computeCentroid(fitPoints)
groundHeight = stanceFrame.GetPosition()[2]
origin = [origin[0], origin[1], groundHeight]
xaxis = -normal
zaxis = [0,0,1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
return t
def segmentValveByRim(polyData, rimPoint1, rimPoint2):
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
yaxis = np.array(rimPoint2) - np.array(rimPoint1)
zaxis = [0,0,1]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
# flip xaxis to be with view direction
if np.dot(xaxis, viewDirection) < 0:
xaxis = -xaxis
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
origin = (np.array(rimPoint2) + np.array(rimPoint1)) / 2.0
polyData = labelPointDistanceAlongAxis(polyData, xaxis, origin)
polyData = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
updatePolyData(polyData, 'valve plane region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
polyData = cropToSphere(polyData, origin, radius=0.4)
polyData = applyVoxelGrid(polyData, leafSize=0.015)
updatePolyData(polyData, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
valveFit = extractLargestCluster(polyData, minClusterSize=1)
updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
zvalues = points[:,2].copy()
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
tubeRadius = 0.017
radius = float((maxZ - minZ) / 2.0) - tubeRadius
fields = makePolyDataFields(valveFit)
origin = np.array(fields.frame.GetPosition())
vis.updatePolyData(transformPolyData(fields.box, fields.frame), 'valve cluster bounding box', visible=False)
#origin = computeCentroid(valveFit)
'''
zaxis = [0,0,1]
xaxis = normal
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
'''
radius = np.max(fields.dims)/2.0 - tubeRadius
proj = [np.abs(np.dot(xaxis, axis)) for axis in fields.axes]
xaxisNew = fields.axes[np.argmax(proj)]
if np.dot(xaxisNew, xaxis) < 0:
xaxisNew = -xaxisNew
xaxis = xaxisNew
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
return obj
def segmentValveByWallPlane(expectedValveRadius, point1, point2):
centerPoint = (point1 + point2) / 2.0
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
_ , polyData = removeGround(polyData)
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
perpLine = np.cross(point2 - point1, normal)
#perpLine /= np.linalg.norm(perpLine)
#perpLine * np.linalg.norm(point2 - point1)/2.0
point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
d = DebugData()
d.addLine(point1, point2)
d.addLine(point3, point4)
updatePolyData(d.getPolyData(), 'crop lines', parent=getDebugFolder(), visible=False)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'valve wall', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.05, 0.5])
searchRegion = cropToLineSegment(searchRegion, point1, point2)
searchRegion = cropToLineSegment(searchRegion, point3, point4)
updatePolyData(searchRegion, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
searchRegionSpokes = shallowCopy(searchRegion)
searchRegion, origin, _ = applyPlaneFit(searchRegion, expectedNormal=normal, perpendicularAxis=normal, returnOrigin=True)
searchRegion = thresholdPoints(searchRegion, 'dist_to_plane', [-0.015, 0.015])
updatePolyData(searchRegion, 'valve search region 2', parent=getDebugFolder(), color=[0,1,0], visible=False)
largestCluster = extractLargestCluster(searchRegion, minClusterSize=1)
updatePolyData(largestCluster, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
radiusLimit = [expectedValveRadius - 0.01, expectedValveRadius + 0.01] if expectedValveRadius else None
#radiusLimit = None
polyData, circleFit = extractCircle(largestCluster, distanceThreshold=0.01, radiusLimit=radiusLimit)
updatePolyData(polyData, 'circle fit', parent=getDebugFolder(), visible=False)
#polyData, circleFit = extractCircle(polyData, distanceThreshold=0.01)
#showPolyData(polyData, 'circle fit', colorByName='z')
radius = circleFit.GetCircleRadius()
origin = np.array(circleFit.GetCircleOrigin())
circleNormal = np.array(circleFit.GetCircleNormal())
circleNormal = circleNormal/np.linalg.norm(circleNormal)
if np.dot(circleNormal, normal) < 0:
circleNormal *= -1
# force use of the plane normal
circleNormal = normal
radius = expectedValveRadius
d = DebugData()
d.addLine(origin - normal*radius, origin + normal*radius)
d.addCircle(origin, circleNormal, radius)
updatePolyData(d.getPolyData(), 'valve axes', parent=getDebugFolder(), visible=False)
zaxis = -circleNormal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
#t = getTransformFromAxes(xaxis, yaxis, zaxis) # this was added to be consistent with segmentValveByRim
t = getTransformFromAxes(zaxis, -yaxis, xaxis) # this was added to be consistent with segmentValveByRim
t.PostMultiply()
t.Translate(origin)
# Spoke angle fitting:
if (1==0): # disabled jan 2015
# extract the relative positon of the points to the valve axis:
searchRegionSpokes = labelDistanceToLine(searchRegionSpokes, origin, [origin + circleNormal])
searchRegionSpokes = thresholdPoints(searchRegionSpokes, 'distance_to_line', [0.05, radius-0.04])
updatePolyData(searchRegionSpokes, 'valve spoke search', parent=getDebugFolder(), visible=False)
searchRegionSpokesLocal = transformPolyData(searchRegionSpokes, t.GetLinearInverse() )
points = vtkNumpy.getNumpyFromVtk(searchRegionSpokesLocal , 'Points')
spoke_angle = findValveSpokeAngle(points)
else:
spoke_angle = 0
spokeAngleTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,spoke_angle])
spokeTransform = transformUtils.copyFrame(t)
spokeAngleTransform.Concatenate(spokeTransform)
spokeObj = showFrame(spokeAngleTransform, 'spoke frame', parent=getDebugFolder(), visible=False, scale=radius)
spokeObj.addToView(app.getDRCView())
t = spokeAngleTransform
tubeRadius = 0.017
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
def showHistogram(polyData, arrayName, numberOfBins=100):
import matplotlib.pyplot as plt
x = vnp.getNumpyFromVtk(polyData, arrayName)
hist, bins = np.histogram(x, bins=numberOfBins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
return bins[np.argmax(hist)] + (bins[1] - bins[0])/2.0
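# Note: besides plotting, showHistogram returns the centre of the most populated
# bin, which callers can use as a quick mode estimate for the array.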
def applyKmeansLabel(polyData, arrayName, numberOfClusters, whiten=False):
import scipy.cluster
ar = vnp.getNumpyFromVtk(polyData, arrayName).copy()
if whiten:
        ar = scipy.cluster.vq.whiten(ar) # whiten returns a rescaled copy; keep it
codes, disturbances = scipy.cluster.vq.kmeans(ar, numberOfClusters)
if arrayName == 'normals' and numberOfClusters == 2:
v1 = codes[0]
v2 = codes[1]
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
angle = np.arccos(np.dot(v1, v2))
print 'angle between normals:', np.degrees(angle)
code, distance = scipy.cluster.vq.vq(ar, codes)
polyData = shallowCopy(polyData)
vnp.addNumpyToVtk(polyData, code, '%s_kmeans_label' % arrayName)
return polyData
def findValveSpokeAngle(points):
'''
    Determine the valve spoke angle by binning the spoke returns.
    Returns the angle in degrees.
'''
#np.savetxt("/home/mfallon/Desktop/spoke_points.csv", points, delimiter=",")
# convert all points to degrees in range [0,120]
angle = np.degrees( np.arctan2( points[:,1] , points[:,0] ) )
qq = np.where(angle < 0)[0]
angle[qq] += 360
angle = np.mod( angle, 120)
# find the spoke as the max of a histogram:
    bins = range(0,130,10) # bin edges 0,10,...,120
freq, bins = np.histogram(angle, bins)
amax = np.argmax(freq)
spoke_angle = bins[amax] + 5 # correct for 5deg offset
return spoke_angle
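# Worked example: spokes of a three-spoke valve at roughly 35, 155 and 275 degrees
# all wrap to 35 under the modulo-120 step, land in the 30-40 bin, and yield a
# spoke angle of 35 degrees (bin edge 30 plus the 5 degree half-bin offset).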
def findWallCenter(polyData, removeGroundMethod=removeGround):
'''
Find a frame at the center of the valve wall
    X & Y: midpoint of the extent of the wall-plane points
    Z: 4 feet off the ground (determined using the robot's feet)
Orientation: z-normal into plane, y-axis horizontal
'''
_ , polyData = removeGroundMethod(polyData)
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
wallPoints = applyVoxelGrid(wallPoints, leafSize=0.03)
wallPoints = extractLargestCluster(wallPoints, minClusterSize=100)
updatePolyData(wallPoints, 'auto valve wall', parent=getDebugFolder(), visible=False)
xvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,0]
yvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,1]
# median or mid of max or min?
#xcenter = np.median(xvalues)
#ycenter = np.median(yvalues)
xcenter = (np.max(xvalues)+np.min(xvalues))/2
ycenter = (np.max(yvalues)+np.min(yvalues))/2
# not used, not very reliable
#zvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,2]
#zcenter = np.median(zvalues)
zcenter = SegmentationContext.getGlobalInstance().getGroundHeight() + 1.2192 # valves are 4ft from ground
point1 =np.array([ xcenter, ycenter, zcenter ]) # center of the valve wall
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
normalObj = showFrame(t, 'valve wall frame', parent=getDebugFolder(), visible=False) # z direction out of wall
normalObj.addToView(app.getDRCView())
return t
def segmentValveWallAuto(expectedValveRadius=.195, mode='both', removeGroundMethod=removeGround ):
'''
Automatically segment a valve hanging in front of the wall at the center
'''
# find the valve wall and its center
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
t = findWallCenter(polyData, removeGroundMethod)
valve_point1 = [ 0 , 0.6 , 0]
valveTransform1 = transformUtils.frameFromPositionAndRPY(valve_point1, [0,0,0])
valveTransform1.Concatenate(t)
point1 = np.array(valveTransform1.GetPosition()) # left of wall
valve_point2 = [ 0 , -0.6 , 0]
valveTransform2 = transformUtils.frameFromPositionAndRPY(valve_point2, [0,0,0])
valveTransform2.Concatenate(t)
point2 = np.array(valveTransform2.GetPosition()) # left of wall
valve_point3 = [ 0 , 1.0 , 0] # lever can over hang
valveTransform3 = transformUtils.frameFromPositionAndRPY(valve_point3, [0,0,0])
valveTransform3.Concatenate(t)
    point3 = valveTransform3.GetPosition() # right of wall
d = DebugData()
d.addSphere(point2, radius=0.01)
d.addSphere(point1, radius=0.03)
d.addSphere(point3, radius=0.01)
updatePolyData(d.getPolyData(), 'auto wall points', parent=getDebugFolder(), visible=False)
if (mode=='valve'):
segmentValveByWallPlane(expectedValveRadius, point1, point2)
elif (mode=='lever'):
segmentLeverByWallPlane(point1, point3)
elif (mode=='both'):
segmentValveByWallPlane(expectedValveRadius, point1, point2)
segmentLeverByWallPlane(point1, point3)
else:
raise Exception('unexpected segmentation mode: ' + mode)
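# Usage sketch (requires a 'pointcloud snapshot' object in the object model):
#   segmentValveWallAuto(0.195, mode='valve')
#   segmentValveWallAuto(mode='both', removeGroundMethod=removeGroundSimple)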
def segmentLeverByWallPlane(point1, point2):
'''
    Determine the position (including rotation) of a lever near a wall.
    Input is as for the valve - two points on the wall either side of the lever.
'''
# 1. determine the wall plane and normal
centerPoint = (point1 + point2) / 2.0
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
# 2. Crop the cloud down to the lever only using the wall plane
perpLine = np.cross(point2 - point1, -normal)
#perpLine /= np.linalg.norm(perpLine)
#perpLine * np.linalg.norm(point2 - point1)/2.0
point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
d = DebugData()
d.addLine(point1, point2)
d.addLine(point3, point4)
updatePolyData(d.getPolyData(), 'lever crop lines', parent=getDebugFolder(), visible=False)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'lever valve wall', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.12, 0.2]) # very tight threshold
searchRegion = cropToLineSegment(searchRegion, point1, point2)
searchRegion = cropToLineSegment(searchRegion, point3, point4)
updatePolyData(searchRegion, 'lever search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
# 3. fit line to remaining points - all assumed to be the lever
linePoint, lineDirection, _ = applyLineFit(searchRegion, distanceThreshold=0.02)
#if np.dot(lineDirection, forwardDirection) < 0:
# lineDirection = -lineDirection
d = DebugData()
d.addSphere(linePoint, radius=0.02)
updatePolyData(d.getPolyData(), 'lever point', parent=getDebugFolder(), visible=False)
pts = vtkNumpy.getNumpyFromVtk(searchRegion, 'Points')
dists = np.dot(pts-linePoint, lineDirection)
lever_center = linePoint + lineDirection*np.min(dists)
lever_tip = linePoint + lineDirection*np.max(dists)
# 4. determine which lever point is closest to the lower left of the wall. That's the lever_center point
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
# a distant point down and left from wall
wall_point_lower_left = [ -20 , -20.0 , 0]
wall_point_lower_left_Transform = transformUtils.frameFromPositionAndRPY(wall_point_lower_left, [0,0,0])
wall_point_lower_left_Transform.Concatenate(t)
wall_point_lower_left = wall_point_lower_left_Transform.GetPosition()
d1 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_center, origin, normal) )**2) )
d2 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_tip, origin, normal) )**2) )
if (d2 < d1): # flip the points to match variable names
p_temp = lever_center
lever_center = lever_tip
lever_tip = p_temp
lineDirection = -lineDirection
# 5. compute the rotation angle of the lever and, using that, its frame
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(lever_center) # nominal frame at lever center
rotationAngle = -computeSignedAngleBetweenVectors(lineDirection, [0, 0, 1], -normal)
t_lever = transformUtils.frameFromPositionAndRPY( [0,0,0], [0,0, math.degrees( rotationAngle ) ] )
t_lever.PostMultiply()
t_lever.Concatenate(t)
d = DebugData()
# d.addSphere( point1 , radius=0.1)
d.addSphere( wall_point_lower_left , radius=0.1)
d.addSphere(lever_center, radius=0.04)
d.addSphere(lever_tip, radius=0.01)
d.addLine(lever_center, lever_tip)
updatePolyData(d.getPolyData(), 'lever end points', color=[0,1,0], parent=getDebugFolder(), visible=False)
radius = 0.01
length = np.sqrt( np.sum((lever_tip - lever_center )**2) )
d = DebugData()
d.addLine([0,0,0], [length, 0, 0], radius=radius)
d.addSphere ( [0, 0, 0], 0.02)
geometry = d.getPolyData()
obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances' , color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t_lever)
obj.addToView(app.getDRCView())
frameObj = showFrame(t_lever, 'lever frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
otdfType = 'lever_valve'
params = dict(origin=np.array(t_lever.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def applyICP(source, target):
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(source)
icp.SetTarget(target)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = vtk.vtkTransform()
t.SetMatrix(icp.GetMatrix())
return t
def applyDiskGlyphs(polyData):
voxelGridLeafSize = 0.03
normalEstimationSearchRadius = 0.05
diskRadius = 0.015
diskResolution = 12
scanInput = polyData
pd = applyVoxelGrid(scanInput, leafSize=voxelGridLeafSize)
pd = labelOutliers(pd, searchRadius=normalEstimationSearchRadius, neighborsInSearchRadius=3)
pd = thresholdPoints(pd, 'is_outlier', [0, 0])
pd = normalEstimation(pd, searchRadius=normalEstimationSearchRadius, searchCloud=scanInput)
disk = vtk.vtkDiskSource()
disk.SetOuterRadius(diskRadius)
disk.SetInnerRadius(0.0)
disk.SetRadialResolution(0)
disk.SetCircumferentialResolution(diskResolution)
disk.Update()
t = vtk.vtkTransform()
t.RotateY(90)
disk = transformPolyData(disk.GetOutput(), t)
glyph = vtk.vtkGlyph3D()
glyph.ScalingOff()
glyph.OrientOn()
glyph.SetSource(disk)
glyph.SetInput(pd)
glyph.SetVectorModeToUseNormal()
glyph.Update()
return shallowCopy(glyph.GetOutput())
def applyArrowGlyphs(polyData, computeNormals=True, voxelGridLeafSize=0.03, normalEstimationSearchRadius=0.05, arrowSize=0.02):
polyData = applyVoxelGrid(polyData, leafSize=0.02)
if computeNormals:
voxelData = applyVoxelGrid(polyData, leafSize=voxelGridLeafSize)
polyData = normalEstimation(polyData, searchRadius=normalEstimationSearchRadius, searchCloud=voxelData)
polyData = removeNonFinitePoints(polyData, 'normals')
flipNormalsWithViewDirection(polyData, SegmentationContext.getGlobalInstance().getViewDirection())
assert polyData.GetPointData().GetNormals()
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph = vtk.vtkGlyph3D()
glyph.SetScaleFactor(arrowSize)
glyph.SetSource(arrow.GetOutput())
glyph.SetInput(polyData)
glyph.SetVectorModeToUseNormal()
glyph.Update()
return shallowCopy(glyph.GetOutput())
def segmentLeverValve(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
radius = 0.01
length = 0.33
normal = -normal # set z to face into wall
zaxis = normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point2)
leverP1 = point2
leverP2 = point2 + xaxis * length
d = DebugData()
d.addLine([0,0,0], [length, 0, 0], radius=radius)
d.addSphere ( [0, 0, 0], 0.02)
geometry = d.getPolyData()
obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances', color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
frameObj = showFrame(t, 'lever frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
otdfType = 'lever_valve'
params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def segmentWye(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
wyeMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/wye.obj'))
wyeMeshPoint = np.array([0.0, 0.0, 0.005])
wyeMeshLeftHandle = np.array([0.032292, 0.02949, 0.068485])
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-wyeMeshPoint)
t.PostMultiply()
t.Translate(point2)
d = DebugData()
d.addSphere(point2, radius=0.005)
updatePolyData(d.getPolyData(), 'wye pick point', parent=getDebugFolder(), visible=False)
wyeObj = showPolyData(wyeMesh, 'wye', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
wyeObj.actor.SetUserTransform(t)
wyeObj.addToView(app.getDRCView())
frameObj = showFrame(t, 'wye frame', parent=wyeObj, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='wye', otdf_type='wye')
wyeObj.setAffordanceParams(params)
wyeObj.updateParamsFromActorTransform()
def segmentDoorHandle(otdfType, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
handlePoint = np.array([0.005, 0.065, 0.011])
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
xwidth = 0.01
ywidth = 0.13
zwidth = 0.022
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
#t.PreMultiply()
#t.Translate(-handlePoint)
t.PostMultiply()
t.Translate(point2)
name = 'door handle'
obj = showPolyData(cube, name, cls=FrameAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
def segmentTruss(point1, point2):
edge = point2 - point1
edgeLength = np.linalg.norm(edge)
stanceOffset = [-0.42, 0.0, 0.0]
stanceYaw = 0.0
d = DebugData()
p1 = [0.0, 0.0, 0.0]
p2 = -np.array([0.0, -1.0, 0.0]) * edgeLength
d.addSphere(p1, radius=0.02)
d.addSphere(p2, radius=0.02)
d.addLine(p1, p2)
stanceTransform = vtk.vtkTransform()
stanceTransform.PostMultiply()
stanceTransform.Translate(stanceOffset)
#stanceTransform.RotateZ(stanceYaw)
geometry = transformPolyData(d.getPolyData(), stanceTransform.GetLinearInverse())
yaxis = edge/edgeLength
zaxis = [0.0, 0.0, 1.0]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xwidth = 0.1
ywidth = edgeLength
zwidth = 0.1
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Concatenate(stanceTransform)
t.PostMultiply()
t.Translate(point1)
name = 'truss'
otdfType = 'robot_knees'
obj = showPolyData(geometry, name, cls=FrameAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=t.GetPosition(), xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
def segmentHoseNozzle(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
searchRegion = cropToSphere(polyData, point1, 0.10)
updatePolyData(searchRegion, 'nozzle search region', parent=getDebugFolder(), visible=False)
xaxis = [1,0,0]
yaxis = [0,-1,0]
zaxis = [0,0,-1]
origin = point1
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
nozzleRadius = 0.0266
nozzleLength = 0.042
nozzleTipRadius = 0.031
nozzleTipLength = 0.024
d = DebugData()
d.addLine(np.array([0,0,-nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0]), radius=nozzleRadius)
d.addLine(np.array([0,0,nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0 + nozzleTipLength]), radius=nozzleTipRadius)
obj = showPolyData(d.getPolyData(), 'hose nozzle', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
frameObj = showFrame(t, 'nozzle frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='firehose', otdf_type='firehose')
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def segmentDrillWall(point1, point2, point3):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
points = [point1, point2, point3]
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
expectedNormal = np.cross(point2 - point1, point3 - point1)
expectedNormal /= np.linalg.norm(expectedNormal)
if np.dot(expectedNormal, viewPlaneNormal) < 0:
expectedNormal *= -1.0
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=(point1 + point2 + point3)/3.0, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
points = [projectPointToPlane(point, origin, normal) for point in points]
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(points[0])
d = DebugData()
pointsInWallFrame = []
for p in points:
pp = np.zeros(3)
t.GetLinearInverse().TransformPoint(p, pp)
pointsInWallFrame.append(pp)
d.addSphere(pp, radius=0.02)
for a, b in zip(pointsInWallFrame, pointsInWallFrame[1:] + [pointsInWallFrame[0]]):
d.addLine(a, b, radius=0.015)
aff = showPolyData(d.getPolyData(), 'drill target', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill target frame', parent=aff, visible=False)
refitWallCallbacks.append(functools.partial(refitDrillWall, aff))
params = dict(origin=points[0], xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
p1y=pointsInWallFrame[0][1], p1z=pointsInWallFrame[0][2],
p2y=pointsInWallFrame[1][1], p2z=pointsInWallFrame[1][2],
p3y=pointsInWallFrame[2][1], p3z=pointsInWallFrame[2][2],
friendly_name='drill_wall', otdf_type='drill_wall')
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
refitWallCallbacks = []
def refitWall(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
for func in refitWallCallbacks:
func(point1, origin, normal)
def refitDrillWall(aff, point1, origin, normal):
t = aff.actor.GetUserTransform()
targetOrigin = np.array(t.GetPosition())
projectedOrigin = projectPointToPlane(targetOrigin, origin, normal)
projectedOrigin[2] = targetOrigin[2]
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(projectedOrigin)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
# this should be deprecated!
def getGroundHeightFromFeet():
rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
return np.array(rfoot.GetPosition())[2] - 0.0745342
# this should be deprecated!
def getTranslationRelativeToFoot(t):
    # hedged completion of a previously empty stub: translation of t expressed relative to the right foot
    rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
    return np.array(t.GetPosition()) - np.array(rfoot.GetPosition())
def segmentDrillWallConstrained(rightAngleLocation, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
expectedNormal = np.cross(point2 - point1, [0.0, 0.0, 1.0])
expectedNormal /= np.linalg.norm(expectedNormal)
if np.dot(expectedNormal, viewPlaneNormal) < 0:
expectedNormal *= -1.0
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
triangleOrigin = projectPointToPlane(point2, origin, normal)
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(triangleOrigin)
createDrillWall(rightAngleLocation, t)
def createDrillWall(rightAngleLocation, trianglePose):
# recover the origin and axes from the pose:
triangleOrigin = trianglePose.GetPosition()
xaxis, yaxis, zaxis = transformUtils.getAxesFromTransform( trianglePose )
    # 0.6096 m = 24 in * 0.0254 (inches to meters)
    # 0.3048 m = 12 in * 0.0254 (inches to meters)
edgeRight = np.array([0.0, -1.0, 0.0]) * (0.6)
edgeUp = np.array([0.0, 0.0, 1.0]) * (0.3)
pointsInWallFrame = np.zeros((3,3))
if rightAngleLocation == DRILL_TRIANGLE_BOTTOM_LEFT:
pointsInWallFrame[1] = edgeUp
pointsInWallFrame[2] = edgeRight
elif rightAngleLocation == DRILL_TRIANGLE_BOTTOM_RIGHT:
pointsInWallFrame[1] = edgeUp # edgeRight +edgeUp
pointsInWallFrame[2] = -edgeRight # edgeRight
elif rightAngleLocation == DRILL_TRIANGLE_TOP_LEFT:
pointsInWallFrame[1] = edgeRight
pointsInWallFrame[2] = -edgeUp
elif rightAngleLocation == DRILL_TRIANGLE_TOP_RIGHT:
pointsInWallFrame[1] = edgeRight
pointsInWallFrame[2] = edgeRight - edgeUp
else:
        raise Exception('unexpected value for right angle location: ' + str(rightAngleLocation))
center = pointsInWallFrame.sum(axis=0)/3.0
shrinkFactor = 1#0.90
shrinkPoints = (pointsInWallFrame - center) * shrinkFactor + center
d = DebugData()
for p in pointsInWallFrame:
d.addSphere(p, radius=0.015)
for a, b in zip(pointsInWallFrame, np.vstack((pointsInWallFrame[1:], pointsInWallFrame[0]))):
d.addLine(a, b, radius=0.005)#01)
for a, b in zip(shrinkPoints, np.vstack((shrinkPoints[1:], shrinkPoints[0]))):
d.addLine(a, b, radius=0.005)#0.025
folder = om.getOrCreateContainer('affordances')
wall = om.findObjectByName('wall')
om.removeFromObjectModel(wall)
aff = showPolyData(d.getPolyData(), 'wall', cls=FrameAffordanceItem, color=[0,1,0], visible=True, parent=folder)
aff.actor.SetUserTransform(trianglePose)
aff.addToView(app.getDRCView())
refitWallCallbacks.append(functools.partial(refitDrillWall, aff))
frameObj = showFrame(trianglePose, 'wall frame', parent=aff, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=triangleOrigin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
p1y=shrinkPoints[0][1], p1z=shrinkPoints[0][2],
p2y=shrinkPoints[1][1], p2z=shrinkPoints[1][2],
p3y=shrinkPoints[2][1], p3z=shrinkPoints[2][2],
friendly_name='drill_wall', otdf_type='drill_wall')
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
'''
rfoot = getLinkFrame(drcargs.getDirectorConfig()['rightFootLink'])
tt = getTransformFromAxes(xaxis, yaxis, zaxis)
tt.PostMultiply()
tt.Translate(rfoot.GetPosition())
showFrame(tt, 'rfoot with wall orientation')
aff.footToAffTransform = computeAToB(tt, trianglePose)
footToAff = list(aff.footToAffTransform.GetPosition())
tt.TransformVector(footToAff, footToAff)
d = DebugData()
d.addSphere(tt.GetPosition(), radius=0.02)
d.addLine(tt.GetPosition(), np.array(tt.GetPosition()) + np.array(footToAff))
showPolyData(d.getPolyData(), 'rfoot debug')
'''
def getDrillAffordanceParams(origin, xaxis, yaxis, zaxis, drillType="dewalt_button"):
if (drillType=="dewalt_button"):
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
button_x=0.007,
button_y=-0.035,
button_z=-0.06,
button_roll=-90.0,
button_pitch=-90.0,
button_yaw=0.0,
bit_x=-0.01,
bit_y=0.0,
bit_z=0.15,
bit_roll=0,
bit_pitch=-90,
bit_yaw=0,
friendly_name='dewalt_button', otdf_type='dewalt_button')
else:
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
button_x=0.007,
button_y=-0.035,
button_z=-0.06,
button_roll=0.0,
button_pitch=0.0,
button_yaw=0.0,
bit_x=0.18,
bit_y=0.0,
bit_z=0.13,
bit_roll=0,
bit_pitch=0,
bit_yaw=0,
friendly_name='dewalt_barrel', otdf_type='dewalt_barrel')
return params
def getDrillMesh(applyBitOffset=False):
button = np.array([0.007, -0.035, -0.06])
drillMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt_button.obj'))
if applyBitOffset:
t = vtk.vtkTransform()
t.Translate(0.01, 0.0, 0.0)
drillMesh = transformPolyData(drillMesh, t)
d = DebugData()
d.addPolyData(drillMesh)
d.addSphere(button, radius=0.005, color=[0,1,0])
d.addLine([0.0,0.0,0.155], [0.0, 0.0, 0.14], radius=0.001, color=[0,1,0])
return shallowCopy(d.getPolyData())
def getDrillBarrelMesh():
return ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt.ply'), computeNormals=True)
def segmentDrill(point1, point2, point3):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
searchRegion = cropToSphere(searchRegion, point2, 0.30)
drillPoints = extractLargestCluster(searchRegion)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = point3 - point2
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(point2)
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def makePolyDataFields(pd):
mesh = computeDelaunay3D(pd)
if not mesh.GetNumberOfPoints():
return None
origin, edges, wireframe = getOrientedBoundingBox(mesh)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
boxCenter = computeCentroid(wireframe)
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(boxCenter)
pd = transformPolyData(pd, t.GetLinearInverse())
wireframe = transformPolyData(wireframe, t.GetLinearInverse())
mesh = transformPolyData(mesh, t.GetLinearInverse())
return FieldContainer(points=pd, box=wireframe, mesh=mesh, frame=t, dims=edgeLengths, axes=axes)
def makeMovable(obj, initialTransform=None):
'''
Adds a child frame to the given PolyDataItem. If initialTransform is not
given, then an origin frame is computed for the polydata using the
    center and orientation of the oriented bounding box of the polydata. The polydata
is transformed using the inverse of initialTransform and then a child frame
is assigned to the object to reposition it.
'''
pd = obj.polyData
t = initialTransform
if t is None:
origin, edges, wireframe = getOrientedBoundingBox(pd)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
boxCenter = computeCentroid(wireframe)
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(boxCenter)
pd = transformPolyData(pd, t.GetLinearInverse())
obj.setPolyData(pd)
frame = obj.getChildFrame()
if frame:
frame.copyFrame(t)
else:
frame = vis.showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
obj.actor.SetUserTransform(t)
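
# Hedged usage sketch for makeMovable (illustration only, not part of the
# original module): take an object already shown in the object model and give
# it a child frame so it can be repositioned interactively. The object name
# 'debris board' and the helper name are hypothetical placeholders.
def _exampleMakeMovableUsage(objectName='debris board'):
    obj = om.findObjectByName(objectName)
    if obj is None:
        return None
    makeMovable(obj)
    return obj.getChildFrame()
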
def segmentTable(polyData, searchPoint):
'''
Segment a horizontal table surface (perpendicular to +Z) in the given polyData
using the given search point.
Returns polyData, tablePoints, origin, normal
polyData is the input polyData with a new 'dist_to_plane' attribute.
'''
expectedNormal = np.array([0.0, 0.0, 1.0])
tableNormalEpsilon = 0.4
polyData = applyVoxelGrid(polyData, leafSize=0.01)
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=searchPoint, searchRadius=0.3, angleEpsilon=tableNormalEpsilon, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
tablePoints = labelDistanceToPoint(tablePoints, searchPoint)
tablePointsClusters = extractClusters(tablePoints, minClusterSize=10, clusterTolerance=0.1)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
return polyData, tablePoints, origin, normal
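
# Hedged usage sketch for segmentTable (illustration only): pull the current
# 'pointcloud snapshot' from the object model, as the other segment* functions
# in this module do, and return the fitted table plane.
def _exampleSegmentTableUsage(searchPoint):
    inputObj = om.findObjectByName('pointcloud snapshot')
    polyData, tablePoints, origin, normal = segmentTable(inputObj.polyData, searchPoint)
    return origin, normal
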
def filterClusterObjects(clusters):
result = []
for cluster in clusters:
if np.abs(np.dot(cluster.axes[0], [0,0,1])) < 0.5:
continue
if cluster.dims[0] < 0.1:
continue
result.append(cluster)
return result
def segmentTableThenFindDrills(polyData, pickedPoint):
    ''' Given a point cloud of a table with drills on it,
    find all clusters and fit drills.
    Assumes that every cluster is a drill
    (nothing else is ever on a table ;)
    '''
# 1 segment a table and return clusters and the plane normal
clusters, tablePoints, plane_origin, plane_normal = segmentTableSceneClusters(polyData, pickedPoint, True)
# 2 Detect drills within the clusters:
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
forwardDirection = np.array([1.0, 0.0, 0.0])
viewFrame.TransformVector(forwardDirection, forwardDirection)
robotForward =forwardDirection
fitResults=[]
for clusterObj in clusters:
# vis.showPolyData(clusterObj, 'cluster debug')
drillFrame = fitDrillBarrel (clusterObj, robotForward, plane_origin, plane_normal)
if drillFrame is not None:
fitResults.append((clusterObj, drillFrame))
if not fitResults:
return
for i, fitResult in enumerate(fitResults):
cluster, drillFrame = fitResult
drillOrigin = np.array(drillFrame.GetPosition())
drillMesh = getDrillBarrelMesh()
#drill = om.findObjectByName('drill')
name= 'drill %d' % i
name2= 'drill %d frame' % i
drill = showPolyData(drillMesh, name, cls=FrameAffordanceItem, color=[0, 1, 0], visible=True)
drillFrame = updateFrame(drillFrame, name2, parent=drill, scale=0.2, visible=False)
drill.actor.SetUserTransform(drillFrame.transform)
drill.setSolidColor([0, 1, 0])
#cluster.setProperty('Visible', True)
def segmentTableScene(polyData, searchPoint, filterClustering = True):
    ''' This seems to be unused, deprecated? '''
objectClusters, tablePoints, _, _ = segmentTableSceneClusters(polyData, searchPoint)
clusters = [makePolyDataFields(cluster) for cluster in objectClusters]
clusters = [cluster for cluster in clusters if cluster is not None]
if (filterClustering):
clusters = filterClusterObjects(clusters)
return FieldContainer(table=makePolyDataFields(tablePoints), clusters=clusters)
def segmentTableSceneClusters(polyData, searchPoint, clusterInXY=False):
    ''' Given a point cloud of a table with some objects on it
    and a point on that table,
    determine the plane of the table and
    extract clusters above the table.
    '''
polyData, tablePoints, plane_origin, plane_normal = segmentTable(polyData, searchPoint)
tableCentroid = computeCentroid(tablePoints)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.5])
# TODO: replace with 'all points above the table':
searchRegion = cropToSphere(searchRegion, tableCentroid, 0.5) # was 1.0
tableCentroidFrame = transformUtils.frameFromPositionAndRPY(tableCentroid, [0,0,0])
showFrame(tableCentroidFrame, 'tableCentroid', visible=False, parent=getDebugFolder(), scale=0.15)
showPolyData(searchRegion, 'searchRegion', color=[1,0,0], visible=False, parent=getDebugFolder())
objectClusters = extractClusters(searchRegion, clusterInXY, clusterTolerance=0.02, minClusterSize=10)
#print 'got %d clusters' % len(objectClusters)
for i,c in enumerate(objectClusters):
name= "cluster %d" % i
showPolyData(c, name, color=getRandomColor(), visible=False, parent=getDebugFolder())
return objectClusters, tablePoints, plane_origin, plane_normal
def segmentTableEdge(polyData, searchPoint, edgePoint):
'''
segment a table using two points:
searchPoint is a point on the table top
edgePoint is a point on the edge facing the robot
'''
polyData, tablePoints, origin, normal = segmentTable(polyData, searchPoint)
tableMesh = computeDelaunay3D(tablePoints)
origin, edges, wireframe = getOrientedBoundingBox(tableMesh)
origin = origin + 0.5*np.sum(edges, axis=0)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
def findAxis(referenceVector):
refAxis = referenceVector / np.linalg.norm(referenceVector)
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
axisIndex = axisProjections.argmax()
axis = axes[axisIndex]
if np.dot(axis, refAxis) < 0:
axis = -axis
return axis, axisIndex
tableXAxis, tableXAxisIndex = findAxis(searchPoint - edgePoint)
tableZAxis, tableZAxisIndex = findAxis([0,0,1])
tableYAxis, tableYAxisIndex = findAxis(np.cross(tableZAxis, tableXAxis))
assert len(set([tableXAxisIndex, tableYAxisIndex, tableZAxisIndex])) == 3
axes = tableXAxis, tableYAxis, tableZAxis
edgeLengths = edgeLengths[tableXAxisIndex], edgeLengths[tableYAxisIndex], edgeLengths[tableZAxisIndex]
edgeCenter = origin - 0.5 * axes[0]*edgeLengths[0] + 0.5*axes[2]*edgeLengths[2]
edgeLeft = edgeCenter + 0.5 * axes[1]*edgeLengths[1]
edgeRight = edgeCenter - 0.5 * axes[1]*edgeLengths[1]
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(edgeRight)
table_center = [edgeLengths[0]/2, edgeLengths[1]/2, -edgeLengths[2]/2]
t.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY(table_center,[0,0,0])
t.Concatenate(t3)
tablePoints = transformPolyData(tablePoints, t.GetLinearInverse())
wireframe = transformPolyData(wireframe, t.GetLinearInverse())
tableMesh = transformPolyData(tableMesh, t.GetLinearInverse())
return FieldContainer(points=tablePoints, box=wireframe, mesh=tableMesh, frame=t, dims=edgeLengths, axes=axes)
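
# Hedged usage sketch for segmentTableEdge (illustration only): the two picked
# points would normally come from an annotation (e.g. the PointPicker class
# defined later in this module); here they are plain sequences supplied by the
# caller. The helper name is a hypothetical placeholder.
def _exampleSegmentTableEdgeUsage(searchPoint, edgePoint):
    inputObj = om.findObjectByName('pointcloud snapshot')
    tableData = segmentTableEdge(inputObj.polyData, np.array(searchPoint), np.array(edgePoint))
    # tableData.frame has x pointing from the robot-facing edge toward the
    # search point, z up, and its origin roughly at the table center
    # (see the function above).
    return tableData.frame, tableData.dims
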
def segmentDrillAuto(point1, polyData=None):
if polyData is None:
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
expectedNormal = np.array([0.0, 0.0, 1.0])
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=point1, searchRadius=0.4, angleEpsilon=0.2, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
tablePoints = labelDistanceToPoint(tablePoints, point1)
tablePointsClusters = extractClusters(tablePoints)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
searchRegion = cropToSphere(searchRegion, point1, 0.30)
drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)
# determine drill orientation (rotation about z axis)
centroids = computeCentroids(drillPoints, axis=normal)
centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
d = DebugData()
updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = centroids[0] - centroids[-1]
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
# note this hack to orient the drill correctly:
t = getTransformFromAxes(yaxis, -xaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(centroids[-1])
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False, scale=0.2).addToView(app.getDRCView())
params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def segmentDrillButton(point1):
d = DebugData()
d.addSphere([0,0,0], radius=0.005)
obj = updatePolyData(d.getPolyData(), 'sensed drill button', color=[0,0.5,0.5], visible=True)
# there is no orientation, but this allows the XYZ point to be queried
pointerTipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
obj.actor.SetUserTransform(pointerTipFrame)
obj.addToView(app.getDRCView())
frameObj = updateFrame(obj.actor.GetUserTransform(), 'sensed drill button frame', parent=obj, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
def segmentPointerTip(point1):
d = DebugData()
d.addSphere([0,0,0], radius=0.005)
obj = updatePolyData(d.getPolyData(), 'sensed pointer tip', color=[0.5,0.5,0.0], visible=True)
# there is no orientation, but this allows the XYZ point to be queried
pointerTipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
obj.actor.SetUserTransform(pointerTipFrame)
obj.addToView(app.getDRCView())
frameObj = updateFrame(obj.actor.GetUserTransform(), 'sensed pointer tip frame', parent=obj, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
def fitGroundObject(polyData=None, expectedDimensionsMin=[0.2, 0.02], expectedDimensionsMax=[1.3, 0.1]):
removeGroundFunc = removeGroundSimple
polyData = polyData or getCurrentRevolutionData()
groundPoints, scenePoints = removeGroundFunc(polyData, groundThickness=0.02, sceneHeightFromGround=0.035)
searchRegion = thresholdPoints(scenePoints, 'dist_to_plane', [0.05, 0.2])
clusters = extractClusters(searchRegion, clusterTolerance=0.07, minClusterSize=4)
candidates = []
for clusterId, cluster in enumerate(clusters):
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]
found = (expectedDimensionsMin[0] <= edgeLengths[0] < expectedDimensionsMax[0]
and expectedDimensionsMin[1] <= edgeLengths[1] < expectedDimensionsMax[1])
if not found:
updatePolyData(cluster, 'candidate cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
continue
updatePolyData(cluster, 'cluster %d' % clusterId, color=[0,1,0], parent=getDebugFolder(), visible=False)
candidates.append(cluster)
if not candidates:
return None
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
viewOrigin = np.array(viewFrame.GetPosition())
dists = [np.linalg.norm(viewOrigin - computeCentroid(cluster)) for cluster in candidates]
candidates = [candidates[i] for i in np.argsort(dists)]
cluster = candidates[0]
obj = makePolyDataFields(cluster)
return vis.showClusterObjects([obj], parent='segmentation')[0]
def findHorizontalSurfaces(polyData, removeGroundFirst=False, normalEstimationSearchRadius=0.05,
clusterTolerance=0.025, distanceToPlaneThreshold=0.0025, normalsDotUpRange=[0.95, 1.0], showClusters=False):
'''
Find the horizontal surfaces, tuned to work with walking terrain
'''
searchZ = [0.0, 2.0]
voxelGridLeafSize = 0.01
minClusterSize = 150
verboseFlag = False
if (removeGroundFirst):
groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', searchZ)
updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=verboseFlag)
else:
scenePoints = polyData
if not scenePoints.GetNumberOfPoints():
return
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(scenePoints)
f.SetInput(1, applyVoxelGrid(scenePoints, voxelGridLeafSize))
# Duration 0.2 sec for V1 log:
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotUp = np.abs(np.dot(normals, [0,0,1]))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')
surfaces = thresholdPoints(scenePoints, 'normals_dot_up', normalsDotUpRange)
updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
updatePolyData(surfaces, 'surfaces points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
clusters = extractClusters(surfaces, clusterTolerance=clusterTolerance, minClusterSize=minClusterSize)
planeClusters = []
clustersLarge = []
om.removeFromObjectModel(om.findObjectByName('surface clusters'))
folder = om.getOrCreateContainer('surface clusters', parentObj=getDebugFolder())
for i, cluster in enumerate(clusters):
updatePolyData(cluster, 'surface cluster %d' % i, parent=folder, color=getRandomColor(), visible=verboseFlag)
planePoints, _ = applyPlaneFit(cluster, distanceToPlaneThreshold)
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
if planePoints.GetNumberOfPoints() > minClusterSize:
clustersLarge.append(cluster)
obj = makePolyDataFields(planePoints)
if obj is not None:
planeClusters.append(obj)
folder = om.getOrCreateContainer('surface objects', parentObj=getDebugFolder())
if showClusters:
vis.showClusterObjects(planeClusters, parent=folder)
return clustersLarge
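
# Hedged usage sketch for findHorizontalSurfaces (illustration only): run it on
# the current snapshot with ground removal and cluster display enabled.
def _exampleFindHorizontalSurfacesUsage():
    inputObj = om.findObjectByName('pointcloud snapshot')
    return findHorizontalSurfaces(inputObj.polyData, removeGroundFirst=True, showClusters=True)
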
def fitVerticalPosts(polyData):
groundPoints, scenePoints = removeGround(polyData)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.1, 4.0])
if not scenePoints.GetNumberOfPoints():
return
scenePoints = applyVoxelGrid(scenePoints, leafSize=0.03)
clusters = extractClusters(scenePoints, clusterTolerance=0.15, minClusterSize=10)
def isPostCluster(cluster, lineDirection):
up = [0,0,1]
minPostLength = 1.0
maxRadius = 0.3
angle = math.degrees(math.acos(np.dot(up,lineDirection) / (np.linalg.norm(up) * np.linalg.norm(lineDirection))))
if angle > 15:
return False
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges]
if edgeLengths[0] < minPostLength:
return False
# extract top half
zvalues = vtkNumpy.getNumpyFromVtk(cluster, 'Points')[:,2].copy()
vtkNumpy.addNumpyToVtk(cluster, zvalues, 'z')
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
cluster = thresholdPoints(cluster, 'z', [(minZ + maxZ)/2.0, maxZ])
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges]
if edgeLengths[1] > maxRadius or edgeLengths[2] > maxRadius:
return False
return True
def makeCylinderAffordance(linePoints, lineDirection, lineOrigin, postId):
pts = vtkNumpy.getNumpyFromVtk(linePoints, 'Points')
dists = np.dot(pts-lineOrigin, lineDirection)
p1 = lineOrigin + lineDirection*np.min(dists)
p2 = lineOrigin + lineDirection*np.max(dists)
origin = (p1+p2)/2.0
lineLength = np.linalg.norm(p2-p1)
t = transformUtils.getTransformFromOriginAndNormal(origin, lineDirection)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CylinderAffordanceItem', Name='post %d' % postId,
uuid=newUUID(), pose=pose, Radius=0.05, Length=float(lineLength), Color=[0.0, 1.0, 0.0])
desc['Collision Enabled'] = True
return affordanceManager.newAffordanceFromDescription(desc)
rejectFolder = om.getOrCreateContainer('nonpost clusters', parentObj=getDebugFolder())
keepFolder = om.getOrCreateContainer('post clusters', parentObj=getDebugFolder())
for i, cluster in enumerate(clusters):
linePoint, lineDirection, linePoints = applyLineFit(cluster, distanceThreshold=0.1)
if isPostCluster(cluster, lineDirection):
vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=keepFolder)
makeCylinderAffordance(linePoints, lineDirection, linePoint, i)
else:
vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=rejectFolder)
def findAndFitDrillBarrel(polyData=None):
    ''' Find the horizontal surfaces;
    on the horizontal surfaces, find all the drills.
    '''
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = polyData or inputObj.polyData
groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.50)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.5, 1.7])
if not scenePoints.GetNumberOfPoints():
return
normalEstimationSearchRadius = 0.10
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(scenePoints)
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotUp = np.abs(np.dot(normals, [0,0,1]))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')
surfaces = thresholdPoints(scenePoints, 'normals_dot_up', [0.95, 1.0])
updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=False)
updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=False)
updatePolyData(surfaces, 'surfaces', parent=getDebugFolder(), visible=False)
clusters = extractClusters(surfaces, clusterTolerance=0.15, minClusterSize=50)
fitResults = []
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
forwardDirection = np.array([1.0, 0.0, 0.0])
viewFrame.TransformVector(forwardDirection, forwardDirection)
robotOrigin = viewFrame.GetPosition()
robotForward =forwardDirection
#print 'robot origin:', robotOrigin
#print 'robot forward:', robotForward
centroid =[]
for clusterId, cluster in enumerate(clusters):
clusterObj = updatePolyData(cluster, 'surface cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]
skipCluster = False
for edgeLength in edgeLengths:
#print 'cluster %d edge length: %f' % (clusterId, edgeLength)
if edgeLength < 0.35 or edgeLength > 0.75:
skipCluster = True
if skipCluster:
continue
clusterObj.setSolidColor([0, 0, 1])
centroid = np.average(vtkNumpy.getNumpyFromVtk(cluster, 'Points'), axis=0)
try:
drillFrame = segmentDrillBarrelFrame(centroid, polyData=scenePoints, forwardDirection=robotForward)
if drillFrame is not None:
fitResults.append((clusterObj, drillFrame))
except:
print traceback.format_exc()
print 'fit drill failed for cluster:', clusterId
if not fitResults:
return
sortFittedDrills(fitResults, robotOrigin, robotForward)
return centroid
def sortFittedDrills(fitResults, robotOrigin, robotForward):
angleToFitResults = []
for fitResult in fitResults:
cluster, drillFrame = fitResult
drillOrigin = np.array(drillFrame.GetPosition())
angleToDrill = np.abs(computeSignedAngleBetweenVectors(robotForward, drillOrigin - robotOrigin, [0,0,1]))
angleToFitResults.append((angleToDrill, cluster, drillFrame))
#print 'angle to candidate drill:', angleToDrill
angleToFitResults.sort(key=lambda x: x[0])
#print 'using drill at angle:', angleToFitResults[0][0]
drillMesh = getDrillBarrelMesh()
for i, fitResult in enumerate(angleToFitResults):
angleToDrill, cluster, drillFrame = fitResult
if i == 0:
drill = om.findObjectByName('drill')
drill = updatePolyData(drillMesh, 'drill', color=[0, 1, 0], cls=FrameAffordanceItem, visible=True)
drillFrame = updateFrame(drillFrame, 'drill frame', parent=drill, visible=False)
drill.actor.SetUserTransform(drillFrame.transform)
drill.setAffordanceParams(dict(otdf_type='dewalt_button', friendly_name='dewalt_button'))
drill.updateParamsFromActorTransform()
drill.setSolidColor([0, 1, 0])
#cluster.setProperty('Visible', True)
else:
drill = showPolyData(drillMesh, 'drill candidate', color=[1,0,0], visible=False, parent=getDebugFolder())
drill.actor.SetUserTransform(drillFrame)
om.addToObjectModel(drill, parentObj=getDebugFolder())
def computeSignedAngleBetweenVectors(v1, v2, perpendicularVector):
'''
Computes the signed angle between two vectors in 3d, given a perpendicular vector
to determine sign. Result returned is radians.
'''
v1 = np.array(v1)
v2 = np.array(v2)
perpendicularVector = np.array(perpendicularVector)
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
perpendicularVector /= np.linalg.norm(perpendicularVector)
return math.atan2(np.dot(perpendicularVector, np.cross(v1, v2)), np.dot(v1, v2))
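
# Small numeric sketch of the sign convention used by
# computeSignedAngleBetweenVectors (illustration only): with +z as the
# perpendicular vector, rotating +x toward +y gives a positive angle and
# rotating +x toward -y gives a negative angle.
def _exampleSignedAngle():
    positive = computeSignedAngleBetweenVectors([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0])   # ~ +pi/2
    negative = computeSignedAngleBetweenVectors([1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0])  # ~ -pi/2
    return positive, negative
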
def segmentDrillBarrelFrame(point1, polyData, forwardDirection):
tableClusterSearchRadius = 0.4
drillClusterSearchRadius = 0.5 #0.3
expectedNormal = np.array([0.0, 0.0, 1.0])
if not polyData.GetNumberOfPoints():
return
polyData, plane_origin, plane_normal = applyPlaneFit(polyData, expectedNormal=expectedNormal,
perpendicularAxis=expectedNormal, searchOrigin=point1,
searchRadius=tableClusterSearchRadius, angleEpsilon=0.2, returnOrigin=True)
if not polyData.GetNumberOfPoints():
return
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
tablePoints = labelDistanceToPoint(tablePoints, point1)
tablePointsClusters = extractClusters(tablePoints)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
if not tablePointsClusters:
return
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.3])
if not searchRegion.GetNumberOfPoints():
return
searchRegion = cropToSphere(searchRegion, point1, drillClusterSearchRadius)
#drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)
t = fitDrillBarrel (searchRegion, forwardDirection, plane_origin, plane_normal)
return t
def segmentDrillBarrel(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
forwardDirection = -np.array(getCurrentView().camera().GetViewPlaneNormal())
    t = segmentDrillBarrelFrame(point1, polyData, forwardDirection)
assert t is not None
drillMesh = getDrillBarrelMesh()
aff = showPolyData(drillMesh, 'drill', visible=True)
aff.addToView(app.getDRCView())
aff.actor.SetUserTransform(t)
drillFrame = showFrame(t, 'drill frame', parent=aff, visible=False)
drillFrame.addToView(app.getDRCView())
return aff, drillFrame
def segmentDrillAlignedWithTable(point, polyData = None):
'''
Yet Another Drill Fitting Algorithm [tm]
    This one fits the button drill assuming it's on the table
    and aligned with the table frame (because the button drill orientation is difficult to find).
    The table must have its long side facing the robot.
'''
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = polyData or inputObj.polyData
# segment the table and recover the precise up direction normal:
polyDataOut, tablePoints, origin, normal = segmentTable(polyData,point)
#print origin # this origin is bunk
#tableCentroid = computeCentroid(tablePoints)
# get the bounding box edges
OBBorigin, edges, _ = getOrientedBoundingBox(tablePoints)
#print "OBB out"
#print OBBorigin
#print edges
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
#print edgeLengths
#print axes
# check which direction the robot is facing and flip x-axis of table if necessary
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
#print "main axes", axes[1]
#print "viewDirection", viewDirection
#dp = np.dot(axes[1], viewDirection)
#print dp
if np.dot(axes[1], viewDirection) < 0:
#print "flip the x-direction"
axes[1] = -axes[1]
# define the x-axis to be along the 2nd largest edge
xaxis = axes[1]
xaxis = np.array(xaxis)
zaxis = np.array( normal )
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
tableOrientation = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
#tableTransform = transformUtils.frameFromPositionAndRPY( tableCentroid , tableOrientation.GetOrientation() )
#updateFrame(tableTransform, 'table frame [z up, x away face]', parent="segmentation", visible=True).addToView(app.getDRCView())
data = segmentTableScene(polyData, point )
#vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')
# crude use of the table frame to determine the frame of the drill on the table
#t2 = transformUtils.frameFromPositionAndRPY([0,0,0], [180, 0 , 90] )
#drillOrientationTransform = transformUtils.copyFrame( om.findObjectByName('object 1 frame').transform )
#drillOrientationTransform.PreMultiply()
#drillOrientationTransform.Concatenate(t2)
#vis.updateFrame(t, 'drillOrientationTransform',visible=True)
#table_xaxis, table_yaxis, table_zaxis = transformUtils.getAxesFromTransform( data.table.frame )
#drillOrientation = transformUtils.orientationFromAxes( table_yaxis, table_xaxis, -1*np.array( table_zaxis) )
drillTransform = transformUtils.frameFromPositionAndRPY( data.clusters[0].frame.GetPosition() , tableOrientation.GetOrientation() )
drillMesh = getDrillMesh()
drill = om.findObjectByName('drill')
om.removeFromObjectModel(drill)
aff = showPolyData(drillMesh, 'drill', color=[0.0, 1.0, 0.0], cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(drillTransform)
aff.addToView(app.getDRCView())
frameObj = updateFrame(drillTransform, 'drill frame', parent=aff, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(drillTransform.GetPosition()), [1,0,0], [0,1,0], [0,0,1], drillType="dewalt_button")
aff.setAffordanceParams(params)
def segmentDrillInHand(p1, p2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
distanceToLineThreshold = 0.05
polyData = labelDistanceToLine(polyData, p1, p2)
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
lineSegment = p2 - p1
lineLength = np.linalg.norm(lineSegment)
cropped, polyData = cropToPlane(polyData, p1, lineSegment/lineLength, [-0.03, lineLength + 0.03])
updatePolyData(cropped, 'drill cluster', parent=getDebugFolder(), visible=False)
drillPoints = cropped
normal = lineSegment/lineLength
centroids = computeCentroids(drillPoints, axis=normal)
centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
d = DebugData()
updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = centroids[0] - centroids[-1]
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(p2)
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(t.GetPosition()), xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def addDrillAffordance():
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
t = vtk.vtkTransform()
t.PostMultiply()
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(t.GetPosition()), [1,0,0], [0,1,0], [0,0,1])
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
return aff
def getLinkFrame(linkName):
robotStateModel = om.findObjectByName('robot state model')
robotStateModel = robotStateModel or getVisibleRobotModel()
assert robotStateModel
t = vtk.vtkTransform()
robotStateModel.model.getLinkToWorld(linkName, t)
return t
def getDrillInHandOffset(zRotation=0.0, zTranslation=0.0, xTranslation=0.0, yTranslation=0.0,flip=False):
drillOffset = vtk.vtkTransform()
drillOffset.PostMultiply()
if flip:
drillOffset.RotateY(180)
drillOffset.RotateZ(zRotation)
drillOffset.RotateY(-90)
#drillOffset.Translate(0, 0.09, zTranslation - 0.015)
#drillOffset.Translate(zTranslation - 0.015, 0.035 + xTranslation, 0.0)
drillOffset.Translate(zTranslation, xTranslation, 0.0 + yTranslation)
return drillOffset
def moveDrillToHand(drillOffset, hand='right'):
drill = om.findObjectByName('drill')
if not drill:
drill = addDrillAffordance()
assert hand in ('right', 'left')
drillTransform = drill.actor.GetUserTransform()
rightBaseLink = getLinkFrame('%s_hand_face' % hand[0])
drillTransform.PostMultiply()
drillTransform.Identity()
drillTransform.Concatenate(drillOffset)
drillTransform.Concatenate(rightBaseLink)
drill._renderAllViews()
class PointPicker(TimerCallback):
def __init__(self, numberOfPoints=3):
TimerCallback.__init__(self)
self.targetFps = 30
self.enabled = False
self.numberOfPoints = numberOfPoints
self.annotationObj = None
self.drawLines = True
self.clear()
def clear(self):
self.points = [None for i in xrange(self.numberOfPoints)]
self.hoverPos = None
self.annotationFunc = None
self.lastMovePos = [0, 0]
def onMouseMove(self, displayPoint, modifiers=None):
self.lastMovePos = displayPoint
def onMousePress(self, displayPoint, modifiers=None):
#print 'mouse press:', modifiers
#if not modifiers:
# return
for i in xrange(self.numberOfPoints):
if self.points[i] is None:
self.points[i] = self.hoverPos
break
if self.points[-1] is not None:
self.finish()
def finish(self):
self.enabled = False
om.removeFromObjectModel(self.annotationObj)
points = [p.copy() for p in self.points]
if self.annotationFunc is not None:
self.annotationFunc(*points)
removeViewPicker(self)
def handleRelease(self, displayPoint):
pass
def draw(self):
d = DebugData()
points = [p if p is not None else self.hoverPos for p in self.points]
# draw points
for p in points:
if p is not None:
d.addSphere(p, radius=0.01)
if self.drawLines:
# draw lines
for a, b in zip(points, points[1:]):
if b is not None:
d.addLine(a, b)
# connect end points
if points[-1] is not None:
d.addLine(points[0], points[-1])
self.annotationObj = updatePolyData(d.getPolyData(), 'annotation', parent=getDebugFolder())
self.annotationObj.setProperty('Color', QtGui.QColor(0, 255, 0))
self.annotationObj.actor.SetPickable(False)
def tick(self):
if not self.enabled:
return
if not om.findObjectByName('pointcloud snapshot'):
self.annotationFunc = None
self.finish()
return
self.hoverPos = pickPoint(self.lastMovePos, getSegmentationView(), obj='pointcloud snapshot')
self.draw()
class LineDraw(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.targetFps = 30
self.enabled = False
self.view = view
self.renderer = view.renderer()
self.line = vtk.vtkLeaderActor2D()
self.line.SetArrowPlacementToNone()
self.line.GetPositionCoordinate().SetCoordinateSystemToViewport()
self.line.GetPosition2Coordinate().SetCoordinateSystemToViewport()
self.line.GetProperty().SetLineWidth(4)
self.line.SetPosition(0,0)
self.line.SetPosition2(0,0)
self.clear()
def clear(self):
self.p1 = None
self.p2 = None
self.annotationFunc = None
self.lastMovePos = [0, 0]
self.renderer.RemoveActor2D(self.line)
def onMouseMove(self, displayPoint, modifiers=None):
self.lastMovePos = displayPoint
def onMousePress(self, displayPoint, modifiers=None):
if self.p1 is None:
self.p1 = list(self.lastMovePos)
if self.p1 is not None:
self.renderer.AddActor2D(self.line)
else:
self.p2 = self.lastMovePos
self.finish()
def finish(self):
self.enabled = False
self.renderer.RemoveActor2D(self.line)
if self.annotationFunc is not None:
self.annotationFunc(self.p1, self.p2)
def handleRelease(self, displayPoint):
pass
def tick(self):
if not self.enabled:
return
if self.p1:
self.line.SetPosition(self.p1)
self.line.SetPosition2(self.lastMovePos)
self.view.render()
viewPickers = []
def addViewPicker(picker):
global viewPickers
viewPickers.append(picker)
def removeViewPicker(picker):
global viewPickers
viewPickers.remove(picker)
def distanceToLine(x0, x1, x2):
numerator = np.sqrt(np.sum(np.cross((x0 - x1), (x0-x2))**2))
denom = np.linalg.norm(x2-x1)
return numerator / denom
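
# Small numeric sketch of distanceToLine (illustration only): the point
# (1, 1, 0) is at distance 1 from the infinite line through (0, 0, 0) and
# (1, 0, 0), since only its y offset is perpendicular to the line.
def _exampleDistanceToLine():
    x0 = np.array([1.0, 1.0, 0.0])
    x1 = np.array([0.0, 0.0, 0.0])
    x2 = np.array([1.0, 0.0, 0.0])
    return distanceToLine(x0, x1, x2)  # ~ 1.0
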
def labelDistanceToLine(polyData, linePoint1, linePoint2, resultArrayName='distance_to_line'):
x0 = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
x1 = np.array(linePoint1)
x2 = np.array(linePoint2)
numerator = np.sqrt(np.sum(np.cross((x0 - x1), (x0-x2))**2, axis=1))
denom = np.linalg.norm(x2-x1)
dists = numerator / denom
polyData = shallowCopy(polyData)
vtkNumpy.addNumpyToVtk(polyData, dists, resultArrayName)
return polyData
def labelDistanceToPoint(polyData, point, resultArrayName='distance_to_point'):
assert polyData.GetNumberOfPoints()
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
points = points - point
dists = np.sqrt(np.sum(points**2, axis=1))
polyData = shallowCopy(polyData)
vtkNumpy.addNumpyToVtk(polyData, dists, resultArrayName)
return polyData
def getPlaneEquationFromPolyData(polyData, expectedNormal):
_, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, returnOrigin=True)
return origin, normal, np.hstack((normal, [np.dot(origin, normal)]))
def computeEdge(polyData, edgeAxis, perpAxis, binWidth=0.03):
polyData = labelPointDistanceAlongAxis(polyData, edgeAxis, resultArrayName='dist_along_edge')
polyData = labelPointDistanceAlongAxis(polyData, perpAxis, resultArrayName='dist_perp_to_edge')
polyData, bins = binByScalar(polyData, 'dist_along_edge', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
distToEdge = vtkNumpy.getNumpyFromVtk(polyData, 'dist_perp_to_edge')
numberOfBins = len(bins) - 1
edgePoints = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
binDists = distToEdge[binLabels == i]
if len(binDists):
edgePoints.append(binPoints[binDists.argmax()])
return np.array(edgePoints)
def computeCentroids(polyData, axis, binWidth=0.025):
polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
numberOfBins = len(bins) - 1
centroids = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
if len(binPoints):
centroids.append(np.average(binPoints, axis=0))
return np.array(centroids)
def computePointCountsAlongAxis(polyData, axis, binWidth=0.025):
polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
numberOfBins = len(bins) - 1
binCount = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
binCount.append(len(binPoints))
return np.array(binCount)
def binByScalar(lidarData, scalarArrayName, binWidth, binLabelsArrayName='bin_labels'):
'''
Gets the array with name scalarArrayName from lidarData.
Computes bins by dividing the scalar array into bins of size binWidth.
Adds a new label array to the lidar points identifying which bin the point belongs to,
where the first bin is labeled with 0.
Returns the new, labeled lidar data and the bins.
The bins are an array where each value represents a bin edge.
'''
scalars = vtkNumpy.getNumpyFromVtk(lidarData, scalarArrayName)
bins = np.arange(scalars.min(), scalars.max()+binWidth, binWidth)
binLabels = np.digitize(scalars, bins) - 1
assert(len(binLabels) == len(scalars))
newData = shallowCopy(lidarData)
vtkNumpy.addNumpyToVtk(newData, binLabels, binLabelsArrayName)
return newData, bins
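
# Hedged numpy-only sketch of the binning logic used by binByScalar
# (illustration only): it skips the VTK wrapping and works directly on a
# 1-d scalar array.
def _exampleBinByScalarLogic(scalars, binWidth):
    scalars = np.asarray(scalars, dtype=float)
    bins = np.arange(scalars.min(), scalars.max() + binWidth, binWidth)
    binLabels = np.digitize(scalars, bins) - 1
    return binLabels, bins
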
def showObbs(polyData):
labelsArrayName = 'cluster_labels'
assert polyData.GetPointData().GetArray(labelsArrayName)
f = vtk.vtkAnnotateOBBs()
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
f.SetInput(polyData)
f.Update()
showPolyData(f.GetOutput(), 'bboxes')
def getOrientedBoundingBox(polyData):
'''
returns origin, edges, and outline wireframe
'''
nPoints = polyData.GetNumberOfPoints()
assert nPoints
polyData = shallowCopy(polyData)
labelsArrayName = 'bbox_labels'
labels = np.ones(nPoints)
vtkNumpy.addNumpyToVtk(polyData, labels, labelsArrayName)
f = vtk.vtkAnnotateOBBs()
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
f.SetInput(polyData)
f.Update()
assert f.GetNumberOfBoundingBoxes() == 1
origin = np.zeros(3)
edges = [np.zeros(3) for i in xrange(3)]
f.GetBoundingBoxOrigin(0, origin)
for i in xrange(3):
f.GetBoundingBoxEdge(0, i, edges[i])
return origin, edges, shallowCopy(f.GetOutput())
def segmentBlockByAnnotation(blockDimensions, p1, p2, p3):
segmentationObj = om.findObjectByName('pointcloud snapshot')
segmentationObj.mapper.ScalarVisibilityOff()
segmentationObj.setProperty('Point Size', 2)
segmentationObj.setProperty('Alpha', 0.8)
    # constrain z to lie in plane
#p1[2] = p2[2] = p3[2] = max(p1[2], p2[2], p3[2])
zedge = p2 - p1
zaxis = zedge / np.linalg.norm(zedge)
#xwidth = distanceToLine(p3, p1, p2)
# expected dimensions
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(zedge)
yaxis = np.cross(p2 - p1, p3 - p1)
yaxis = yaxis / np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
# reorient axes
viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
if np.dot(yaxis, viewPlaneNormal) < 0:
yaxis *= -1
if np.dot(xaxis, p3 - p1) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
origin = ((p1 + p2) / 2.0) + xaxis*xwidth/2.0 + yaxis*ywidth/2.0
d = DebugData()
d.addSphere(origin, radius=0.01)
d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
obj = updatePolyData(d.getPolyData(), 'block axes')
obj.setProperty('Color', QtGui.QColor(255, 255, 0))
obj.setProperty('Visible', False)
om.findObjectByName('annotation').setProperty('Visible', False)
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
####
# debris task ground frame
def getBoardCorners(params):
axes = [np.array(params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
widths = [np.array(params[axis])/2.0 for axis in ['xwidth', 'ywidth', 'zwidth']]
edges = [axes[i] * widths[i] for i in xrange(3)]
origin = np.array(params['origin'])
return [
origin + edges[0] + edges[1] + edges[2],
origin - edges[0] + edges[1] + edges[2],
origin - edges[0] - edges[1] + edges[2],
origin + edges[0] - edges[1] + edges[2],
origin + edges[0] + edges[1] - edges[2],
origin - edges[0] + edges[1] - edges[2],
origin - edges[0] - edges[1] - edges[2],
origin + edges[0] - edges[1] - edges[2],
]
def getPointDistances(target, points):
return np.array([np.linalg.norm(target - p) for p in points])
def computeClosestCorner(aff, referenceFrame):
corners = getBoardCorners(aff.params)
dists = getPointDistances(np.array(referenceFrame.GetPosition()), corners)
return corners[dists.argmin()]
def computeGroundFrame(aff, referenceFrame):
refAxis = [0.0, -1.0, 0.0]
referenceFrame.TransformVector(refAxis, refAxis)
refAxis = np.array(refAxis)
axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
boardAxis = axes[axisProjections.argmax()]
if np.dot(boardAxis, refAxis) < 0:
boardAxis = -boardAxis
xaxis = boardAxis
zaxis = np.array([0.0, 0.0, 1.0])
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
closestCorner = computeClosestCorner(aff, referenceFrame)
groundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
groundFrame.PostMultiply()
groundFrame.Translate(closestCorner[0], closestCorner[1], 0.0)
return groundFrame
def computeCornerFrame(aff, referenceFrame):
refAxis = [0.0, -1.0, 0.0]
referenceFrame.TransformVector(refAxis, refAxis)
refAxis = np.array(refAxis)
axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
    edgeLengths = [aff.params[edgeLength] for edgeLength in ['xwidth', 'ywidth', 'zwidth']]  # numeric edge lengths, so argmax below compares lengths rather than strings
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
boardAxis = axes[axisProjections.argmax()]
if np.dot(boardAxis, refAxis) < 0:
boardAxis = -boardAxis
longAxis = axes[np.argmax(edgeLengths)]
xaxis = boardAxis
yaxis = axes[2]
zaxis = np.cross(xaxis, yaxis)
closestCorner = computeClosestCorner(aff, referenceFrame)
cornerFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
cornerFrame.PostMultiply()
cornerFrame.Translate(closestCorner)
return cornerFrame
def publishTriad(transform, collectionId=1234):
o = lcmvs.obj_t()
xyz = transform.GetPosition()
rpy = transformUtils.rollPitchYawFromTransform(transform)
o.roll, o.pitch, o.yaw = rpy
o.x, o.y, o.z = xyz
o.id = 1
m = lcmvs.obj_collection_t()
m.id = collectionId
m.name = 'stance_triads'
m.type = lcmvs.obj_collection_t.AXIS3D
m.nobjs = 1
m.reset = False
m.objs = [o]
lcmUtils.publish('OBJ_COLLECTION', m)
def createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances'):
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = BoxAffordanceItem(name, view=app.getCurrentRenderView())
obj.setProperty('Dimensions', [float(v) for v in [xwidth, ywidth, zwidth]])
obj.actor.SetUserTransform(t)
om.addToObjectModel(obj, parentObj=om.getOrCreateContainer(parent))
frameObj = vis.showFrame(t, name + ' frame', scale=0.2, visible=False, parent=obj)
obj.addToView(app.getDRCView())
frameObj.addToView(app.getDRCView())
affordanceManager.registerAffordance(obj)
return obj
def segmentBlockByTopPlane(polyData, blockDimensions, expectedNormal, expectedXAxis, edgeSign=1, name='block affordance'):
polyData, planeOrigin, normal = applyPlaneFit(polyData, distanceThreshold=0.05, expectedNormal=expectedNormal, returnOrigin=True)
_, lineDirection, _ = applyLineFit(polyData)
zaxis = lineDirection
yaxis = normal
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, expectedXAxis) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis /= np.linalg.norm(zaxis)
expectedXAxis = np.array(xaxis)
edgePoints = computeEdge(polyData, zaxis, xaxis*edgeSign)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
d = DebugData()
obj = updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
zaxis = lineDirection
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, expectedXAxis) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis /= np.linalg.norm(zaxis)
polyData = labelPointDistanceAlongAxis(polyData, xaxis, resultArrayName='dist_along_line')
pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dists = np.dot(pts-linePoint, zaxis)
p1 = linePoint + zaxis*np.min(dists)
p2 = linePoint + zaxis*np.max(dists)
p1 = projectPointToPlane(p1, planeOrigin, normal)
p2 = projectPointToPlane(p2, planeOrigin, normal)
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(p2 - p1)
origin = p1 - edgeSign*xaxis*xwidth/2.0 - yaxis*ywidth/2.0 + zaxis*zwidth/2.0
d = DebugData()
#d.addSphere(linePoint, radius=0.02)
#d.addLine(linePoint, linePoint + yaxis*ywidth)
#d.addLine(linePoint, linePoint + xaxis*xwidth)
#d.addLine(linePoint, linePoint + zaxis*zwidth)
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
d.addSphere(origin, radius=0.01)
#d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
#d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
#d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
d.addLine(origin, origin + xaxis*xwidth/2.0)
d.addLine(origin, origin + yaxis*ywidth/2.0)
d.addLine(origin, origin + zaxis*zwidth/2.0)
#obj = updatePolyData(d.getPolyData(), 'block axes')
#obj.setProperty('Color', QtGui.QColor(255, 255, 0))
#obj.setProperty('Visible', False)
obj = createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name)
obj.setProperty('Color', [222/255.0, 184/255.0, 135/255.0])
computeDebrisGraspSeed(obj)
t = computeDebrisStanceFrame(obj)
if t:
showFrame(t, 'debris stance frame', parent=obj)
obj.publishCallback = functools.partial(publishDebrisStanceFrame, obj)
return obj
def computeDebrisGraspSeed(aff):
debrisReferenceFrame = om.findObjectByName('debris reference frame')
if debrisReferenceFrame:
debrisReferenceFrame = debrisReferenceFrame.transform
affCornerFrame = computeCornerFrame(aff, debrisReferenceFrame)
showFrame(affCornerFrame, 'board corner frame', parent=aff, visible=False)
def computeDebrisStanceFrame(aff):
debrisReferenceFrame = om.findObjectByName('debris reference frame')
debrisWallEdge = om.findObjectByName('debris plane edge')
if debrisReferenceFrame and debrisWallEdge:
debrisReferenceFrame = debrisReferenceFrame.transform
affGroundFrame = computeGroundFrame(aff, debrisReferenceFrame)
updateFrame(affGroundFrame, 'board ground frame', parent=getDebugFolder(), visible=False)
affWallEdge = computeGroundFrame(aff, debrisReferenceFrame)
framePos = np.array(affGroundFrame.GetPosition())
p1, p2 = debrisWallEdge.points
edgeAxis = p2 - p1
edgeAxis /= np.linalg.norm(edgeAxis)
projectedPos = p1 + edgeAxis * np.dot(framePos - p1, edgeAxis)
affWallFrame = vtk.vtkTransform()
affWallFrame.PostMultiply()
useWallFrameForRotation = True
if useWallFrameForRotation:
affWallFrame.SetMatrix(debrisReferenceFrame.GetMatrix())
affWallFrame.Translate(projectedPos - np.array(debrisReferenceFrame.GetPosition()))
stanceWidth = 0.20
stanceOffsetX = -0.35
stanceOffsetY = 0.45
stanceRotation = 0.0
else:
affWallFrame.SetMatrix(affGroundFrame.GetMatrix())
affWallFrame.Translate(projectedPos - framePos)
stanceWidth = 0.20
stanceOffsetX = -0.35
stanceOffsetY = -0.45
stanceRotation = math.pi/2.0
stanceFrame, _, _ = getFootFramesFromReferenceFrame(affWallFrame, stanceWidth, math.degrees(stanceRotation), [stanceOffsetX, stanceOffsetY, 0.0])
return stanceFrame
def publishDebrisStanceFrame(aff):
frame = computeDebrisStanceFrame(aff)
publishTriad(frame)
def segmentBlockByPlanes(blockDimensions):
planes = om.findObjectByName('selected planes').children()[:2]
viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
origin1, normal1, plane1 = getPlaneEquationFromPolyData(planes[0].polyData, expectedNormal=viewPlaneNormal)
origin2, normal2, plane2 = getPlaneEquationFromPolyData(planes[1].polyData, expectedNormal=viewPlaneNormal)
xaxis = normal2
yaxis = normal1
zaxis = np.cross(xaxis, yaxis)
xaxis = np.cross(yaxis, zaxis)
pts1 = vtkNumpy.getNumpyFromVtk(planes[0].polyData, 'Points')
pts2 = vtkNumpy.getNumpyFromVtk(planes[1].polyData, 'Points')
linePoint = np.zeros(3)
centroid2 = np.sum(pts2, axis=0)/len(pts2)
vtk.vtkPlane.ProjectPoint(centroid2, origin1, normal1, linePoint)
dists = np.dot(pts1-linePoint, zaxis)
p1 = linePoint + zaxis*np.min(dists)
p2 = linePoint + zaxis*np.max(dists)
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(p2 - p1)
origin = p1 + xaxis*xwidth/2.0 + yaxis*ywidth/2.0 + zaxis*zwidth/2.0
d = DebugData()
d.addSphere(linePoint, radius=0.02)
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
d.addSphere(origin, radius=0.01)
d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
obj = updatePolyData(d.getPolyData(), 'block axes')
obj.setProperty('Color', QtGui.QColor(255, 255, 0))
obj.setProperty('Visible', False)
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def estimatePointerTip(robotModel, polyData):
'''
Given a robot model, uses forward kinematics to determine a pointer tip
search region, then does a ransac line fit in the search region to find
points on the pointer, and selects the maximum point along the line fit
as the pointer tip. Returns the pointer tip xyz on success and returns
None on failure.
'''
palmFrame = robotModel.getLinkFrame('r_hand_force_torque')
p1 = [0.0, 0.14, -0.06]
p2 = [0.0, 0.24, -0.06]
palmFrame.TransformPoint(p1, p1)
palmFrame.TransformPoint(p2, p2)
p1 = np.array(p1)
p2 = np.array(p2)
d = DebugData()
d.addSphere(p1, radius=0.005)
d.addSphere(p2, radius=0.005)
d.addLine(p1, p2)
vis.updatePolyData(d.getPolyData(), 'pointer line', color=[1,0,0], parent=getDebugFolder(), visible=False)
polyData = cropToLineSegment(polyData, p1, p2)
if not polyData.GetNumberOfPoints():
#print 'pointer search region is empty'
return None
vis.updatePolyData(polyData, 'cropped to pointer line', parent=getDebugFolder(), visible=False)
polyData = labelDistanceToLine(polyData, p1, p2)
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.07])
if polyData.GetNumberOfPoints() < 2:
#print 'pointer search region is empty'
return None
updatePolyData(polyData, 'distance to pointer line', colorByName='distance_to_line', parent=getDebugFolder(), visible=False)
ransacDistanceThreshold = 0.0075
lineOrigin, lineDirection, polyData = applyLineFit(polyData, distanceThreshold=ransacDistanceThreshold)
updatePolyData(polyData, 'line fit ransac', colorByName='ransac_labels', parent=getDebugFolder(), visible=False)
lineDirection = np.array(lineDirection)
lineDirection /= np.linalg.norm(lineDirection)
if np.dot(lineDirection, (p2 - p1)) < 0:
lineDirection *= -1
polyData = thresholdPoints(polyData, 'ransac_labels', [1.0, 1.0])
if polyData.GetNumberOfPoints() < 2:
#print 'pointer ransac line fit failed to find inliers'
return None
obj = updatePolyData(polyData, 'line fit points', colorByName='dist_along_line', parent=getDebugFolder(), visible=True)
obj.setProperty('Point Size', 5)
pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dists = np.dot(pts-lineOrigin, lineDirection)
p1 = lineOrigin + lineDirection*np.min(dists)
p2 = lineOrigin + lineDirection*np.max(dists)
d = DebugData()
#d.addSphere(p1, radius=0.005)
d.addSphere(p2, radius=0.005)
d.addLine(p1, p2)
vis.updatePolyData(d.getPolyData(), 'fit pointer line', color=[0,1,0], parent=getDebugFolder(), visible=True)
return p2
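# Illustrative usage sketch (not part of the original module); both helpers
# referenced below are defined elsewhere in this module:
#   robotModel = getVisibleRobotModel()
#   tipXyz = estimatePointerTip(robotModel, getCurrentRevolutionData())
#   if tipXyz is not None:
#       ...  # use tipXyz, e.g. as a reach target or annotation point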
def startBoundedPlaneSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentBoundedPlaneByAnnotation)
def startValveSegmentationByWallPlane(expectedValveRadius):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentValveByWallPlane, expectedValveRadius)
def startValveSegmentationManual(expectedValveRadius):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentValve, expectedValveRadius)
def startRefitWall():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = refitWall
def startWyeSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentWye)
def startDoorHandleSegmentation(otdfType):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDoorHandle, otdfType)
def startTrussSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentTruss)
def startHoseNozzleSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentHoseNozzle)
def storePoint(p):
global _pickPoint
_pickPoint = p
def getPickPoint():
global _pickPoint
return _pickPoint
def startPickPoint():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = storePoint
def startSelectToolTip():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = selectToolTip
def startDrillSegmentation():
picker = PointPicker(numberOfPoints=3)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrill)
def startDrillAutoSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillAuto)
def startDrillButtonSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillButton)
def startPointerTipSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentPointerTip)
def startDrillAutoSegmentationAlignedWithTable():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillAlignedWithTable)
def startDrillBarrelSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillBarrel)
def startDrillWallSegmentation():
picker = PointPicker(numberOfPoints=3)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentDrillWall)
def startDrillWallSegmentationConstrained(rightAngleLocation):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillWallConstrained, rightAngleLocation)
def startDrillInHandSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentDrillInHand)
def startSegmentDebrisWall():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentDebrisWall)
def startSegmentDebrisWallManual():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentDebrisWallManual)
def selectToolTip(point1):
    print(point1)
def segmentDebrisWallManual(point1, point2):
p1, p2 = point1, point2
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', visible=True)
edgeObj.points = [p1, p2]
xaxis = p2 - p1
xaxis /= np.linalg.norm(xaxis)
zaxis = np.array([0.0, 0.0, 1.0])
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(p1)
updateFrame(t, 'debris plane frame', parent=edgeObj, visible=False)
refFrame = vtk.vtkTransform()
refFrame.PostMultiply()
refFrame.SetMatrix(t.GetMatrix())
refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
updateFrame(refFrame, 'debris reference frame', parent=edgeObj, visible=False)
def segmentDebrisWall(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
searchOrigin=point1, searchRadius=0.25, angleEpsilon=0.7, returnOrigin=True)
planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.02, 0.02])
updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=10)
updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
planePoints = labelDistanceToPoint(planePoints, point1)
clusters = extractClusters(planePoints, clusterTolerance=0.10)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
planePoints = clusters[0]
planeObj = updatePolyData(planePoints, 'debris plane points', parent=getDebugFolder(), visible=False)
    perpAxis = np.array([0.0, 0.0, -1.0])
perpAxis /= np.linalg.norm(perpAxis)
edgeAxis = np.cross(normal, perpAxis)
edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
#binCounts = computePointCountsAlongAxis(planePoints, lineDirection)
xaxis = lineDirection
yaxis = normal
zaxis = np.cross(xaxis, yaxis)
if np.dot(zaxis, [0, 0, 1]) < 0:
zaxis *= -1
xaxis *= -1
pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
dists = np.dot(pts-linePoint, xaxis)
p1 = linePoint + xaxis*np.min(dists)
p2 = linePoint + xaxis*np.max(dists)
p1 = projectPointToPlane(p1, origin, normal)
p2 = projectPointToPlane(p2, origin, normal)
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', parent=planeObj, visible=True)
edgeObj.points = [p1, p2]
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(p1)
updateFrame(t, 'debris plane frame', parent=planeObj, visible=False)
refFrame = vtk.vtkTransform()
refFrame.PostMultiply()
refFrame.SetMatrix(t.GetMatrix())
refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
updateFrame(refFrame, 'debris reference frame', parent=planeObj, visible=False)
def segmentBoundedPlaneByAnnotation(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.015, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.7, returnOrigin=True)
planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.015, 0.015])
updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=12)
updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
planePoints = labelDistanceToPoint(planePoints, point1)
clusters = extractClusters(planePoints, clusterTolerance=0.10)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
planePoints = clusters[0]
updatePolyData(planePoints, 'plane points', parent=getDebugFolder(), visible=False)
perpAxis = point2 - point1
perpAxis /= np.linalg.norm(perpAxis)
edgeAxis = np.cross(normal, perpAxis)
edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
zaxis = normal
yaxis = lineDirection
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, perpAxis) < 0:
xaxis *= -1
# make right handed
yaxis = np.cross(zaxis, xaxis)
pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
dists = np.dot(pts-linePoint, yaxis)
p1 = linePoint + yaxis*np.min(dists)
p2 = linePoint + yaxis*np.max(dists)
p1 = projectPointToPlane(p1, origin, normal)
p2 = projectPointToPlane(p2, origin, normal)
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
updatePolyData(d.getPolyData(), 'plane edge', parent=getDebugFolder(), visible=False)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate((p1 + p2)/ 2.0)
updateFrame(t, 'plane edge frame', parent=getDebugFolder(), visible=False)
savedCameraParams = None
def perspective():
global savedCameraParams
if savedCameraParams is None:
return
aff = getDefaultAffordanceObject()
if aff:
aff.setProperty('Alpha', 1.0)
obj = om.findObjectByName('pointcloud snapshot')
if obj is not None:
obj.actor.SetPickable(1)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOff()
c.SetPosition(savedCameraParams['Position'])
c.SetFocalPoint(savedCameraParams['FocalPoint'])
c.SetViewUp(savedCameraParams['ViewUp'])
view.setCameraManipulationStyle()
view.render()
def saveCameraParams(overwrite=False):
global savedCameraParams
if overwrite or (savedCameraParams is None):
view = getSegmentationView()
c = view.camera()
savedCameraParams = dict(Position=c.GetPosition(), FocalPoint=c.GetFocalPoint(), ViewUp=c.GetViewUp())
def getDefaultAffordanceObject():
obj = om.getActiveObject()
if isinstance(obj, AffordanceItem):
return obj
for obj in om.getObjects():
if isinstance(obj, AffordanceItem):
return obj
def getVisibleRobotModel():
for obj in om.getObjects():
if isinstance(obj, roboturdf.RobotModelItem) and obj.getProperty('Visible'):
return obj
def orthoX():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['xaxis']
viewUp = -aff.params['yaxis']
viewDistance = aff.params['xwidth']*3
scale = aff.params['zwidth']
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def orthoY():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['yaxis']
viewUp = -aff.params['xaxis']
viewDistance = aff.params['ywidth']*4
scale = aff.params['zwidth']
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def orthoZ():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['zaxis']
viewUp = -aff.params['yaxis']
viewDistance = aff.params['zwidth']
scale = aff.params['ywidth']*6
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def zoomToDisplayPoint(displayPoint, boundsRadius=0.5, view=None):
pickedPoint = pickPoint(displayPoint, getSegmentationView(), obj='pointcloud snapshot')
if pickedPoint is None:
return
view = view or app.getCurrentRenderView()
worldPt1, worldPt2 = getRayFromDisplayPoint(getSegmentationView(), displayPoint)
diagonal = np.array([boundsRadius, boundsRadius, boundsRadius])
bounds = np.hstack([pickedPoint - diagonal, pickedPoint + diagonal])
bounds = [bounds[0], bounds[3], bounds[1], bounds[4], bounds[2], bounds[5]]
view.renderer().ResetCamera(bounds)
view.camera().SetFocalPoint(pickedPoint)
view.render()
def extractPointsAlongClickRay(position, ray, polyData=None, distanceToLineThreshold=0.025, nearestToCamera=False):
#segmentationObj = om.findObjectByName('pointcloud snapshot')
if polyData is None:
polyData = getCurrentRevolutionData()
if not polyData or not polyData.GetNumberOfPoints():
return None
polyData = labelDistanceToLine(polyData, position, position + ray)
# extract points near line
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
if not polyData.GetNumberOfPoints():
return None
polyData = labelPointDistanceAlongAxis(polyData, ray, origin=position, resultArrayName='distance_along_line')
polyData = thresholdPoints(polyData, 'distance_along_line', [0.20, 1e6])
if not polyData.GetNumberOfPoints():
return None
updatePolyData(polyData, 'ray points', colorByName='distance_to_line', visible=False, parent=getDebugFolder())
if nearestToCamera:
dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_line')
else:
dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_to_line')
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
intersectionPoint = points[dists.argmin()]
d = DebugData()
d.addSphere( intersectionPoint, radius=0.005)
d.addLine(position, intersectionPoint)
obj = updatePolyData(d.getPolyData(), 'intersecting ray', visible=False, color=[0,1,0], parent=getDebugFolder())
obj.actor.GetProperty().SetLineWidth(2)
d2 = DebugData()
end_of_ray = position + 2*ray
d2.addLine(position, end_of_ray)
obj2 = updatePolyData(d2.getPolyData(), 'camera ray', visible=False, color=[1,0,0], parent=getDebugFolder())
obj2.actor.GetProperty().SetLineWidth(2)
return intersectionPoint
def segmentDrillWallFromTag(position, ray):
'''
    Fix the drill wall relative to a ray intersected with the wall.
    Given a position and a ray (typically derived from a camera pixel),
    intersect the ray with the fitted wall plane and use that point to
    position the drill wall. A hard-coded offset from that position on the
    wall produces the drill cutting origin.
'''
#inputObj = om.findObjectByName('pointcloud snapshot')
#polyData = shallowCopy(inputObj.polyData)
polyData = getCurrentRevolutionData()
if (polyData is None): # no data yet
print "no LIDAR data yet"
return False
point1 = extractPointsAlongClickRay(position, ray, polyData )
# view direction is out:
viewDirection = -1 * SegmentationContext.getGlobalInstance().getViewDirection()
polyDataOut, origin, normal = applyPlaneFit(polyData, expectedNormal=viewDirection, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
# project the lidar point onto the plane (older, variance is >1cm with robot 2m away)
#intersection_point = projectPointToPlane(point1, origin, normal)
# intersect the ray with the plane (variance was about 4mm with robot 2m away)
intersection_point = intersectLineWithPlane(position, ray, origin, normal)
# Define a frame:
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(intersection_point)
t2 = transformUtils.copyFrame(t)
t2.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY( [0,0.6,-0.25] , [0,0,0] )
t2.Concatenate(t3)
rightAngleLocation = 'bottom left'
createDrillWall(rightAngleLocation, t2)
wall= om.findObjectByName('wall')
vis.updateFrame( t ,'wall fit tag', parent=wall, visible=False, scale=0.2)
d = DebugData()
d.addSphere( intersection_point, radius=0.002)
obj = updatePolyData(d.getPolyData(), 'intersection', parent=wall, visible=False, color=[0,1,0]) #
obj.actor.GetProperty().SetLineWidth(1)
return True
def segmentDrillWallFromWallCenter():
'''
Get the drill wall target as an offset from the center of
the full wall
'''
# find the valve wall and its center
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
# hardcoded position to target frame from center of wall
    # coincides with the distance from the april tag to this position
wallFrame = transformUtils.copyFrame( findWallCenter(polyData) )
wallFrame.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY( [-0.07,-0.3276,0] , [180,-90,0] )
wallFrame.Concatenate(t3)
rightAngleLocation = 'bottom left'
createDrillWall(rightAngleLocation, wallFrame)
wall= om.findObjectByName('wall')
vis.updateFrame( wallFrame ,'wall fit lidar', parent=wall, visible=False, scale=0.2)
| bsd-3-clause |
StudyExchange/Udacity | MachineLearning(Advanced)/p3_creating_customer_segments/visuals.py | 21 | 6047 | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
def pca_results(good_data, pca):
'''
Create a DataFrame of the PCA results
Includes dimension feature weights and explained variance
Visualizes the PCA results
'''
# Dimension indexing
    dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_)+1)]
# PCA components
components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys())
components.index = dimensions
# PCA explained variance
ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
variance_ratios.index = dimensions
# Create a bar plot visualization
fig, ax = plt.subplots(figsize = (14,8))
# Plot the feature weights as a function of the components
components.plot(ax = ax, kind = 'bar');
ax.set_ylabel("Feature Weights")
ax.set_xticklabels(dimensions, rotation=0)
# Display the explained variance ratios
for i, ev in enumerate(pca.explained_variance_ratio_):
ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
# Return a concatenated DataFrame
return pd.concat([variance_ratios, components], axis = 1)
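# Usage sketch (illustrative, not part of the original file): `good_data` is the
# preprocessed customer DataFrame used in the project notebook, and the PCA
# object must already be fitted, e.g.
#   from sklearn.decomposition import PCA
#   pca = PCA(n_components=6).fit(good_data)
#   pca_results(good_data, pca)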
def cluster_results(reduced_data, preds, centers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions
Adds cues for cluster centers and student-selected sample data
'''
predictions = pd.DataFrame(preds, columns = ['Cluster'])
plot_data = pd.concat([predictions, reduced_data], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned cluster
for i, cluster in plot_data.groupby('Cluster'):
cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30);
# Plot centers with indicators
for i, c in enumerate(centers):
ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \
alpha = 1, linewidth = 2, marker = 'o', s=200);
ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100);
# Plot transformed sample points
ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
s = 150, linewidth = 4, color = 'black', marker = 'x');
# Set plot title
ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def biplot(good_data, reduced_data, pca):
'''
Produce a biplot that shows a scatterplot of the reduced
data and the projections of the original features.
good_data: original data, before transformation.
Needs to be a pandas dataframe with valid column names
reduced_data: the reduced data (the first two dimensions are plotted)
pca: pca object that contains the components_ attribute
return: a matplotlib AxesSubplot object (for any additional customization)
This procedure is inspired by the script:
https://github.com/teddyroland/python-biplot
'''
fig, ax = plt.subplots(figsize = (14,8))
# scatterplot of the reduced data
ax.scatter(x=reduced_data.loc[:, 'Dimension 1'], y=reduced_data.loc[:, 'Dimension 2'],
facecolors='b', edgecolors='b', s=70, alpha=0.5)
feature_vectors = pca.components_.T
# we use scaling factors to make the arrows easier to see
arrow_size, text_pos = 7.0, 8.0,
# projections of the original features
for i, v in enumerate(feature_vectors):
ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1],
head_width=0.2, head_length=0.2, linewidth=2, color='red')
ax.text(v[0]*text_pos, v[1]*text_pos, good_data.columns[i], color='black',
ha='center', va='center', fontsize=18)
ax.set_xlabel("Dimension 1", fontsize=14)
ax.set_ylabel("Dimension 2", fontsize=14)
ax.set_title("PC plane with original feature projections.", fontsize=16);
return ax
def channel_results(reduced_data, outliers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
Data is labeled by "Channel" and cues added for student-selected sample data
'''
# Check that the dataset is loadable
try:
full_data = pd.read_csv("customers.csv")
except:
        print("Dataset could not be loaded. Is the file missing?")
return False
# Create the Channel DataFrame
channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data, channel], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned Channel
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
grouped = labeled.groupby('Channel')
for i, channel in grouped:
channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);
# Plot transformed sample points
for i, sample in enumerate(pca_samples):
ax.scatter(x = sample[0], y = sample[1], \
s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);
# Set plot title
ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled"); | mit |
OSGeo-live/CesiumWidget | GSOC/notebooks/ipython/examples/Embedding/internal_ipkernel.py | 4 | 2018 | #-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from IPython.lib.kernel import connect_qtconsole
from IPython.kernel.zmq.kernelapp import IPKernelApp
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def mpl_kernel(gui):
"""Launch and return an IPython kernel with matplotlib support for the desired gui
"""
kernel = IPKernelApp.instance()
kernel.initialize(['python', '--matplotlib=%s' % gui,
#'--log-level=10'
])
return kernel
class InternalIPKernel(object):
def init_ipkernel(self, backend):
# Start IPython kernel with GUI event loop and mpl support
self.ipkernel = mpl_kernel(backend)
# To create and track active qt consoles
self.consoles = []
# This application will also act on the shell user namespace
self.namespace = self.ipkernel.shell.user_ns
# Example: a variable that will be seen by the user in the shell, and
# that the GUI modifies (the 'Counter++' button increments it):
self.namespace['app_counter'] = 0
#self.namespace['ipkernel'] = self.ipkernel # dbg
def print_namespace(self, evt=None):
print("\n***Variables in User namespace***")
for k, v in self.namespace.items():
if not k.startswith('_'):
print('%s -> %r' % (k, v))
sys.stdout.flush()
def new_qt_console(self, evt=None):
"""start a new qtconsole connected to our kernel"""
return connect_qtconsole(self.ipkernel.connection_file, profile=self.ipkernel.profile)
def count(self, evt=None):
self.namespace['app_counter'] += 1
def cleanup_consoles(self, evt=None):
for c in self.consoles:
c.kill()
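# Usage sketch (illustrative, not part of the original example): the mixin is
# intended to be combined with a GUI application class, roughly
#   class MyApp(SomeGuiFrame, InternalIPKernel):   # SomeGuiFrame is hypothetical
#       def __init__(self):
#           self.init_ipkernel('qt')
#           self.consoles.append(self.new_qt_console())
# after which the application typically enters the kernel loop last, e.g. via
# self.ipkernel.start(); see the other IPython Embedding examples for patterns.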
| apache-2.0 |
mitenjain/signalAlign | scripts/zayante.py | 2 | 14906 | #!/usr/bin/env python
"""Run signal-to-reference alignments
"""
from __future__ import print_function
import pandas as pd
import glob
from signalAlignLib import *
from alignmentAnalysisLib import CallMethylation
from variantCallingLib import get_alignments_labels_and_mask
from multiprocessing import Process, Queue, current_process, Manager
from serviceCourse.file_handlers import FolderHandler
from argparse import ArgumentParser
from random import shuffle
def parse_args():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--file_directory', '-d', action='store',
dest='files_dir', required=True, type=str, default=None,
help="directory with MinION fast5 reads to align")
parser.add_argument('--ref', '-r', action='store',
dest='ref', required=True, type=str,
help="reference sequence to align to, in FASTA")
parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm',
required=False, type=str, default=None,
help="input HMM for template events, if you don't want the default")
parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm',
required=False, type=str, default=None,
help="input HMM for complement events, if you don't want the default")
parser.add_argument('--templateHDP', '-tH', action='store', dest='templateHDP', default=None,
help="template serialized HDP file")
parser.add_argument('--complementHDP', '-cH', action='store', dest='complementHDP', default=None,
help="complement serialized HDP file")
parser.add_argument('--degenerate', '-x', action='store', dest='degenerate', default="variant",
help="Specify degenerate nucleotide options: "
"variant -> {ACGT}, twoWay -> {CE} threeWay -> {CEO}")
parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str,
default="threeState", help="decide which model to use, threeState by default")
parser.add_argument('--threshold', '-t', action='store', dest='threshold', type=float, required=False,
default=None, help="posterior match probability threshold, Default: 0.01")
parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int,
required=False, default=None, help="number of diagonals to expand around each anchor")
parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int,
required=False, default=None, help='amount to remove from an anchor constraint')
parser.add_argument('--target_regions', '-q', action='store', dest='target_regions', type=str,
required=False, default=None, help="tab separated table with regions to align to")
parser.add_argument('---un-banded', '-ub', action='store_false', dest='banded',
default=True, help='flag, turn off banding')
parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False,
default=4, type=int, help="number of jobs to run concurrently")
parser.add_argument('--nb_files', '-n', action='store', dest='nb_files', required=False,
default=500, type=int, help="maximum number of reads to align")
parser.add_argument('--cycles', dest='cycles', default=1, required=False, type=int)
parser.add_argument('--output_location', '-o', action='store', dest='out',
required=True, type=str, default=None,
help="directory to put the alignments")
args = parser.parse_args()
return args
def get_first_sequence(input_fasta):
input_sequence = ""
for header, comment, sequence in read_fasta(input_fasta):
input_sequence += sequence
break
return input_sequence
def make_degenerate_reference(input_fasta, start, forward_sequence_path, backward_sequence_path,
block_size=1, step=6):
"""
    input_fasta: string, path to a FASTA file; the first sequence in it is used
    start: int, index of the first base to substitute with a degenerate character
    forward_sequence_path: string, path to write the substituted template sequence
    backward_sequence_path: string, path to write the substituted complement sequence
    block_size: not implemented
    step: number of bases between degenerate characters
    :return (True, length of the substituted sequence)
"""
input_sequence = get_first_sequence(input_fasta)
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
positions = xrange(start, len(input_sequence), step)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
t_seq = ''.join(t_seq)
c_seq = ''.join(c_seq)
sequence_length = len(t_seq)
with open(forward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=t_seq))
with open(backward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=c_seq))
return True, sequence_length
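# Worked illustration (not executed anywhere; the sequence is made up): with
# start=1 and step=6, a template sequence "ACGTACGTACGT" is written out as
# "AXGTACGXACGT", i.e. every 6th base starting at index 1 is replaced by the
# degenerate character "X", and the complement strand is substituted at the
# same positions.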
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
except Exception, e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def run_methyl_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
except Exception, e:
done_queue.put("%s failed with %s" % (current_process().name, e.message))
def load_data(file_path):
data = pd.read_table(file_path,
usecols=(0, 1, 2, 3, 4, 5, 6),
names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
dtype={'site': np.int64,
'strand': np.str,
'pC': np.float64,
'pmC': np.float64,
'phmC': np.float64,
'read': np.str,
})
return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
return [probs[3], probs[2], probs[1], probs[0]]
def update_reference(data, reference_sequence, min_depth=0, get_sites=False):
d = load_data(data)
ref = get_first_sequence(reference_sequence)
ref = list(ref)
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv") and read['strand'] == 't') or
(read['read'].endswith(".backward.tsv") and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
called_base = marginal_prob.map(lambda x: x / sum(marginal_prob)).argmax()[1]
if called_base != ref[site]:
print("Changing {orig} to {new} at {site}".format(orig=ref[site], new=called_base, site=site))
if get_sites is False:
ref[site] = called_base
else:
add_to_candidates(site)
if get_sites is True:
return candidate_sites
else:
return ''.join(ref)
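# Small numeric illustration of the strand handling above (made-up counts): if
# the forward-strand marginal [pA, pC, pG, pT] is [1, 5, 1, 1] and the
# backward-strand marginal is [1, 1, 6, 1], rc_probs maps the backward values
# into forward coordinates as [1, 6, 1, 1], the combined marginal becomes
# [2, 11, 2, 2], and the called base is "C".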
def main(args):
# parse args
args = parse_args()
command_line = " ".join(sys.argv[:])
print("Command Line: {cmdLine}\n".format(cmdLine=command_line), file=sys.stderr)
start_message = """
# Starting Zayante Error-Correction
# Aligning files from: {fileDir}
# Aligning to reference: {reference}
# Aligning maximum of {nbFiles} files
# Using model: {model}
# Using banding: {banding}
# Aligning to regions in: {regions}
# Non-default template HMM: {inThmm}
# Non-default complement HMM: {inChmm}
# Template HDP: {tHdp}
# Complement HDP: {cHdp}
""".format(fileDir=args.files_dir, reference=args.ref, nbFiles=args.nb_files, banding=args.banded,
inThmm=args.in_T_Hmm, inChmm=args.in_C_Hmm, model=args.stateMachineType, regions=args.target_regions,
tHdp=args.templateHDP, cHdp=args.complementHDP)
print(start_message, file=sys.stdout)
if not os.path.isfile(args.ref):
print("Did not find valid reference file", file=sys.stderr)
sys.exit(1)
temp_folder = FolderHandler()
temp_dir_path = temp_folder.open_folder(args.out + "tempFiles_errorCorrection")
reference_sequence = args.ref
STEP = 10
for cycle in range(0, 8):
for it in range(0, STEP):
# make paths for reference files
forward_reference = temp_folder.add_file_path("forward_reference.{cycle}.{iter}.txt".format(cycle=cycle,
iter=it))
backward_reference = temp_folder.add_file_path("backward_reference.{cycle}.{iter}.txt".format(cycle=cycle,
iter=it))
# make N-ed reference sequence for this iteration
deg, reference_sequence_length = make_degenerate_reference(reference_sequence, it,
forward_reference, backward_reference,
step=STEP)
assert deg, "Problem making degenerate reference for cycle {cycle} iteration {iter}" \
"".format(cycle=cycle, iter=it)
# index the reference for bwa
print("signalAlign - indexing reference", file=sys.stderr)
bwa_ref_index = get_bwa_index(args.ref, temp_dir_path)
print("signalAlign - indexing reference, done", file=sys.stderr)
# setup workers for multiprocessing
workers = args.nb_jobs
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
# list of alignment files
fast5s = [x for x in os.listdir(args.files_dir) if x.endswith(".fast5")]
# take only some
if args.nb_files < len(fast5s):
shuffle(fast5s)
fast5s = fast5s[:args.nb_files]
for fast5 in fast5s:
alignment_args = {
"forward_reference": forward_reference,
"backward_reference": backward_reference,
"path_to_EC_refs": None,
"destination": temp_dir_path,
"stateMachineType": args.stateMachineType,
"bwa_index": bwa_ref_index,
"in_templateHmm": args.in_T_Hmm,
"in_complementHmm": args.in_C_Hmm,
"in_templateHdp": args.templateHDP,
"in_complementHdp": args.complementHDP,
"banded": args.banded,
"sparse_output": True,
"in_fast5": args.files_dir + fast5,
"threshold": args.threshold,
"diagonal_expansion": args.diag_expansion,
"constraint_trim": args.constraint_trim,
"target_regions": None,
"degenerate": degenerate_enum(args.degenerate),
}
#alignment = SignalAlignment(**alignment_args)
#alignment.run()
work_queue.put(alignment_args)
for w in xrange(workers):
p = Process(target=aligner, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
print("\n# signalAlign - finished alignments\n", file=sys.stderr)
print("\n# signalAlign - finished alignments\n", file=sys.stdout)
print("\n# Starting Variant Calling\n", file=sys.stdout)
print("\n# Starting Variant Calling\n", file=sys.stderr)
# cull the alignment files
alns, forward_mask = get_alignments_labels_and_mask(temp_dir_path + "*.tsv", args.nb_files)
degenerate_positions = {
'forward': range(it, reference_sequence_length, STEP),
'backward': range(it, reference_sequence_length, STEP) }
variant_call_file = temp_folder.add_file_path("variants.{cycle}.{iter}.calls".format(cycle=cycle, iter=it))
for aln, forward_bool in zip(alns, forward_mask):
call_methyl_args = {
"sequence": None,
"alignment_file": aln,
"forward": forward_bool,
"out_file": variant_call_file,
"positions": degenerate_positions,
"degenerate_type": degenerate_enum(args.degenerate),
}
#c = CallMethylation(**call_methyl_args)
#c.write()
work_queue.put(call_methyl_args)
for w in xrange(workers):
p = Process(target=run_methyl_caller, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
print("\n# Finished Variant Calling\n", file=sys.stdout)
print("\n# Finished Variant Calling\n", file=sys.stderr)
new_ref = update_reference(variant_call_file, reference_sequence, 0)
ref_path = temp_folder.add_file_path("iteration.{cycle}.{iter}.fa".format(cycle=cycle, iter=it))
write_fasta("iteration.{cycle}.{iter}.fa".format(cycle=cycle, iter=it), new_ref, open(ref_path, 'w'))
reference_sequence = ref_path
# remove old alignments
for f in glob.glob(temp_dir_path + "*.tsv"):
os.remove(f)
STEP -= 1
return
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit |
ajoshi1/JupyterWorkflow | jupyterworkflow/data.py | 1 | 1051 | import os
from urllib.request import urlretrieve
import pandas as pd
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
def get_fremont_data(filename='Fremont.csv', url=FREMONT_URL,
force_download=False):
"""Download and cache the fremont data
Parameters
----------
filename : string (optional)
location to save the data
url : string (optional)
web location of the data
force_download : bool (optional)
if True, force redownload of data
Returns
-------
data : pandas.DataFrame
The fremont bridge data
"""
if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)
    data = pd.read_csv(filename, index_col='Date')
try:
data.index = pd.to_datetime(data.index, format='%m/%d/%Y %I:%M:%S %p')
except TypeError:
data.index = pd.to_datetime(data.index)
data.columns = ['West', 'East']
data['Total'] = data['West'] + data['East']
return data
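# Illustrative usage sketch (not part of the original module); the weekly
# resample rule is an arbitrary example, and the first call needs network
# access so the CSV can be downloaded and cached:
def _example_weekly_totals():
    data = get_fremont_data()
    return data.resample('W').sum()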
| mit |
pastas/pasta | pastas/utils.py | 1 | 7798 | from logging import getLogger
import numpy as np
from pandas import Series, to_datetime, Timedelta, Timestamp, to_timedelta
from pandas.tseries.frequencies import to_offset
from scipy import interpolate
logger = getLogger(__name__)
def frequency_is_supported(freq):
"""Method to determine if a frequency is supported for a pastas-model.
Possible frequency-offsets are listed in:
http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
The frequency can be a multiple of these offsets, like '7D'. Because of the
use in convolution, only frequencies with an equidistant offset are
allowed. This means monthly ('M'), yearly ('Y') or even weekly ('W')
frequencies are not allowed. Use '7D' for a weekly simulation.
D calendar day frequency
H hourly frequency
T, min minutely frequency
S secondly frequency
L, ms milliseconds
U, us microseconds
N nanoseconds
Parameters
----------
freq: str
Returns
-------
boolean
True when frequency can be used as a simulation frequency
"""
offset = to_offset(freq)
if not hasattr(offset, 'delta'):
logger.error("Frequency %s not supported." % freq)
else:
if offset.n == 1:
freq = offset.name
else:
freq = str(offset.n) + offset.name
return freq
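# Illustrative behaviour, following the logic above (not part of the original
# module): frequency_is_supported("D") returns "D" and
# frequency_is_supported("7D") returns "7D", since both map to offsets with a
# fixed Timedelta, while a calendar-dependent alias such as "M" only triggers
# the error log and is returned unchanged.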
def get_stress_dt(freq):
"""Internal method to obtain a timestep in days from a frequency string
derived by Pandas Infer method or supplied by the user as a TimeSeries
settings.
Parameters
----------
freq: str
Returns
-------
dt: float
Approximate timestep in number of days.
Notes
-----
Used for comparison to determine if a time series needs to be up or
downsampled.
See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
for the offset_aliases supported by Pandas.
"""
# Get the frequency string and multiplier
offset = to_offset(freq)
if hasattr(offset, 'delta'):
dt = offset.delta / Timedelta(1, "D")
else:
num = offset.n
freq = offset.name
if freq in ['A', 'Y', 'AS', 'YS', 'BA', 'BY', 'BAS', 'BYS']:
# year
dt = num * 365
elif freq in ['BQ', 'BQS', 'Q', 'QS']:
# quarter
dt = num * 90
elif freq in ['BM', 'BMS', 'CBM', 'CBMS', 'M', 'MS']:
# month
dt = num * 30
elif freq in ['SM', 'SMS']:
# semi-month
dt = num * 15
elif freq in ['W']:
# week
dt = num * 7
elif freq in ['B', 'C']:
# day
dt = num
elif freq in ['BH', 'CBH']:
# hour
dt = num * 1 / 24
else:
raise (ValueError('freq of {} not supported'.format(freq)))
return dt
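# Worked examples of the approximation above (illustrative): get_stress_dt("D")
# gives 1.0 and get_stress_dt("7D") gives 7.0 exactly (via the offset delta),
# while get_stress_dt("M") falls through to the month branch and gives the
# approximate value 30.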
def get_dt(freq):
"""Method to obtain a timestep in DAYS from a frequency string.
Parameters
----------
freq: str
Returns
-------
dt: float
Number of days
"""
# Get the frequency string and multiplier
dt = to_offset(freq).delta / Timedelta(1, "D")
return dt
def get_time_offset(t, freq):
""" method to calculate the time offset between a TimeStamp t and a
default Series with a frequency of freq
Parameters
----------
t: pandas.Timestamp
Timestamp to calculate the offset from the desired freq for.
freq: str
String with the desired frequency.
Returns
-------
offset: pandas.Timedelta
Timedelta with the offset for the timestamp t.
"""
return t - t.floor(freq)
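# Example (illustrative): for t = Timestamp("2000-01-01 18:00") and freq="D",
# t.floor("D") is midnight of the same day, so the returned offset is
# Timedelta("18 hours").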
def get_sample(tindex, ref_tindex):
"""Sample the index so that the frequency is not higher than the frequency
of ref_tindex.
Parameters
----------
tindex: pandas.index
Pandas index object
ref_tindex: pandas.index
Pandas index object
Returns
-------
series: pandas.index
Notes
-----
Find the index closest to the ref_tindex, and then return a selection
of the index.
"""
if len(tindex) == 1:
return tindex
else:
f = interpolate.interp1d(tindex.asi8, np.arange(0, tindex.size),
kind='nearest', bounds_error=False,
fill_value='extrapolate')
ind = np.unique(f(ref_tindex.asi8).astype(int))
return tindex[ind]
def timestep_weighted_resample(series, tindex):
"""resample a timeseries to a new tindex, using an overlapping-timestep
weighted average the new tindex does not have to be equidistant also,
the timestep-edges of the new tindex do not have to overlap with the
original series it is assumed the series consists of measurements that
describe an intensity at the end of the period for which they hold
therefore when upsampling, the values are uniformally spread over the
new timestep (like bfill) this method unfortunately is slower than the
pandas-reample methods.
Parameters
----------
series: pandas.Series
Series with the original measurements.
tindex: pandas.DatetimeIndex
Index with the timestep-edges of the new, possibly non-equidistant, grid.
Returns
-------
series: pandas.Series
The resampled series, with tindex as its index.
TODO Make faster, document and test.
"""
# determine some arrays for the input-series
t0e = series.index.get_values()
dt0 = np.diff(t0e)
dt0 = np.hstack((dt0[0], dt0))
t0s = t0e - dt0
v0 = series.values
# determine some arrays for the output-series
t1e = tindex.get_values()
dt1 = np.diff(t1e)
dt1 = np.hstack((dt1[0], dt1))
t1s = t1e - dt1
v1 = np.empty(t1e.shape)
v1[:] = np.nan
for i in range(len(v1)):
# determine which periods within the series are within the new tindex
mask = (t0e > t1s[i]) & (t0s < t1e[i])
if np.any(mask):
# cut by the timestep-edges
ts = t0s[mask]
te = t0e[mask]
ts[ts < t1s[i]] = t1s[i]
te[te > t1e[i]] = t1e[i]
# determine timestep
dt = (te - ts).astype(float)
# determine timestep-weighted value
v1[i] = np.sum(dt * v0[mask]) / np.sum(dt)
# replace all values in the series
series = Series(v1, index=tindex)
return series
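# Illustrative use of timestep_weighted_resample (hypothetical variable names,
# not part of the original module): resample a series of hourly intensities to
# irregular reading dates, weighting each value by the overlap of its timestep
# with the new timestep.
#
#   new_index = pandas.DatetimeIndex(["2020-01-10", "2020-02-01", "2020-02-20"])
#   resampled = timestep_weighted_resample(hourly_series, new_index)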
def excel2datetime(tindex, freq="D"):
"""Method to convert excel datetime to pandas timetime objects.
Parameters
----------
tindex: float, int or array-like
Excel serial date number(s), counted from the origin 1899-12-30.
freq: str
Returns
-------
datetimes: pandas.datetimeindex
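Examples
--------
Illustrative example (Excel serial day numbers count from 1899-12-30)::

    >>> excel2datetime(43831)
    Timestamp('2020-01-01 00:00:00')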
"""
datetimes = to_datetime('1899-12-30') + to_timedelta(tindex, freq)
return datetimes
def matlab2datetime(tindex):
""" Transform a matlab time to a datetime, rounded to seconds
"""
day = Timestamp.fromordinal(int(tindex))
dayfrac = Timedelta(days=float(tindex) % 1) - Timedelta(days=366)
return day + dayfrac
def datetime2matlab(tindex):
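"""Convert a pandas Timestamp to a MATLAB datenum (counterpart of
matlab2datetime)."""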
mdn = tindex + Timedelta(days=366)
frac = (tindex - tindex.round("D")).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def get_stress_tmin_tmax(ml):
"""Get the minimum and maximum time that all of the stresses have data"""
from .model import Model
from .project import Project
tmin = Timestamp.min
tmax = Timestamp.max
if isinstance(ml, Model):
for sm in ml.stressmodels:
for st in ml.stressmodels[sm].stress:
tmin = max((tmin, st.series_original.index.min()))
tmax = min((tmax, st.series_original.index.max()))
elif isinstance(ml, Project):
for st in ml.stresses['series']:
tmin = max((tmin, st.series_original.index.min()))
tmax = min((tmax, st.series_original.index.max()))
else:
raise (TypeError('Unknown type {}'.format(type(ml))))
return tmin, tmax
| mit |
Leotrinos/agpy | agpy/pymc_plotting.py | 6 | 7710 | import numpy as np
import pylab
import matplotlib
old_errsettings = np.geterr()
try:
import pymc # pymc breaks np error settings
except ImportError:
pass
np.seterr(**old_errsettings)
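# find_percentile: return the value in `data` at which the cumulative sum of
# the ascending-sorted values is closest to `pctile` percent of the total sum.
# The plotting helpers below use it to pick contour levels that enclose a
# given fraction of the histogram mass.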
def find_percentile(data, pctile):
sorted_data = np.sort(data.ravel())
accum_data = sorted_data.cumsum()
pctiles = accum_data / accum_data.max() * 100.
return sorted_data[np.argmin(np.abs(pctiles-pctile))]
def errellipse(MC, varname1, varname2, ax=None):
N = pymc.NormApprox(MC)
N.fit()
E = matplotlib.patches.Ellipse(N.mu[N.__dict__[varname1],
N.__dict__[varname2]],
N.C[N.__dict__[varname1]],
N.C[N.__dict__[varname2]],
(N.C[N.__dict__[varname1],
N.__dict__[varname2]][0,1] /
N.C[N.__dict__[varname1]] * 90.)[0],
facecolor='none',
edgecolor='black')
if ax is None:
ax=pylab.gca()
ax.add_artist(E)
def hist2d(MC, varname1, varname2, varslice=None,
percentiles=[0.0027,0.0455,0.3173,0.5,0.75],
colors=[(0.4,0.4,1,0.2),(1,0.4,1,0.5),(1,0.2,0.2,0.5),(0.7,0.1,0.1,1),(0.5,0.05,0.05,1),(0.4,0.05,0.05,0.5)],
ticklabels=['3$\\sigma$','2$\\sigma$','1$\\sigma$','50%','25%'],
axis=None,
fignum=1,
contourcmd=pylab.contourf,
clear=False,
colorbar=True,
doerrellipse=False,
chain=None,
**kwargs):
"""
Create a 2D histogram of the MCMC data over some Trace range
"""
try: # if input is just a dict of arrays
if varslice is None:
histvals,xvals,yvals = pylab.histogram2d(MC[varname1].squeeze(),MC[varname2].squeeze(),**kwargs)
else:
histvals,xvals,yvals = pylab.histogram2d(MC[varname1][slice(*varslice)].squeeze(),MC[varname2][slice(*varslice)].squeeze(),**kwargs)
except TypeError:
if varslice is None:
histvals,xvals,yvals = pylab.histogram2d(
MC.trace(varname1,chain=chain)[:].squeeze(),
MC.trace(varname2,chain=chain)[:].squeeze(),
**kwargs)
else:
histvals,xvals,yvals = pylab.histogram2d(
MC.trace(varname1,chain=chain)[slice(*varslice)].squeeze(),
MC.trace(varname2,chain=chain)[slice(*varslice)].squeeze(),
**kwargs)
levels = [find_percentile(histvals, p*100) for p in percentiles]
if axis is None:
pylab.figure(fignum)
if clear:
pylab.clf()
axis = pylab.gca()
xax = np.linspace(xvals.min(),xvals.max(),histvals.shape[1])
yax = np.linspace(yvals.min(),yvals.max(),histvals.shape[0])
if axis is not None:
contourcmd = eval('axis.'+contourcmd.__name__)
cntr = contourcmd(xax, yax, histvals.swapaxes(0,1), levels+[histvals.max()], colors=colors)
# hack to fix opacity
axis.set_xlabel(varname1)
axis.set_ylabel(varname2)
if colorbar:
try:
cb = pylab.colorbar(cntr, ax=axis)
cb.ax.set_yticks(levels)
cb.ax.set_yticklabels(ticklabels)
except Exception as e:
print "Colorbar failed with exception ",e
if doerrellipse:
errellipse(MC,varname1,varname2)
return axis
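# Illustrative usage of hist2d (hypothetical trace names, not part of the
# original module). `MC` may be a pymc MCMC/database object exposing .trace(),
# or a plain dict of 1-D sample arrays:
#
#   ax = hist2d(MC, 'amplitude', 'width', bins=50, clear=True)
#   ax.figure.savefig('amplitude_vs_width.png')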
def gkde_contours(MC, varname1, varname2, varslice=None,
percentiles=[0.0027,0.0455,0.3173,0.5,0.75],
colors=[(0.4,0.4,1,0.2),(1,0.4,1,0.5),(1,0.2,0.2,0.75),(1,0.1,0.1,1),(0.8,0.0,0.0,1),(0,0,0,1)],
ticklabels=['3$\\sigma$','2$\\sigma$','1$\\sigma$','50%','75%'],
fignum=1,
ngridpts=101,
clear=False,):
"""
Contours for a kernel density estimate... to compare to the real contours
"""
import scipy.stats
data1 = MC.trace(varname1)[slice(*varslice)]
data2 = MC.trace(varname2)[slice(*varslice)]
gkde = scipy.stats.gaussian_kde([data1,data2])
xvals = np.linspace(data1.min(),data1.max(),ngridpts)
yvals = np.linspace(data2.min(),data2.max(),ngridpts)
xx,yy = np.meshgrid(xvals, yvals)
zz = np.array(gkde.evaluate([xx.flatten(),yy.flatten()])).reshape(xx.shape)
pylab.figure(fignum)
if clear:
pylab.clf()
pylab.contour(xx, yy, zz, linewidths=1, alpha=.5, cmap=matplotlib.cm.Greys)
pylab.xlabel(varname1)
pylab.ylabel(varname2)
def plot_mc_hist(MC, field, varslice=None, onesided=True, bins=50, chain=None,
axis=None, lolim=False, legloc='best', legend=True, **kwargs):
"""
Plot a histogram with 1,2,3-sigma bars
"""
try:
field_data = MC[field].squeeze()
except TypeError:
field_data = MC.trace(field,chain=chain)[:]
if varslice is not None:
field_data = field_data[slice(*varslice)]
field_stats = {'mean': field_data.mean()}
if onesided:
#field_stats = MC.trace(field,chain=chain).stats(quantiles=[68.2689,95.44997,99.7300,50])
quantiles = {1:68.2689,2:95.44997,3:99.7300,'m':50}
if lolim:
quantiles = {k:100-q for k,q in quantiles.iteritems()}
field_stats['quantiles'] = {k:np.percentile(field_data,q) for k,q in quantiles.iteritems()}
else:
#field_stats = MC.trace(field,chain=chain).stats(quantiles=[0.135,2.275,15.866,84.134,97.725,99.865,50])
field_stats['quantiles'] = {q:np.percentile(field_data,q) for q in [0.135,2.275,15.866,84.134,97.725,99.865,50]}
vpts = field_stats['quantiles']
if axis is None:
ax = pylab.gca()
else:
ax = axis
#field_data_sorted = np.sort(field_data)
h,l,p = ax.hist(field_data,bins=bins,histtype='stepfilled',**kwargs)
if kwargs.get('normed'):
ylim = [0,h.max()*1.01]
else:
ylim = ax.get_ylim()
#fieldlen = len(field_data)
if onesided:
ax.vlines(vpts[1], *ylim,linewidth=3, alpha=0.5, color='k',label="$1\\sigma$")
ax.vlines(vpts[2],*ylim,linewidth=3, alpha=0.5, color='r',label="$2\\sigma$")
ax.vlines(vpts[3], *ylim,linewidth=3, alpha=0.5, color='g',label="$3\\sigma$")
else:
ax.vlines(field_stats['mean'],*ylim,color='k', linestyle='--', linewidth=3, alpha=0.5, label="$\mu$")
ax.vlines(vpts[50],*ylim, color='b', linestyle='--', linewidth=3, alpha=0.5, label="$\mu_{1/2}$")
ax.vlines([vpts[15.866],vpts[84.134]],*ylim,color='k',linewidth=3, alpha=0.5, label="$1\\sigma$")
ax.vlines([vpts[02.275],vpts[97.725]],*ylim,color='r',linewidth=3, alpha=0.5, label="$2\\sigma$")
ax.vlines([vpts[00.135],vpts[99.865]],*ylim,color='g',linewidth=3, alpha=0.5, label="$3\\sigma$")
ax.set_ylim(*ylim)
if legend:
ax.legend(loc=legloc)
return ax
def autocorr_diagnostics(mc):
traces = mc.db._traces
ntraces = len(traces)
npanels = np.ceil(np.sqrt(ntraces))
for ii,(k,v) in enumerate(traces.iteritems()):
if v[:].ndim > 1:
d = v[:,0].squeeze()
else:
d = v[:].squeeze()
pylab.subplot(npanels, npanels, ii+1)
ft = np.fft.fft(d)
ac = np.fft.ifft(ft*ft[::-1])
frq = np.fft.fftfreq(ac.size)
pylab.plot(frq,ac,',')
def trace_diagnostics(mc):
traces = mc.db._traces
ntraces = len(traces)
npanels = np.ceil(np.sqrt(ntraces))
for ii,(k,v) in enumerate(traces.iteritems()):
if v[:].ndim > 1:
d = v[:,0].squeeze()
else:
d = v[:].squeeze()
pylab.subplot(npanels, npanels, ii+1)
pylab.plot(d,',')
| mit |
miloharper/neural-network-animation | matplotlib/tests/test_basic.py | 10 | 1264 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
from matplotlib.testing.decorators import knownfailureif
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 4 | 42435 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import sys
import tempfile
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.lookup import lookup_ops
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
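# The model_fn helpers above follow the tf.contrib.learn contract: they take
# (features, labels) plus optionally mode/params/config and return either a
# (predictions, loss, train_op) tuple or a model_fn.ModelFnOps. A minimal
# sketch of how the tests below wire them into an Estimator (illustrative
# only, mirroring calls made in the test cases):
#
#   est = estimator.Estimator(model_fn=linear_model_fn)
#   est.fit(input_fn=boston_input_fn, steps=10)
#   scores = est.evaluate(input_fn=boston_eval_fn, steps=1)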
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
export_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def export_input_fn_with_asset():
features, labels, inputs = export_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup_ops.HashTable(
lookup_ops.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, export_input_fn_with_asset
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffold(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
training_scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
# Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7., 8.], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7., 10.], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, export_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, export_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Centre-Alt-Rendiment-Esportiu/att | old_project/Python/win_libs/scipy/cluster/hierarchy.py | 7 | 93813 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy.lib.six import string_types
from scipy.lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
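Examples
--------
Illustrative example (the linkage heights depend on the input distances,
so only the shape of the result is shown)::

    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist
    >>> X = np.array([[0., 0.], [0., 1.], [5., 5.]])
    >>> single(pdist(X)).shape
    (2, 4)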
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
Q : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
        cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
      * method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
        gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
+ \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
    implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
        # The C code does not support strided arrays, so copy if needed.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
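# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): ``linkage`` accepts either a condensed distance matrix from
# ``pdist`` or a raw m-by-n observation matrix.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.random.rand(10, 3)                    # 10 observations, 3 dims
#   >>> Z1 = linkage(pdist(X), method='average')     # from a condensed matrix
#   >>> Z2 = linkage(X, method='ward')               # from raw observations
#   >>> Z1.shape, Z2.shape                           # n - 1 = 9 rows each
#   ((9, 4), (9, 4))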
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
L : list
The pre-order traversal.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
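# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): convert a linkage into a ClusterNode tree and read the leaves
# back in left-to-right order via ``pre_order``.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.random.rand(6, 2)
#   >>> Z = linkage(pdist(X), method='complete')
#   >>> root = to_tree(Z)
#   >>> root.get_count()            # all 6 original observations
#   6
#   >>> leaf_ids = root.pre_order() # compare with leaves_list(Z)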
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
    # The C code does not support strided arrays; it works on contiguous
    # data with explicit dimensions.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
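# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): the cophenetic correlation coefficient is a common check of how
# faithfully a linkage preserves the original pairwise distances.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.random.rand(20, 4)
#   >>> Y = pdist(X)
#   >>> Z = linkage(Y, method='average')
#   >>> c, coph_dists = cophenet(Z, Y)   # c near 1 means Z preserves Y well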
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
    # The C code does not support strided arrays; it works on contiguous
    # data with explicit dimensions.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
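# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): the inconsistency matrix feeds the default ``fcluster``
# criterion.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> Z = linkage(pdist(np.random.rand(12, 3)), method='single')
#   >>> R = inconsistent(Z, d=2)
#   >>> R.shape        # one row per non-singleton cluster, 4 columns
#   (11, 4)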
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
     * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
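# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): the MATLAB(TM) conversion should round-trip, since
# ``from_mlab_linkage`` recomputes the dropped count column.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> Z = linkage(pdist(np.random.rand(5, 2)), method='single')
#   >>> ZM = to_mlab_linkage(Z)      # 3 columns, 1-based indices
#   >>> Z2 = from_mlab_linkage(ZM)   # back to 4 columns, 0-based indices
#   >>> np.allclose(Z, Z2)
#   True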
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # Each merge distance should be at least as large as the one before it.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
        True iff the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
'before its formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
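# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): ``correspond`` is a cheap sanity check before mixing a linkage
# with a condensed distance matrix.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.random.rand(8, 2)
#   >>> Y = pdist(X)
#   >>> Z = linkage(Y)
#   >>> correspond(Z, Y)              # same number of original observations
#   True
#   >>> correspond(Z, pdist(X[:5]))   # mismatched sizes
#   False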
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistics upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
    # The C code does not support strided arrays; it works on contiguous
    # data with explicit dimensions.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
            # The C code does not support strided arrays; it works on
            # contiguous data with explicit dimensions.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
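# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): the two most common ways of cutting a hierarchy into flat
# clusters.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.vstack([np.random.rand(10, 2), np.random.rand(10, 2) + 5])
#   >>> Z = linkage(pdist(X), method='ward')
#   >>> T1 = fcluster(Z, t=2, criterion='maxclust')    # at most 2 clusters
#   >>> T2 = fcluster(Z, t=3.0, criterion='distance')  # cut at height 3.0
#   >>> T1.shape                                       # one label per point
#   (20,)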
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
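# A minimal usage sketch (assuming numpy is available): ``fclusterdata`` rolls
# pdist + linkage + fcluster into a single call on raw observations.
#
#   >>> import numpy as np
#   >>> X = np.vstack([np.random.rand(10, 2), np.random.rand(10, 2) + 5])
#   >>> T = fclusterdata(X, t=2, criterion='maxclust', method='ward')
#   >>> len(np.unique(T))    # the two well-separated blobs are recovered
#   2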
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
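# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): ``leaves_list`` gives the left-to-right leaf order of the
# dendrogram, handy for reordering the rows of a heat map.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> X = np.random.rand(7, 3)
#   >>> Z = linkage(pdist(X), method='average')
#   >>> order = leaves_list(Z)
#   >>> X_reordered = X[order]    # rows permuted into dendrogram order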
# Maps number of leaves to text size.
#
#   p <= 20,        size="12"
#   20 < p <= 30,   size="10"
#   30 < p <= 50,   size="8"
#   50 < p <= 85,   size="6"
#   85 < p,         size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(p))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(p))
                map(lambda lbl: lbl.set_size(leaf_fs), lbls)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the non-blue link groupings, i.e. those groupings below the
# color threshold.
for color in colors_used:
if color != 'b':
ax.add_collection(colors_to_collections[color])
# If there is a blue grouping (i.e., links above the color threshold),
# it should go last.
if 'b' in colors_to_collections:
ax.add_collection(colors_to_collections['b'])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
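# A minimal usage sketch: replace the default colors cycled through for links
# below ``color_threshold`` in subsequent ``dendrogram`` calls.
#
#   >>> set_link_color_palette(['m', 'c', 'y', 'k'])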
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are
          the only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
        ``'top'``
          Plots the root at the top, and plots descendent links going
          downwards. (default).
        ``'bottom'``
          Plots the root at the bottom, and plots descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, and plots descendent links going right.
        ``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called with the
        cluster index :math:`k < 2n-1` of each leaf, and it is expected to
        return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
        ...     if id < n:
        ...         return str(id)
        ...     else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax)
return R
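# A minimal usage sketch (assuming numpy and scipy.spatial.distance are
# available): with ``no_plot=True`` the layout is computed without touching
# matplotlib, which is useful for custom rendering or testing.
#
#   >>> import numpy as np
#   >>> from scipy.spatial.distance import pdist
#   >>> Z = linkage(pdist(np.random.rand(8, 2)), method='average')
#   >>> R = dendrogram(Z, no_plot=True)
#   >>> sorted(R.keys())
#   ['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']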
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None):
"""
Calculates the endpoints of the links as well as the labels for the
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of
        # original observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = 'b'
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
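    Examples
    --------
    A minimal sketch: two assignments that differ only by a consistent
    relabelling of the flat cluster ids are equivalent, while an
    inconsistent relabelling is not.
    >>> is_isomorphic([1, 1, 2, 3], [2, 2, 3, 1])
    True
    >>> is_isomorphic([1, 1, 2, 3], [2, 3, 3, 1])
    False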
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
    Returns, for each non-singleton cluster, the maximum distance found
    among all clusters below and including it.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
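    Examples
    --------
    A minimal sketch, assuming ``linkage`` from this module is used to build
    the hierarchy first:
    >>> X = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [4.0, 4.0]]
    >>> Z = linkage(X, method='single')
    >>> md = maxdists(Z)  # md[i] is the largest merge distance below node n+i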
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
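    Examples
    --------
    A minimal sketch, assuming ``linkage`` and ``inconsistent`` from this
    module:
    >>> X = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [4.0, 4.0]]
    >>> Z = linkage(X, method='single')
    >>> R = inconsistent(Z)
    >>> mi = maxinconsts(Z, R)  # mi[i] is the max inconsistency below node n+i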
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
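    Examples
    --------
    A minimal sketch, assuming ``linkage`` and ``fcluster`` from this module:
    >>> X = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [4.0, 4.0]]
    >>> Z = linkage(X, method='single')
    >>> T = fcluster(Z, 2, criterion='maxclust')
    >>> L, M = leaders(Z, T)  # L[j] is the root linkage node of flat cluster M[j]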
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| gpl-3.0 |
Garrett-R/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
mdeff/ntds_2016 | project/reports/fake_news/lib/exploitation_helper.py | 1 | 2665 | import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.contrib import learn
def preprocessing(dataframe):
mask_true = np.array(dataframe.rating == 'mostly true')
mask_false = np.array(dataframe.rating == 'mostly false')
# Extract text and labels
x_text = dataframe.message.fillna('')
y = dataframe.rating
# Keep only two labels
x_true = x_text[mask_true]
x_false = x_text[mask_false]
y_true = y[mask_true]
y_false = y[mask_false]
y_true[:] = 0
y_false[:] = 1
# Reassemble the filtered data
x_text = pd.concat([x_true, x_false])
y_target = pd.concat([y_true, y_false])
# Shuffle (fixed seed)
x_text = x_text.sample(frac=1, random_state=0)
y_target = y_target.sample(frac=1, random_state=0)
# Pad the messages such that they all have the same length
max_document_length = x_text.map(lambda s: s.split(" ")).map(len).max()
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
y_array = y_target.as_matrix()
y = np.zeros((len(y_array), 2), dtype=int)
for i, a in enumerate(y_array):
if a == 1:
y[i, :] = [0, 1]
else:
y[i, :] = [1, 0]
return x, y, vocab_processor
def compute_confusion_metrics(predicted, actual):
# Count true positives, true negatives, false positives and false negatives.
tp = tf.count_nonzero(predicted * actual)
tn = tf.count_nonzero((predicted - 1) * (actual - 1))
fp = tf.count_nonzero(predicted * (actual - 1))
fn = tf.count_nonzero((predicted - 1) * actual)
# Calculate accuracy, precision, recall and F1 score.
# accuracy = (tp + tn) / (tp + fp + fn + tn)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fmeasure = (2 * precision * recall) / (precision + recall)
return precision, recall, fmeasure
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
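# A minimal usage sketch for batch_iter, guarded so it only runs when this
# helper module is executed directly (the toy arrays below are illustrative
# only and stand in for real zipped training examples).
if __name__ == '__main__':
    _x = np.arange(10)
    _y = np.arange(10) * 2
    for _batch in batch_iter(list(zip(_x, _y)), batch_size=4, num_epochs=1):
        _x_batch, _y_batch = zip(*_batch)
        print(len(_x_batch), len(_y_batch))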
| mit |
ibmsoe/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 7 | 54562 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import inspect
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existance of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
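  Example (an illustrative sketch; the `input_fn` and feature names below are
  hypothetical, and `tf` is assumed to be the imported TensorFlow module):
    def input_fn():
      features = {'age': tf.constant([[23.0], [31.0]]),
                  'income': tf.constant([[5.0], [7.5]])}
      labels = tf.constant([0, 1])
      return features, labels
    feature_columns = infer_real_valued_columns_from_input_fn(input_fn)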
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableOfTensors', 'MutableDenseHashTable'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
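  Example of a `metrics` dict built with `MetricSpec` (an illustrative sketch;
  the prediction key 'classes' is a hypothetical key in the model's
  prediction dict):
    metrics = {
        'accuracy': metric_spec.MetricSpec(
            metric_fn=metrics_lib.streaming_accuracy,
            prediction_key='classes'),
    }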
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
  # Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can implement
    either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long
    time to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
all_hooks.extend(hooks)
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 the Estimator to be configured through hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
      Also supports a legacy signature which returns a tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
      The following signatures are supported for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) has less than 4 '
'arguments, but not None params (%s) are passed.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _get_arguments(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
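    Example (a minimal sketch; assumes `input_fn_utils` from tf.contrib.learn,
    and the feature name, shape, and export path are hypothetical):

      def serving_input_fn():
        inputs = {'x': array_ops.placeholder(dtypes.float32, [None, 10])}
        return input_fn_utils.InputFnOps(
            features=inputs, labels=None, default_inputs=inputs)

      export_dir = estimator.export_savedmodel('/tmp/export', serving_input_fn)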
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
variables.initialize_local_variables()
data_flow_ops.tables_initializer()
resources.initialize_resources(resources.shared_resources())
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
data_flow_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
# For the duration of the deprecation of x, y in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name='score')
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
| apache-2.0 |
bert9bert/statsmodels | statsmodels/datasets/scotland/data.py | 5 | 3014 | """Taxation Powers Vote for the Scottish Parliament 1997 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "Taxation Powers Vote for the Scottish Parliamant 1997"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Taxation Powers' Yes Vote for Scottish Parliamanet-1997"""
DESCRLONG = """
This data is based on the example in Gill and describes the proportion of
voters who voted Yes to grant the Scottish Parliament taxation powers.
The data are divided into 32 council districts. This example's explanatory
variables include the amount of council tax collected in pounds sterling as
of April 1997 per two adults before adjustments, the female percentage of
total claims for unemployment benefits as of January, 1998, the standardized
mortality rate (UK is 100), the percentage of labor force participation,
regional GDP, the percentage of children aged 5 to 15, and an interaction term
between female unemployment and the council tax.
The original source files and variable information are included in
/scotland/src/
"""
NOTE = """::
Number of Observations - 32 (1 for each Scottish district)
Number of Variables - 8
Variable name definitions::
YES - Proportion voting yes to granting taxation powers to the
Scottish parliament.
COUTAX - Amount of council tax collected in pounds sterling as of
April '97
UNEMPF - Female percentage of total unemployment benefits claims as of
January 1998
MOR - The standardized mortality rate (UK is 100)
ACT - Labor force participation (Short for active)
GDP - GDP per county
AGE - Percentage of children aged 5 to 15 in the county
COUTAX_FEMALEUNEMP - Interaction between COUTAX and UNEMPF
Council district names are included in the data file, though they are not
returned by load.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
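    Examples
    --------
    A minimal usage sketch (output omitted)::

        >>> from statsmodels.datasets import scotland
        >>> data = scotland.load()
        >>> data.endog[:5]   # proportion voting Yes in the first five districts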
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the Scotvote data and returns a Dataset instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
with open(filepath + '/scotvote.csv',"rb") as f:
data = np.recfromtxt(f, delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))
return data
| bsd-3-clause |
ankurankan/scikit-learn | examples/classification/plot_lda_qda.py | 17 | 4794 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
Lyleo/nupic | nupic/research/monitor_mixin/monitor_mixin_base.py | 7 | 5503 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
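    Example (illustrative; `instance` is assumed to be a monitored object):
      print instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces())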
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
@param metrics (list) Traces to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType)
return plot
| gpl-3.0 |
h2educ/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
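    Examples
    --------
    A minimal sketch on a toy array (values are illustrative only)::

        >>> import numpy as np
        >>> X = np.array([[1., 2.], [2., 2.], [2., 3.],
        ...               [8., 7.], [8., 8.], [25., 80.]])
        >>> core_samples, labels = dbscan(X, eps=3, min_samples=2)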
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i; while true, this is useless information).
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
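    Examples
    --------
    A minimal sketch on a toy array (values are illustrative only)::

        >>> import numpy as np
        >>> X = np.array([[1., 2.], [2., 2.], [2., 3.],
        ...               [8., 7.], [8., 8.], [25., 80.]])
        >>> db = DBSCAN(eps=3, min_samples=2).fit(X)
        >>> db.labels_    # noise points are labelled -1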
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
bowang/tensorflow | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 9763 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments, self.scores = self.make_random_points(
self.true_centers, self.num_points)
self.true_score = np.add.reduce(self.scores)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
means = [
np.mean(
points[assignments == center], axis=0)
for center in xrange(num_centers)
]
covs = [
np.cov(points[assignments == center].T)
for center in xrange(num_centers)
]
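    # Score each point by its Mahalanobis distance to its assigned center,
    # sqrt((x - mu)^T Sigma^(-1) (x - mu)), using the empirical mean and
    # covariance computed above.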
scores = []
for r in xrange(num_points):
scores.append(
np.sqrt(
np.dot(
np.dot(points[r, :] - means[assignments[r]],
np.linalg.inv(covs[assignments[r]])), points[r, :] -
means[assignments[r]])))
return (points, assignments, scores)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertGreater(score1, score2)
self.assertNear(self.true_score, score2, self.true_score * 0.15)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments, true_offsets = (
self.make_random_points(clusters, num_points))
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
# Test score
score = gmm.score(input_fn=self.input_fn(points=points,
batch_size=num_points), steps=1)
self.assertNear(score, np.sum(true_offsets), 4.05)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |