repo_name | path | copies | size | content | license
---|---|---|---|---|---
Fireblend/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
representing multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, representing multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
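# Illustrative note (not from the original source): with pos_label=0 and
# neg_label=-1, the data is first binarized as if pos_label were 1
# (= -neg_label); the dense branch below then rewrites 0 -> neg_label and
# pos_label -> 0, restoring the requested encoding.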
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
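# Illustrative note (not from the original source): for binary targets only
# the column for the last entry of classes is kept below, which is what
# produces the documented [n_samples, 1] output shape.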
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
davidparks21/qso_lya_detection_pipeline | dla_cnn/vette_results.py | 1 | 17576 | """ Module to vette results against Human catalogs
SDSS-DR5 (JXP) and BOSS (Notredaeme)
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import pdb
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.table import Table
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
from linetools import utils as ltu
from pyigm.surveys.llssurvey import LLSSurvey
from pyigm.surveys.dlasurvey import DLASurvey
def json_to_sdss_dlasurvey(json_file, sdss_survey, add_pf=True, debug=False):
""" Convert JSON output file to a DLASurvey object
Assumes SDSS bookkeeping for sightlines (i.e. PLATE, FIBER)
Parameters
----------
json_file : str
Full path to the JSON results file
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
add_pf : bool, optional
Add plate/fiber to DLAs in sdss_survey
Returns
-------
ml_survey : LLSSurvey
Survey object for the LLS
"""
print("Loading SDSS Survey from JSON file {:s}".format(json_file))
# imports
from pyigm.abssys.dla import DLASystem
from pyigm.abssys.lls import LLSSystem
# Fiber key
for fkey in ['FIBER', 'FIBER_ID', 'FIB']:
if fkey in sdss_survey.sightlines.keys():
break
# Read
ml_results = ltu.loadjson(json_file)
use_platef = False
if 'plate' in ml_results[0].keys():
use_platef = True
else:
if 'id' in ml_results[0].keys():
use_id = True
# Init
#idict = dict(plate=[], fiber=[], classification_confidence=[], # FOR v2
# classification=[], ra=[], dec=[])
idict = dict(ra=[], dec=[])
if use_platef:
for key in ['plate', 'fiber', 'mjd']:
idict[key] = []
ml_tbl = Table()
ml_survey = LLSSurvey()
systems = []
in_ml = np.array([False]*len(sdss_survey.sightlines))
# Loop
for obj in ml_results:
# Sightline
for key in idict.keys():
idict[key].append(obj[key])
# DLAs
#if debug:
# if (obj['plate'] == 1366) & (obj['fiber'] == 614):
# sv_coord = SkyCoord(ra=obj['ra'], dec=obj['dec'], unit='deg')
# print("GOT A MATCH IN RESULTS FILE")
for idla in obj['dlas']:
"""
dla = DLASystem((sdss_survey.sightlines['RA'][mt[0]],
sdss_survey.sightlines['DEC'][mt[0]]),
idla['spectrum']/(1215.6701)-1., None,
idla['column_density'])
"""
if idla['z_dla'] < 1.8:
continue
isys = LLSSystem((obj['ra'],obj['dec']),
idla['z_dla'], None, NHI=idla['column_density'], zem=obj['z_qso'])
isys.confidence = idla['dla_confidence']
if use_platef:
isys.plate = obj['plate']
isys.fiber = obj['fiber']
elif use_id:
plate, fiber = [int(spl) for spl in obj['id'].split('-')]
isys.plate = plate
isys.fiber = fiber
# Save
systems.append(isys)
# Connect to sightlines
ml_coord = SkyCoord(ra=idict['ra'], dec=idict['dec'], unit='deg')
s_coord = SkyCoord(ra=sdss_survey.sightlines['RA'], dec=sdss_survey.sightlines['DEC'], unit='deg')
idx, d2d, d3d = match_coordinates_sky(s_coord, ml_coord, nthneighbor=1)
used = d2d < 1.*u.arcsec
for iidx in np.where(~used)[0]:
print("Sightline RA={:g}, DEC={:g} was not used".format(sdss_survey.sightlines['RA'][iidx],
sdss_survey.sightlines['DEC'][iidx]))
# Add plate/fiber to statistical DLAs
if add_pf:
dla_coord = sdss_survey.coord
idx2, d2d, d3d = match_coordinates_sky(dla_coord, s_coord, nthneighbor=1)
if np.min(d2d.to('arcsec').value) > 1.:
raise ValueError("Bad match to sightlines")
for jj,igd in enumerate(np.where(sdss_survey.mask)[0]):
dla = sdss_survey._abs_sys[igd]
try:
dla.plate = sdss_survey.sightlines['PLATE'][idx2[jj]]
except IndexError:
pdb.set_trace()
dla.fiber = sdss_survey.sightlines[fkey][idx2[jj]]
# Finish
ml_survey._abs_sys = systems
if debug:
ml2_coord = ml_survey.coord
minsep = np.min(sv_coord.separation(ml2_coord))
minsep2 = np.min(sv_coord.separation(s_coord))
tmp = sdss_survey.sightlines[used]
t_coord = SkyCoord(ra=tmp['RA'], dec=tmp['DEC'], unit='deg')
minsep3 = np.min(sv_coord.separation(t_coord))
pdb.set_trace()
ml_survey.sightlines = sdss_survey.sightlines[used]
for key in idict.keys():
ml_tbl[key] = idict[key]
ml_survey.ml_tbl = ml_tbl
# Return
return ml_survey
def vette_dlasurvey(ml_survey, sdss_survey, fig_root='tmp', lyb_cut=True,
dz_toler=0.03, debug=False):
"""
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
fig_root : str, optional
Root string for figures generated
lyb_cut : bool, optional
Cut surveys at Lyb in QSO rest-frame.
Recommended until LLS, Lyb and OVI is dealt with
dz_toler : float, optional
Tolerance for matching in redshift
Returns
-------
false_neg : list
List of systems that are false negatives from SDSS -> ML
midx : list
List of indices matching SDSS -> ML
false_pos : list
List of systems that are false positives from ML -> SDSS
"""
from pyigm.surveys import dlasurvey as pyis_ds
reload(pyis_ds)
# Cut at Lyb
if lyb_cut:
for survey in [ml_survey, sdss_survey]:
# Alter Z_START
zlyb = (1+survey.sightlines['ZEM']).data*1026./1215.6701 - 1.
survey.sightlines['Z_START'] = np.maximum(survey.sightlines['Z_START'], zlyb)
# Mask
mask = pyis_ds.dla_stat(survey, survey.sightlines, zem_tol=0.2) # Errors in zem!
survey.mask = mask
print("Done cutting on Lyb")
# Setup coords
ml_coords = ml_survey.coord
ml_z = ml_survey.zabs
s_coords = sdss_survey.coord
s_z = sdss_survey.zabs
# if debug:
# miss_coord = SkyCoord(ra=174.35545833333333,dec=44.585,unit='deg')
# minsep = np.min(miss_coord.separation(ml_coords))
# s_coord = SkyCoord(ra=ml_survey.sightlines['RA'], dec=ml_survey.sightlines['DEC'], unit='deg')
# isl = np.argmin(miss_coord.separation(s_coord))
# Match from SDSS and record false negatives
false_neg = []
midx = []
for igd in np.where(sdss_survey.mask)[0]:
isys = sdss_survey._abs_sys[igd]
# Match?
gd_radec = np.where(isys.coord.separation(ml_coords) < 1*u.arcsec)[0]
sep = isys.coord.separation(ml_coords)
if len(gd_radec) == 0:
false_neg.append(isys)
midx.append(-1)
else:
gdz = np.abs(ml_z[gd_radec] - isys.zabs) < dz_toler
# Only require one match
if np.sum(gdz) > 0:
iz = np.argmin(np.abs(ml_z[gd_radec] - isys.zabs))
midx.append(gd_radec[iz])
else:
false_neg.append(isys)
midx.append(-1)
if debug:
if (isys.plate == 1366) & (isys.fiber == 614):
pdb.set_trace()
# Match from ML and record false positives
false_pos = []
pidx = []
for igd in np.where(ml_survey.mask)[0]:
isys = ml_survey._abs_sys[igd]
# Match?
gd_radec = np.where(isys.coord.separation(s_coords) < 1*u.arcsec)[0]
sep = isys.coord.separation(s_coords)
if len(gd_radec) == 0:
false_pos.append(isys)
pidx.append(-1)
else:
gdz = np.abs(s_z[gd_radec] - isys.zabs) < dz_toler
# Only require one match
if np.sum(gdz) > 0:
iz = np.argmin(np.abs(s_z[gd_radec] - isys.zabs))
pidx.append(gd_radec[iz])
else:
false_pos.append(isys)
pidx.append(-1)
# Return
return false_neg, np.array(midx), false_pos
def mk_false_neg_table(false_neg, outfil):
""" Generate a simple CSV file of false negatives
Parameters
----------
false_neg : list
List of false negative systems
outfil : str
Returns
-------
"""
# Parse
ra, dec = [], []
zabs, zem = [], []
NHI = []
plate, fiber = [], []
for ifneg in false_neg:
ra.append(ifneg.coord.ra.value)
dec.append(ifneg.coord.dec.value)
zabs.append(ifneg.zabs)
zem.append(ifneg.zem)
NHI.append(ifneg.NHI)
plate.append(ifneg.plate)
fiber.append(ifneg.fiber)
# Generate a Table
fneg_tbl = Table()
fneg_tbl['RA'] = ra
fneg_tbl['DEC'] = dec
fneg_tbl['zabs'] = zabs
fneg_tbl['zem'] = zem
fneg_tbl['NHI'] = NHI
fneg_tbl['plate'] = plate
fneg_tbl['fiber'] = fiber
# Write
print("Writing false negative file: {:s}".format(outfil))
fneg_tbl.write(outfil, format='ascii.csv')#, overwrite=True)
def fig_dzdnhi(ml_survey, sdss_survey, midx, outfil='fig_dzdnhi.pdf'):
""" Compare zabs and NHI between SDSS and ML
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
This should be masked according to the vetting
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
This should be masked according to the vetting
midx : list
List of indices matching SDSS -> ML
outfil : str, optional
Input None to plot to screen
Returns
-------
"""
# z, NHI
z_sdss = sdss_survey.zabs
z_ml = ml_survey.zabs
NHI_sdss = sdss_survey.NHI
NHI_ml = ml_survey.NHI
# deltas
dz = []
dNHI = []
for qq,idx in enumerate(midx):
if idx < 0:
continue
# Match
dz.append(z_sdss[qq]-z_ml[idx])
dNHI.append(NHI_sdss[qq]-NHI_ml[idx])
# Figure
if outfil is not None:
pp = PdfPages(outfil)
fig = plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(1, 2)
# dz
ax = plt.subplot(gs[0])
ax.hist(dz, color='green', bins=20)#, normed=True)#, bins=20 , zorder=1)
#ax.text(0.05, 0.74, lbl3, transform=ax.transAxes, color=wcolor, size=csz, ha='left')
ax.set_xlim(-0.03, 0.03)
ax.set_xlabel(r'$\delta z$ [SDSS-ML]')
# NHI
ax = plt.subplot(gs[1])
ax.hist(dNHI, color='blue', bins=20)#, normed=True)#, bins=20 , zorder=1)
#ax.text(0.05, 0.74, lbl3, transform=ax.transAxes, color=wcolor, size=csz, ha='left')
#ax.set_xlim(-0.03, 0.03)
ax.set_xlabel(r'$\Delta \log N_{\rm HI}$ [SDSS-ML]')
#
# End
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)
if outfil is not None:
print('Writing {:s}'.format(outfil))
pp.savefig()
pp.close()
plt.close()
else:
plt.show()
def fig_falseneg(ml_survey, sdss_survey, false_neg, outfil='fig_falseneg.pdf'):
""" Figure on false negatives
Parameters
----------
ml_survey : IGMSurvey
Survey describing the Machine Learning results
This should be masked according to the vetting
sdss_survey : DLASurvey
SDSS survey, usually human (e.g. JXP for DR5)
This should be masked according to the vetting
midx : list
List of indices matching SDSS -> ML
false_neg : list
List of false negatives
outfil : str, optional
Input None to plot to screen
Returns
-------
"""
# Generate some lists
zabs_false = [isys.zabs for isys in false_neg]
zem_false = [isys.zem for isys in false_neg]
NHI_false = [isys.NHI for isys in false_neg]
# Figure
if outfil is not None:
pp = PdfPages(outfil)
fig = plt.figure(figsize=(8, 5))
plt.clf()
gs = gridspec.GridSpec(2, 2)
# zabs
ax = plt.subplot(gs[0])
ax.hist(zabs_false, color='green', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$z_{\rm abs}$')
# zem
ax = plt.subplot(gs[1])
ax.hist(zem_false, color='red', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$z_{\rm qso}$')
# NHI
ax = plt.subplot(gs[2])
ax.hist(NHI_false, color='blue', bins=20)#, normed=True)#, bins=20 , zorder=1)
ax.set_xlabel(r'$\log \, N_{\rm HI}$')
# End
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)
if outfil is not None:
print('Writing {:s}'.format(outfil))
pp.savefig()
pp.close()
plt.close()
else:
plt.show()
def dr5_for_david():
""" Generate a Table for David
"""
# imports
from pyigm.abssys.dla import DLASystem
from pyigm.abssys.lls import LLSSystem
sdss_survey = DLASurvey.load_SDSS_DR5()
# Fiber key
for fkey in ['FIBER', 'FIBER_ID', 'FIB']:
if fkey in sdss_survey.sightlines.keys():
break
# Init
#idict = dict(plate=[], fiber=[], classification_confidence=[], # FOR v2
# classification=[], ra=[], dec=[])
# Connect to sightlines
s_coord = SkyCoord(ra=sdss_survey.sightlines['RA'], dec=sdss_survey.sightlines['DEC'], unit='deg')
# Add plate/fiber to statistical DLAs
dla_coord = sdss_survey.coord
idx2, d2d, d3d = match_coordinates_sky(dla_coord, s_coord, nthneighbor=1)
if np.min(d2d.to('arcsec').value) > 1.:
raise ValueError("Bad match to sightlines")
plates, fibers = [], []
for jj,igd in enumerate(np.where(sdss_survey.mask)[0]):
dla = sdss_survey._abs_sys[igd]
try:
dla.plate = sdss_survey.sightlines['PLATE'][idx2[jj]]
except IndexError:
pdb.set_trace()
dla.fiber = sdss_survey.sightlines[fkey][idx2[jj]]
plates.append(sdss_survey.sightlines['PLATE'][idx2[jj]])
fibers.append(sdss_survey.sightlines[fkey][idx2[jj]])
# Write
dtbl = Table()
dtbl['plate'] = plates
dtbl['fiber'] = fibers
dtbl['zabs'] = sdss_survey.zabs
dtbl['NHI'] = sdss_survey.NHI
dtbl.write('results/dr5_for_david.ascii', format='ascii')
# Write sightline info
stbl = sdss_survey.sightlines[['PLATE', 'FIB', 'Z_START', 'Z_END', 'RA', 'DEC']]
gdsl = stbl['Z_END'] > stbl['Z_START']
stbl[gdsl].write('results/dr5_sightlines_for_david.ascii', format='ascii')
def main(flg_tst, sdss=None, ml_survey=None):
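# Illustrative note (not from the original source): flg_tst is treated as a
# bit mask; (flg_tst % 2**(n+1)) >= 2**n (and later flg_tst & 2**n) tests
# whether bit n is set, so e.g. flg_tst = 2**0 + 2**2 would run both the DR5
# JSON load and the v5 vetting block.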
# Load JSON for DR5
if (flg_tst % 2**1) >= 2**0:
if sdss is None:
sdss = DLASurvey.load_SDSS_DR5()
#ml_survey = json_to_sdss_dlasurvey('../results/dr5_v1_predictions.json', sdss)
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v2_results.json', sdss)
# Vette
if (flg_tst % 2**2) >= 2**1:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v2_results.json', sdss)
vette_dlasurvey(ml_survey, sdss)
# Vette v5 and generate CSV
if (flg_tst % 2**3) >= 2**2:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v5_predictions.json', sdss)
false_neg, midx, _ = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v5.csv')
# Vette v6 and generate CSV
if (flg_tst % 2**4) >= 2**3:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/dr5_v6.1_results.json', sdss)
false_neg, midx, _ = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v6.1.csv')
# Vette gensample v2
if (flg_tst % 2**5) >= 2**4:
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/results_catalog_dr7_model_gensample_v2.json',sdss)
false_neg, midx, false_pos = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v2_gen.csv')
mk_false_neg_table(false_pos, '../results/false_positives_DR5_v2_gen.csv')
# Vette gensample v4.3.1
if flg_tst & (2**5):
if ml_survey is None:
sdss = DLASurvey.load_SDSS_DR5()
ml_survey = json_to_sdss_dlasurvey('../results/results_model_4.3.1_data_dr5.json',sdss)
false_neg, midx, false_pos = vette_dlasurvey(ml_survey, sdss)
# CSV of false negatives
mk_false_neg_table(false_neg, '../results/false_negative_DR5_v4.3.1_gen.csv')
mk_false_neg_table(false_pos, '../results/false_positives_DR5_v4.3.1_gen.csv')
if flg_tst & (2**6):
dr5_for_david()
# Test
if __name__ == '__main__':
flg_tst = 0
#flg_tst += 2**0 # Load JSON for DR5
#flg_tst += 2**1 # Vette
#flg_tst += 2**2 # v5
#flg_tst += 2**3 # v6.1
#flg_tst += 2**4 # v2 of gensample
#flg_tst += 2**5 # v4.3.1 of gensample
flg_tst += 2**6 # Generate DR5 table for David
main(flg_tst)
| mit |
RecipeML/Recipe | recipe/classifiers/qda.py | 1 | 1502 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
def qda(args):
"""Uses scikit-learn's QuadraticDiscriminantAnalysis, a classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayesโ rule.
Parameters
----------
store_covariances : boolean
If True the covariance matrices are computed and stored in the self.covariances_ attribute.
tol : float
Threshold used for rank estimation.
reg_param : float
Regularizes the covariance estimate as (1-reg_param)*Sigma + reg_param*np.eye(n_features)
"""
rp = float(args[1])
t = float(args[2])
store_cov = False
if(args[3].find("True")!=-1):
store_cov = True
return QuadraticDiscriminantAnalysis(priors=None, reg_param=rp, store_covariances=store_cov, tol=t) | gpl-3.0 |
ndingwall/scikit-learn | examples/miscellaneous/plot_display_object_visualization.py | 17 | 3676 | """
===================================
Visualizations with Display Objects
===================================
.. currentmodule:: sklearn.metrics
In this example, we will construct display objects,
:class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and
:class:`PrecisionRecallDisplay` directly from their respective metrics. This
is an alternative to using their corresponding plot functions when
a model's predictions are already computed or expensive to compute. Note that
this is advanced usage, and in general we recommend using their respective
plot functions.
"""
print(__doc__)
# %%
# Load Data and train model
# -------------------------
# For this example, we load a blood transfusion service center data set from
# `OpenML <https://www.openml.org/d/1464>`. This is a binary classification
# problem where the target is whether an individual donated blood. Then the
# data is split into a train and test dataset and a logistic regression is
# fitted with the train dataset.
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
X, y = fetch_openml(data_id=1464, return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y)
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0))
clf.fit(X_train, y_train)
# %%
# Create :class:`ConfusionMatrixDisplay`
##############################################################################
# With the fitted model, we compute the predictions of the model on the test
# dataset. These predictions are used to compute the confusion matrix which
# is plotted with the :class:`ConfusionMatrixDisplay`
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cm_display = ConfusionMatrixDisplay(cm).plot()
# %%
# Create :class:`RocCurveDisplay`
##############################################################################
# The roc curve requires either the probabilities or the non-thresholded
# decision values from the estimator. Since the logistic regression provides
# a decision function, we will use it to plot the roc curve:
from sklearn.metrics import roc_curve
from sklearn.metrics import RocCurveDisplay
y_score = clf.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1])
roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot()
# %%
# Create :class:`PrecisionRecallDisplay`
##############################################################################
# Similarly, the precision recall curve can be plotted using `y_score` from
# the previous sections.
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import PrecisionRecallDisplay
prec, recall, _ = precision_recall_curve(y_test, y_score,
pos_label=clf.classes_[1])
pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot()
# %%
# Combining the display objects into a single plot
##############################################################################
# The display objects store the computed values that were passed as arguments.
# This allows for the visualizations to be easily combined using matplotlib's
# API. In the following example, we place the displays next to each other in a
# row.
# sphinx_gallery_thumbnail_number = 4
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
roc_display.plot(ax=ax1)
pr_display.plot(ax=ax2)
plt.show()
| bsd-3-clause |
magne-max/zipline-ja | tests/pipeline/test_us_equity_pricing_loader.py | 1 | 20821 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for USEquityPricingLoader and related classes.
"""
from numpy import (
arange,
datetime64,
float64,
ones,
uint32,
)
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
from pandas import (
concat,
DataFrame,
Int64Index,
Timestamp,
)
from toolz.curried.operator import getitem
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.loaders.synthetic import (
NullAdjustmentReader,
make_bar_data,
expected_bar_values_2d,
)
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.errors import WindowLengthTooLong
from zipline.pipeline.data import USEquityPricing
from zipline.testing import (
seconds_to_timestamp,
str_to_seconds,
MockDailyBarReader,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
ZiplineTestCase,
)
# Test calendar ranges over the month of June 2015
# June 2015
# Mo Tu We Th Fr Sa Su
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
TEST_CALENDAR_START = Timestamp('2015-06-01', tz='UTC')
TEST_CALENDAR_STOP = Timestamp('2015-06-30', tz='UTC')
TEST_QUERY_START = Timestamp('2015-06-10', tz='UTC')
TEST_QUERY_STOP = Timestamp('2015-06-19', tz='UTC')
# One asset for each of the cases enumerated in load_raw_arrays_from_bcolz.
EQUITY_INFO = DataFrame(
[
# 1) The equity's trades start and end before query.
{'start_date': '2015-06-01', 'end_date': '2015-06-05'},
# 2) The equity's trades start and end after query.
{'start_date': '2015-06-22', 'end_date': '2015-06-30'},
# 3) The equity's data covers all dates in range.
{'start_date': '2015-06-02', 'end_date': '2015-06-30'},
# 4) The equity's trades start before the query start, but stop
# before the query end.
{'start_date': '2015-06-01', 'end_date': '2015-06-15'},
# 5) The equity's trades start and end during the query.
{'start_date': '2015-06-12', 'end_date': '2015-06-18'},
# 6) The equity's trades start during the query, but extend through
# the whole query.
{'start_date': '2015-06-15', 'end_date': '2015-06-25'},
],
index=arange(1, 7),
columns=['start_date', 'end_date'],
).astype(datetime64)
EQUITY_INFO['symbol'] = [chr(ord('A') + n) for n in range(len(EQUITY_INFO))]
TEST_QUERY_ASSETS = EQUITY_INFO.index
# ADJUSTMENTS use the following scheme to indicate information about the value
# upon inspection.
#
# 1s place is the equity
#
# 0.1s place is the action type, with:
#
# splits, 1
# mergers, 2
# dividends, 3
#
# 0.001s is the date
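# Illustrative reading of the scheme above: a ratio of 3.112 encodes sid 3,
# action type 1 (split), on the 12th; 6.225 encodes sid 6, action type 2
# (merger), on the 25th.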
SPLITS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.103,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.110,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.112,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-21'),
'ratio': 6.121,
'sid': 6},
# Another action in query range, should have last_row of 1
{'effective_date': str_to_seconds('2015-06-11'),
'ratio': 3.111,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.119,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
MERGERS = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-03'),
'ratio': 1.203,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 3.210,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 3.212,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 6.225,
'sid': 6},
# Another action in query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 4.212,
'sid': 4},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 3.219,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
DIVIDENDS = DataFrame(
[
# Before query range, should be excluded.
{'declared_date': Timestamp('2015-05-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-03', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-05', tz='UTC').to_datetime64(),
'amount': 90.0,
'sid': 1},
# First day of query range, should be excluded.
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-10', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
'amount': 80.0,
'sid': 3},
# Third day of query range, should have last_row of 2
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-12', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-17', tz='UTC').to_datetime64(),
'amount': 70.0,
'sid': 3},
# After query range, should be excluded.
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-25', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-28', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
'amount': 60.0,
'sid': 6},
# Another action in query range, should have last_row of 3
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-15', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-18', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-20', tz='UTC').to_datetime64(),
'amount': 50.0,
'sid': 3},
# Last day of range. Should have last_row of 7
{'declared_date': Timestamp('2015-06-01', tz='UTC').to_datetime64(),
'ex_date': Timestamp('2015-06-19', tz='UTC').to_datetime64(),
'record_date': Timestamp('2015-06-22', tz='UTC').to_datetime64(),
'pay_date': Timestamp('2015-06-30', tz='UTC').to_datetime64(),
'amount': 40.0,
'sid': 3},
],
columns=['declared_date',
'ex_date',
'record_date',
'pay_date',
'amount',
'sid'],
)
DIVIDENDS_EXPECTED = DataFrame(
[
# Before query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-01'),
'ratio': 0.1,
'sid': 1},
# First day of query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-10'),
'ratio': 0.20,
'sid': 3},
# Third day of query range, should have last_row of 2
{'effective_date': str_to_seconds('2015-06-12'),
'ratio': 0.30,
'sid': 3},
# After query range, should be excluded.
{'effective_date': str_to_seconds('2015-06-25'),
'ratio': 0.40,
'sid': 6},
# Another action in query range, should have last_row of 3
{'effective_date': str_to_seconds('2015-06-15'),
'ratio': 0.50,
'sid': 3},
# Last day of range. Should have last_row of 7
{'effective_date': str_to_seconds('2015-06-19'),
'ratio': 0.60,
'sid': 3},
],
columns=['effective_date', 'ratio', 'sid'],
)
class USEquityPricingLoaderTestCase(WithAdjustmentReader,
ZiplineTestCase):
START_DATE = TEST_CALENDAR_START
END_DATE = TEST_CALENDAR_STOP
asset_ids = 1, 2, 3
@classmethod
def make_equity_info(cls):
return EQUITY_INFO
@classmethod
def make_splits_data(cls):
return SPLITS
@classmethod
def make_mergers_data(cls):
return MERGERS
@classmethod
def make_dividends_data(cls):
return DIVIDENDS
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader()
@classmethod
def make_equity_daily_bar_data(cls):
return make_bar_data(
EQUITY_INFO,
cls.equity_daily_bar_days,
)
@classmethod
def init_class_fixtures(cls):
super(USEquityPricingLoaderTestCase, cls).init_class_fixtures()
cls.assets = TEST_QUERY_ASSETS
cls.asset_info = EQUITY_INFO
def test_input_sanity(self):
# Ensure that the input data doesn't contain adjustments during periods
# where the corresponding asset didn't exist.
for table in SPLITS, MERGERS:
for eff_date_secs, _, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s')
asset_start, asset_end = EQUITY_INFO.ix[
sid, ['start_date', 'end_date']
]
self.assertGreaterEqual(eff_date, asset_start)
self.assertLessEqual(eff_date, asset_end)
def calendar_days_between(self, start_date, end_date, shift=0):
slice_ = self.equity_daily_bar_days.slice_indexer(start_date, end_date)
start = slice_.start + shift
stop = slice_.stop + shift
if start < 0:
raise KeyError(start_date, shift)
return self.equity_daily_bar_days[start:stop]
def expected_adjustments(self, start_date, end_date):
price_adjustments = {}
volume_adjustments = {}
query_days = self.calendar_days_between(start_date, end_date)
start_loc = query_days.get_loc(start_date)
for table in SPLITS, MERGERS, DIVIDENDS_EXPECTED:
for eff_date_secs, ratio, sid in table.itertuples(index=False):
eff_date = Timestamp(eff_date_secs, unit='s', tz='UTC')
# Ignore adjustments outside the query bounds.
if not (start_date <= eff_date <= end_date):
continue
eff_date_loc = query_days.get_loc(eff_date)
delta = eff_date_loc - start_loc
# Pricing adjustments should be applied on the date
# corresponding to the effective date of the input data. They
# should affect all rows **before** the effective date.
price_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=ratio,
)
)
# Volume is *inversely* affected by *splits only*.
if table is SPLITS:
volume_adjustments.setdefault(delta, []).append(
Float64Multiply(
first_row=0,
last_row=delta,
first_col=sid - 1,
last_col=sid - 1,
value=1.0 / ratio,
)
)
return price_adjustments, volume_adjustments
def test_load_adjustments_from_sqlite(self):
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
)
adjustments = self.adjustment_reader.load_adjustments(
[c.name for c in columns],
query_days,
self.assets,
)
close_adjustments = adjustments[0]
volume_adjustments = adjustments[1]
expected_close_adjustments, expected_volume_adjustments = \
self.expected_adjustments(TEST_QUERY_START, TEST_QUERY_STOP)
for key in expected_close_adjustments:
close_adjustment = close_adjustments[key]
for j, adj in enumerate(close_adjustment):
expected = expected_close_adjustments[key][j]
self.assertEqual(adj.first_row, expected.first_row)
self.assertEqual(adj.last_row, expected.last_row)
self.assertEqual(adj.first_col, expected.first_col)
self.assertEqual(adj.last_col, expected.last_col)
assert_allclose(adj.value, expected.value)
for key in expected_volume_adjustments:
volume_adjustment = volume_adjustments[key]
for j, adj in enumerate(volume_adjustment):
expected = expected_volume_adjustments[key][j]
self.assertEqual(adj.first_row, expected.first_row)
self.assertEqual(adj.last_row, expected.last_row)
self.assertEqual(adj.first_col, expected.first_col)
self.assertEqual(adj.last_col, expected.last_col)
assert_allclose(adj.value, expected.value)
def test_read_no_adjustments(self):
adjustment_reader = NullAdjustmentReader()
columns = [USEquityPricing.close, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
adjustments = adjustment_reader.load_adjustments(
[c.name for c in columns],
query_days,
self.assets,
)
self.assertEqual(adjustments, [{}, {}])
pricing_loader = USEquityPricingLoader(
self.bcolz_equity_daily_bar_reader,
adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
columns,
dates=query_days,
assets=self.assets,
mask=ones((len(query_days), len(self.assets)), dtype=bool),
)
closes, volumes = map(getitem(results), columns)
expected_baseline_closes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'close',
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'volume',
)
# AdjustedArrays should yield the same data as the expected baseline.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(closes.traverse(windowlen)):
assert_array_equal(
expected_baseline_closes[offset:offset + windowlen],
window,
)
for offset, window in enumerate(volumes.traverse(windowlen)):
assert_array_equal(
expected_baseline_volumes[offset:offset + windowlen],
window,
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
closes.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
def apply_adjustments(self, dates, assets, baseline_values, adjustments):
min_date, max_date = dates[[0, -1]]
# HACK: Simulate the coercion to float64 we do in adjusted_array. This
# should be removed when AdjustedArray properly supports
# non-floating-point types.
orig_dtype = baseline_values.dtype
values = baseline_values.astype(float64).copy()
for eff_date_secs, ratio, sid in adjustments.itertuples(index=False):
eff_date = seconds_to_timestamp(eff_date_secs)
# Don't apply adjustments that aren't in the current date range.
if eff_date not in dates:
continue
eff_date_loc = dates.get_loc(eff_date)
asset_col = assets.get_loc(sid)
# Apply ratio multiplicatively to the asset column on all rows less
# than or equal to the adjustment effective date.
values[:eff_date_loc + 1, asset_col] *= ratio
return values.astype(orig_dtype)
def test_read_with_adjustments(self):
columns = [USEquityPricing.high, USEquityPricing.volume]
query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP
)
# Our expected results for each day are based on values from the
# previous day.
shifted_query_days = self.calendar_days_between(
TEST_QUERY_START,
TEST_QUERY_STOP,
shift=-1,
)
pricing_loader = USEquityPricingLoader(
self.bcolz_equity_daily_bar_reader,
self.adjustment_reader,
)
results = pricing_loader.load_adjusted_array(
columns,
dates=query_days,
assets=Int64Index(arange(1, 7)),
mask=ones((len(query_days), 6), dtype=bool),
)
highs, volumes = map(getitem(results), columns)
expected_baseline_highs = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'high',
)
expected_baseline_volumes = expected_bar_values_2d(
shifted_query_days,
self.asset_info,
'volume',
)
# At each point in time, the AdjustedArrays should yield the baseline
# with all adjustments up to that date applied.
for windowlen in range(1, len(query_days) + 1):
for offset, window in enumerate(highs.traverse(windowlen)):
baseline = expected_baseline_highs[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
expected_adjusted_highs = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
# Apply all adjustments.
concat([SPLITS, MERGERS, DIVIDENDS_EXPECTED],
ignore_index=True),
)
assert_allclose(expected_adjusted_highs, window)
for offset, window in enumerate(volumes.traverse(windowlen)):
baseline = expected_baseline_volumes[offset:offset + windowlen]
baseline_dates = query_days[offset:offset + windowlen]
# Apply only splits and invert the ratio.
adjustments = SPLITS.copy()
adjustments.ratio = 1 / adjustments.ratio
expected_adjusted_volumes = self.apply_adjustments(
baseline_dates,
self.assets,
baseline,
adjustments,
)
# FIXME: Make AdjustedArray properly support integral types.
assert_array_equal(
expected_adjusted_volumes,
window.astype(uint32),
)
# Verify that we checked up to the longest possible window.
with self.assertRaises(WindowLengthTooLong):
highs.traverse(windowlen + 1)
with self.assertRaises(WindowLengthTooLong):
volumes.traverse(windowlen + 1)
| apache-2.0 |
imperial-genomics-facility/data-management-python | igf_data/process/batch_effect/batch_effect_report.py | 1 | 9733 | from shlex import quote
import pandas as pd
from copy import copy
import matplotlib
from sklearn.decomposition import PCA
import subprocess,re,os,json,base64
from igf_data.utils.fileutils import get_temp_dir,remove_dir,check_file_path,copy_local_file
from jinja2 import Template,Environment, FileSystemLoader,select_autoescape
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
class Batch_effect_report:
'''
  A Python class for checking lane-level batch effects for an RNA-Seq sample
  :param input_json_file: A json file containing a list of entries with the following info
file
flowcell
lane
:param template_file: A template file for writing the report
:param rscript_path: Rscript exe path
:param batch_effect_rscript_path: CPM conversion R-script file path
:param strand_info: RNA-Seq strand information, default reverse_strand
:param read_threshold: Threshold for low number of reads, default 5
:param allowed_strands: A list of allowed strands,
reverse_strand
forward_strand
                            unstranded
'''
def __init__(self,input_json_file,template_file,rscript_path,batch_effect_rscript_path,
allowed_strands=('reverse_strand','forward_strand','unstranded'),
read_threshold=5,strand_info='reverse_strand'):
self.input_json_file=input_json_file
self.template_file=template_file
self.batch_effect_rscript_path=batch_effect_rscript_path
self.rscript_path=rscript_path
self.strand_info=strand_info
self.allowed_strands=list(allowed_strands)
self.read_threshold=read_threshold
@staticmethod
def _encode_png_image(png_file):
'''
A static method for encoding png files to string data
:param png_file: A png filepath
:returns: A string data
'''
try:
if not os.path.exists(png_file):
raise ValueError('File not present')
encoded=base64.b64encode(open(png_file, "rb").read()).decode()
return encoded
except:
raise
def check_lane_effect_and_log_report(self,project_name,sample_name,output_file):
'''
A function for generating batch effect report for a sample and project
:param project_name: A project name for the report file
:param sample_name: A sample name for the report file
:param output_file: Path of the output report file
'''
try:
if self.strand_info not in self.allowed_strands:
raise ValueError('{0} is not a valid strand'.format(self.strand_info))
temp_dir=get_temp_dir(use_ephemeral_space=False)
temp_merged_output=os.path.join(temp_dir,'merged.csv')
temp_cpm_output=os.path.join(temp_dir,'merged_cpm.csv')
temp_png_output=os.path.join(temp_dir,'plot.png')
temp_clustermap=os.path.join(temp_dir,'clustermap.png')
temp_corr=os.path.join(temp_dir,'corr.png')
temp_pca_flowcell=os.path.join(temp_dir,'pca_flowcell.png')
temp_pca_flowcell_lane=os.path.join(temp_dir,'pca_flowcell_lane.png')
temp_html_report=os.path.join(temp_dir,
os.path.basename(self.template_file))
check_file_path(self.input_json_file)
check_file_path(self.rscript_path)
check_file_path(self.batch_effect_rscript_path)
with open(self.input_json_file,'r') as json_data:
input_list=json.load(json_data)
if len(input_list)<2:
raise ValueError('Minimum two input files are required for lane level batch effect checking')
gene_name_label='gene_name'
final_df=pd.DataFrame()
for entry in input_list:
file=entry.get('file')
flowcell=entry.get('flowcell')
lane=entry.get('lane')
if file is None or \
flowcell is None or \
lane is None:
raise ValueError('Missing required info for batch effect check: {0}'.\
format(entry))
unstranded_label='unstranded_{0}_{1}'.format(flowcell,lane)
reverse_strand_label='reverse_strand_{0}_{1}'.format(flowcell,lane)
forward_strand_label='forward_strand_{0}_{1}'.format(flowcell,lane)
data=pd.read_csv(\
file,
sep='\t',
header=None,
skiprows=4,
index_col=False,
names=[gene_name_label,
unstranded_label,
forward_strand_label,
reverse_strand_label])
if self.strand_info=='reverse_strand':
data=data[[gene_name_label,
reverse_strand_label
]]
data=data[data[reverse_strand_label]>self.read_threshold] # filter series and remove any low value gene
elif self.strand_info=='forward_strand':
data=data[[gene_name_label,
forward_strand_label
]]
data=data[data[forward_strand_label]>self.read_threshold] # filter series and remove any low value gene
elif self.strand_info=='unstranded':
data=data[[gene_name_label,
unstranded_label
]]
data=data[data[unstranded_label]>self.read_threshold] # filter series and remove any low value gene
if len(final_df.index)==0:
final_df=copy(data)
else:
final_df=final_df.\
merge(data,
how='outer',
on=gene_name_label)
final_df=final_df.dropna().set_index(gene_name_label) # remove any row with NA values from df
final_df.\
applymap(lambda x: float(x)).\
to_csv(temp_merged_output,index=True) # dump raw counts as csv file
rscript_cmd=[quote(self.rscript_path),
quote(self.batch_effect_rscript_path),
quote(temp_merged_output),
quote(temp_cpm_output),
quote(temp_png_output)
]
subprocess.check_call(' '.join(rscript_cmd),shell=True) # run r script for cpm counts
check_file_path(temp_cpm_output) # check output file
mod_data=pd.read_csv(temp_cpm_output).\
rename(columns={'Unnamed: 0':gene_name_label}).\
set_index(gene_name_label) # read output file
sns_fig=sns.clustermap(mod_data,figsize=(10,10))
sns_fig.fig.savefig(temp_clustermap)
check_file_path(temp_clustermap) # plot clustermap
corr_df=mod_data.corr()
cmap=sns.diverging_palette(220, 10,
as_cmap=True)
fig,ax=plt.subplots(figsize=(7,7))
sns.heatmap(corr_df,
cmap=cmap,
square=True,
linewidths=.5,
cbar_kws={"shrink": .4},
ax=ax);
plt.savefig(temp_corr)
check_file_path(temp_corr) # plot correlation values
pca = PCA(n_components=2)
X_r = pca.fit(mod_data.T).transform(mod_data.T)
      pattern1=re.compile(r'(reverse_strand|forward_strand|unstranded)_(\S+)_([1-8])')
      pattern2=re.compile(r'(reverse_strand|forward_strand|unstranded)_(\S+_[1-8])')
results_df=pd.DataFrame(\
{'PCA1':X_r[:,0],
'PCA2':X_r[:,1],
'flowcell':[re.match(pattern1,label).group(2)
if re.match(pattern1,label) else label
for label in mod_data.T.index],
'flowcell_lane':[re.match(pattern2,label).group(2)
if re.match(pattern2,label) else label
for label in mod_data.T.index]
})
pca_plot=sns.lmplot('PCA1',
'PCA2',
hue='flowcell',
data=results_df,
fit_reg=False);
pca_plot.fig.savefig(temp_pca_flowcell) # plot flowcell level pca
pca_plot=sns.lmplot('PCA1',
'PCA2',
hue='flowcell_lane',
data=results_df,
fit_reg=False);
pca_plot.fig.savefig(temp_pca_flowcell_lane) # plot flowcell-lane level pca
template_env=Environment(\
loader=FileSystemLoader(\
searchpath=os.path.dirname(self.template_file)),
autoescape=select_autoescape(['xml']))
template_file=template_env.\
get_template(os.path.basename(self.template_file))
template_file.\
stream(ProjectName=project_name,
SampleName=sample_name,
mdsPlot=self._encode_png_image(png_file=temp_png_output),
clustermapPlot=self._encode_png_image(png_file=temp_clustermap),
corrPlot=self._encode_png_image(png_file=temp_corr),
pca1Plot=self._encode_png_image(png_file=temp_pca_flowcell),
pca2Plot=self._encode_png_image(png_file=temp_pca_flowcell_lane),
).\
dump(temp_html_report)
copy_local_file(temp_html_report,
output_file,
force=True)
except:
raise
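# Hedged usage sketch (all paths below are illustrative and not part of this
# module):
#
#     report = Batch_effect_report(
#       input_json_file='/path/to/star_gene_counts.json',
#       template_file='/path/to/batch_effect_template.html',
#       rscript_path='/usr/bin/Rscript',
#       batch_effect_rscript_path='/path/to/cpm_conversion.R',
#       strand_info='reverse_strand')
#     report.check_lane_effect_and_log_report(
#       project_name='PROJECT_A',
#       sample_name='SAMPLE_1',
#       output_file='/path/to/SAMPLE_1_batch_effect.html')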
| apache-2.0 |
kidaa/crystal-1 | crystal/utils.py | 2 | 3815 | from collections import defaultdict
import toolshed as ts
import numpy as np
import pandas as pd
from .crystal import Feature, CountFeature
import os
def example_random_cluster(n_samples=20, n_sites=4, seed=42):
np.random.seed(seed)
if n_samples % 2 != 0: n_samples += 1
    covs = pd.DataFrame({'gender': ['F'] * (n_samples // 2) + ['M'] * (n_samples // 2),
'age': np.random.uniform(10, 25, size=n_samples) })
methylation = np.random.normal(0.15, 0.75, size=(n_sites, n_samples))
cluster = [Feature('chr1', (i + 1) * 10, m) for i, m in enumerate(methylation)]
covs['id'] = ['id_%i' %i for i in range(len(covs))]
return covs, cluster
def real_cluster():
path = os.path.join(os.path.dirname(__file__), "tests")
meth = pd.read_csv('%s/real_cluster.csv' % path, index_col=0)
chroms = [x.split(":")[0] for x in meth.index]
starts = [int(x.split(":")[1]) for x in meth.index]
cluster = [Feature(chroms[i], starts[i], np.array(meth.ix[i, :])) for i in
range(len(meth))]
covs = pd.read_csv('%s/covs.csv' % path)
return covs, cluster
def real_count_cluster():
path = os.path.join(os.path.dirname(__file__), "tests")
c = pd.read_csv('%s/m.counts.csv' % path, index_col=0)
m = pd.read_csv('%s/m.methylated.csv' % path, index_col=0)
chroms = [x.split(":")[0] for x in m.index]
starts = [int(x.split(":")[1]) for x in c.index]
cluster = [CountFeature(chroms[i], starts[i],
np.array(m.ix[i, :]),
np.array(c.ix[i, :]))
for i in range(len(m))]
covs = pd.read_table('%s/m.covs.txt' % path)
return covs, cluster
def write_cluster(cluster, fh, float_format="%.4f", count_fh=None):
"""
Write a cluster to file.
Parameters
----------
cluster : cluster
a cluster from aclust (or just a list of features)
fh : filehandle
count_fh : filehandle
if cluster is of `CountFeature` then a count_fh must be
specified so that the counts can be written to file as
well.
"""
fmt = "{chrom}:{position}\t{values}\n"
if isinstance(cluster[0], Feature):
for f in cluster:
values = "\t".join((float_format % v for v in f.values))
fh.write(fmt.format(chrom=f.chrom, position=f.position, values=values))
elif isinstance(cluster[0], CountFeature):
assert count_fh is not None
for f in cluster:
fh.write(fmt.format(chrom=f.chrom, position=f.position,
values="\t".join(f.methylated)))
count_fh.write(fmt.format(chrom=f.chrom, position=f.position,
values="\t".join(f.counts)))
def roc_out(p_bed, p_col, truth_region_bed, exclude=('-1', 'NA', 'nan')):
"""Create ROC for a bed file of p-values given known truth regions.
Parameters
----------
p_bed : file
p_col : int
column containing the p-value from `p_bed`
truth_region_bed : file
contains the true regions
"""
p_col -= 1 # 0-based
regions = defaultdict(list)
for toks in ts.reader(truth_region_bed, header=False):
if not (toks[1] + toks[2]).isdigit(): continue
regions[toks[0]].append((int(toks[1]), int(toks[2])))
truths = []
vals = []
for toks in ts.reader(p_bed, header=False):
if not (toks[1] + toks[2]).isdigit(): continue
reg = regions[toks[0]]
s, e = int(toks[1]), int(toks[2])
p = toks[p_col]
if p in exclude: continue
vals.append(1.0 - float(p))
truth = any(rs <= s <= re or rs <= e <= re for rs, re in reg)
truths.append(truth)
return np.array(truths).astype(int), np.array(vals)
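# Hedged usage sketch for roc_out (file names and the p-value column are
# illustrative); the returned arrays feed directly into scikit-learn's
# ROC helpers:
#
#     truths, vals = roc_out('pvalues.bed', 4, 'true_regions.bed')
#     from sklearn.metrics import roc_curve, auc
#     fpr, tpr, _ = roc_curve(truths, vals)
#     print(auc(fpr, tpr))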
| mit |
Equitable/trump | trump/reporting/objects.py | 2 | 8364 | # -*- coding: utf-8 -*-
"""
Created on Mon May 04 06:49:37 2015
Creator: JMcLarty
"""
from __future__ import division
import sys
import traceback as trcbm
import pandas as pd
import datetime as dt
from collections import OrderedDict as odict
class HandlePointReport(object):
def __init__(self, handlepoint, trace):
self.hpoint = handlepoint
self.trace = trace
@property
def df(self):
return pd.DataFrame({'handlepoint' : self.hpoint,
'tracelineno' : i,
'file' : tr[0],
'traceline' : tr[1],
'tracefunc' : tr[2],
'tracecode' : tr[3]}
for i, tr in enumerate(self.trace))
@property
def html(self):
return "{}\n{}".format(self.hpoint, self.trace[-3:])
class ReportPoint(object):
def __init__(self, reportpoint, attribute, value, extended=None):
self.rpoint = reportpoint
self.attribute = attribute
self.value = value
self.extended = extended
@property
def df(self):
return pd.DataFrame({'reportpoint' : self.rpoint, 'attribute' : self.attribute, 'value' : self.value, 'extended' : self.extended})
@property
def html(self):
return "{} {} {}\n{}".format(self.rpoint, self.attribute, self.value,
self.extended)
class FeedReport(object):
def __init__(self, num):
self.num = num
self.tstamp = dt.datetime.now()
self.handlepoints = []
self.reportpoints = []
@property
def hpdf(self):
dfs = [hp.df for hp in self.handlepoints]
if len(dfs):
df = pd.concat(dfs, axis=0)
df['tstamp'] = [self.tstamp] * len(df)
df['feednum'] = [self.num] * len(df)
return df
else:
return pd.DataFrame()
def add_handlepoint(self, hpreport):
"""Appends a HandlePointReport"""
self.handlepoints.append(hpreport)
def add_reportpoint(self, rpoint):
"""Appends a ReportPoint"""
self.reportpoints.append(rpoint)
def asodict(self, handlepoints=True, reportpoints=True):
"""Returns an ordered dictionary of handle/report points"""
out = odict()
if handlepoints:
for hp in self.handlepoints:
out[hp.hpoint] = hp.trace
if reportpoints:
for rp in self.reportpoints:
if not (rp.rpoint in out):
out[rp.rpoint] = odict()
                out[rp.rpoint][rp.attribute] = {'value' : rp.value, 'extended': rp.extended}
return out
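    # Illustrative shape of the mapping returned by asodict() (keys depend on
    # the points that were added; values shown are hypothetical):
    #
    #     {'fetched feed': {'somecheck': {'value': True, 'extended': None}},
    #      'a+b': [<traceback tuples>]}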
@property
def html(self):
thtml = []
thtml.append("<h3>{}</h3>".format(self.num))
thtml.append("<h5>{}</h5>".format(self.tstamp))
for hp in self.handlepoints:
thtml.append(hp.html)
thtml.append("<br>")
for rp in self.reportpoints:
thtml.append(rp.html)
thtml.append("<br>")
return "".join(thtml)
class SymbolReport(object):
def __init__(self, name):
self.name = name
self.freports = []
self.handlepoints = []
self.reportpoints = []
@property
def hpdf(self):
fddfs = [fd.hpdf for fd in self.freports]
dfs = [hp.df for hp in self.handlepoints]
if len(dfs):
df = pd.concat(dfs, axis=0)
            df['tstamp'] = [0] * len(df)
            df['feednum'] = [-1] * len(df)
fddfs.append(df)
df = pd.concat(fddfs, axis=0)
df['symbol'] = [self.name] * len(df)
return df
def add_feedreport(self, freport):
"""Appends a FeedReport"""
self.freports.append(freport)
def add_handlepoint(self, hpreport):
"""Appends a HandlePointReport"""
self.handlepoints.append(hpreport)
def add_reportpoint(self, rpoint):
"""Appends a ReportPoint"""
self.reportpoints.append(rpoint)
def asodict(self, freports=True, handlepoints=True, reportpoints=True):
"""Returns an ordered dictionary of feed, and handle/report points"""
out = odict()
if freports:
for fr in self.freports:
out[fr.num] = {'tstamp' : fr.tstamp,
'report' : fr.asodict(handlepoints, reportpoints)}
if handlepoints:
for hp in self.handlepoints:
out[hp.hpoint] = hp.trace
if reportpoints:
for rp in self.reportpoints:
if not (rp.rpoint in out):
out[rp.rpoint] = odict()
                out[rp.rpoint][rp.attribute] = {'value' : rp.value, 'extended': rp.extended}
return out
def all_handlepoints(self):
out = odict()
for fr in self.freports:
for hp in fr.handlepoints:
if not (hp.hpoint in out):
out[hp.hpoint] = []
out[hp.hpoint].append((self.name, fr.num, hp.trace))
for hp in self.handlepoints:
if not (hp.hpoint in out):
out[hp.hpoint] = []
out[hp.hpoint].append((self.name, None, hp.trace))
return out
@property
def html(self):
thtml = []
thtml.append("<h2>{}</h2>".format(self.name))
for fr in self.freports:
thtml.append(fr.html)
thtml.append("<br>")
for hp in self.handlepoints:
thtml.append(hp.html)
thtml.append("<br>")
for rp in self.reportpoints:
thtml.append(rp.html)
thtml.append("<br>")
return "".join(thtml)
class TrumpReport(object):
def __init__(self, name):
self.name = name
self.sreports = []
def add_symbolreport(self, sreport):
self.sreports.append(sreport)
@property
def hpdf(self):
dfs = [sr.hpdf for sr in self.sreports]
if len(dfs):
dfs = pd.concat(dfs, axis=0)
return dfs.reset_index(drop=True)
else:
return pd.DataFrame()
@property
def html(self):
thtml = []
thtml.append("<h1>{}</h1>".format(self.name))
for sr in self.sreports:
thtml.append(sr.html)
thtml.append("<br>")
return "".join(thtml)
def all_handlepoints(self):
out = {}
for sr in self.sreports:
sout = sr.all_handlepoints()
for hp in sout.keys():
if not (hp in out):
out[hp] = []
out[hp].append(sout[hp])
return out
if __name__ == '__main__':
tr = TrumpReport("Test Report")
for fakesym in list('ABCDE'):
sr = SymbolReport(fakesym)
for fakefeed in list('123'):
fr = FeedReport(fakefeed)
rp = ReportPoint("fetched feed", "somecheck", True)
fr.add_reportpoint(rp)
rp = ReportPoint("fetched feed", "othercheck", 50)
fr.add_reportpoint(rp)
try:
a = b + "a problem"
except:
typ, val, tback = sys.exc_info()
tbextract = trcbm.extract_tb(tback)
hp = HandlePointReport("a+b", tbextract)
fr.add_handlepoint(hp)
rp = ReportPoint("add a and b", "goodcheck", "ab")
fr.add_reportpoint(rp)
try:
a = 4 + "a problem"
except:
typ, val, tback = sys.exc_info()
tbextract = trcbm.extract_tb(tback)
hp = HandlePointReport("4th problem", tbextract)
fr.add_handlepoint(hp)
sr.add_feedreport(fr)
rp = ReportPoint("symbol done", "validwhat", True, pd.DataFrame([1,2,3,4]))
sr.add_reportpoint(rp)
tr.add_symbolreport(sr)
f = open("C:\\jmclarty\\tr.html",'w+')
f.write(tr.html)
f.close() | bsd-3-clause |
ahoyosid/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
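# Hedged sanity check added for illustration (not part of the original
# example): with soft voting, the ensemble's probabilities are the weighted
# average of the individual classifiers' probabilities.
manual_soft_vote = np.average(probas[:-1], axis=0, weights=[1, 1, 5])
print('weighted average matches VotingClassifier output:',
      np.allclose(manual_soft_vote, probas[-1]))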
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/graphics/tukeyplot.py | 33 | 2473 | from statsmodels.compat.python import range
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.lines as lines
def tukeyplot(results, dim=None, yticklabels=None):
npairs = len(results)
fig = plt.figure()
fsp = fig.add_subplot(111)
fsp.axis([-50,50,0.5,10.5])
fsp.set_title('95 % family-wise confidence level')
fsp.title.set_y(1.025)
fsp.set_yticks(np.arange(1,11))
fsp.set_yticklabels(['V-T','V-S','T-S','V-P','T-P','S-P','V-M',
'T-M','S-M','P-M'])
#fsp.yaxis.set_major_locator(mticker.MaxNLocator(npairs))
fsp.yaxis.grid(True, linestyle='-', color='gray')
fsp.set_xlabel('Differences in mean levels of Var', labelpad=8)
fsp.xaxis.tick_bottom()
fsp.yaxis.tick_left()
xticklines = fsp.get_xticklines()
for xtickline in xticklines:
xtickline.set_marker(lines.TICKDOWN)
xtickline.set_markersize(10)
xlabels = fsp.get_xticklabels()
for xlabel in xlabels:
xlabel.set_y(-.04)
yticklines = fsp.get_yticklines()
for ytickline in yticklines:
ytickline.set_marker(lines.TICKLEFT)
ytickline.set_markersize(10)
ylabels = fsp.get_yticklabels()
for ylabel in ylabels:
ylabel.set_x(-.04)
for pair in range(npairs):
data = .5+results[pair]/100.
#fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data[1], linewidth=1.25,
fsp.axhline(y=npairs-pair, xmin=data.mean(), xmax=data[1], linewidth=1.25,
color='blue', marker="|", markevery=1)
fsp.axhline(y=npairs-pair, xmin=data[0], xmax=data.mean(), linewidth=1.25,
color='blue', marker="|", markevery=1)
#for pair in range(npairs):
# data = .5+results[pair]/100.
# data = results[pair]
# data = np.r_[data[0],data.mean(),data[1]]
# l = plt.plot(data, [npairs-pair]*len(data), color='black',
# linewidth=.5, marker="|", markevery=1)
fsp.axvline(x=0, linestyle="--", color='black')
fig.subplots_adjust(bottom=.125)
results = np.array([[-10.04391794, 26.34391794],
[-21.45225794, 14.93557794],
[ 5.61441206, 42.00224794],
[-13.40225794, 22.98557794],
[-29.60225794, 6.78557794],
[ -2.53558794, 33.85224794],
[-21.55225794, 14.83557794],
[ 8.87275206, 45.26058794],
[-10.14391794, 26.24391794],
[-37.21058794, -0.82275206]])
#plt.show()
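# Hedged usage sketch: the hard-coded y-tick labels above assume exactly the
# ten pairwise comparisons in `results`, so the call below is illustrative.
#
# tukeyplot(results)
# plt.show()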
| bsd-3-clause |
koldunovn/geopandas | geopandas/geoseries.py | 8 | 10052 | from functools import partial
from warnings import warn
import numpy as np
from pandas import Series, DataFrame
from pandas.core.indexing import _NDFrameIndexer
from pandas.util.decorators import cache_readonly
import pyproj
from shapely.geometry import box, shape, Polygon, Point
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.base import BaseGeometry
from shapely.ops import transform
from geopandas.plotting import plot_series
from geopandas.base import GeoPandasBase
OLD_PANDAS = issubclass(Series, np.ndarray)
def _is_empty(x):
try:
return x.is_empty
except:
return False
def _convert_array_args(args):
if len(args) == 1 and isinstance(args[0], BaseGeometry):
args = ([args[0]],)
return args
class _CoordinateIndexer(_NDFrameIndexer):
""" Indexing by coordinate slices """
def _getitem_tuple(self, tup):
obj = self.obj
xs, ys = tup
# handle numeric values as x and/or y coordinate index
if type(xs) is not slice:
xs = slice(xs, xs)
if type(ys) is not slice:
ys = slice(ys, ys)
# don't know how to handle step; should this raise?
if xs.step is not None or ys.step is not None:
warn("Ignoring step - full interval is used.")
xmin, ymin, xmax, ymax = obj.total_bounds
bbox = box(xs.start or xmin,
ys.start or ymin,
xs.stop or xmax,
ys.stop or ymax)
idx = obj.intersects(bbox)
return obj[idx]
class GeoSeries(GeoPandasBase, Series):
"""A Series object designed to store shapely geometry objects."""
_metadata = ['name', 'crs']
def __new__(cls, *args, **kwargs):
kwargs.pop('crs', None)
if OLD_PANDAS:
args = _convert_array_args(args)
arr = Series.__new__(cls, *args, **kwargs)
else:
arr = Series.__new__(cls)
if type(arr) is GeoSeries:
return arr
else:
return arr.view(GeoSeries)
def __init__(self, *args, **kwargs):
if not OLD_PANDAS:
args = _convert_array_args(args)
crs = kwargs.pop('crs', None)
super(GeoSeries, self).__init__(*args, **kwargs)
self.crs = crs
self._invalidate_sindex()
def append(self, *args, **kwargs):
return self._wrapped_pandas_method('append', *args, **kwargs)
@property
def geometry(self):
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Alternate constructor to create a GeoSeries from a file
Parameters
----------
filename : str
File path or file handle to read from. Depending on which kwargs
are included, the content of filename may vary, see:
http://toblerity.github.io/fiona/README.html#usage
for usage details.
kwargs : key-word arguments
These arguments are passed to fiona.open, and can be used to
access multi-layer data, data stored within archives (zip files),
etc.
"""
import fiona
geoms = []
with fiona.open(filename, **kwargs) as f:
crs = f.crs
for rec in f:
geoms.append(shape(rec['geometry']))
g = GeoSeries(geoms)
g.crs = crs
return g
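    # Hypothetical usage of from_file (the shapefile path is illustrative):
    #
    #     boros = GeoSeries.from_file('nybb.shp')
    #     print(boros.crs, len(boros))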
@property
def __geo_interface__(self):
"""Returns a GeoSeries as a python feature collection
"""
from geopandas import GeoDataFrame
return GeoDataFrame({'geometry': self}).__geo_interface__
def to_file(self, filename, driver="ESRI Shapefile", **kwargs):
from geopandas import GeoDataFrame
data = GeoDataFrame({"geometry": self,
"id":self.index.values},
index=self.index)
data.crs = self.crs
data.to_file(filename, driver, **kwargs)
#
# Implement pandas methods
#
@property
def _constructor(self):
return GeoSeries
def _wrapped_pandas_method(self, mtd, *args, **kwargs):
"""Wrap a generic pandas method to ensure it returns a GeoSeries"""
val = getattr(super(GeoSeries, self), mtd)(*args, **kwargs)
if type(val) == Series:
val.__class__ = GeoSeries
val.crs = self.crs
val._invalidate_sindex()
return val
def __getitem__(self, key):
return self._wrapped_pandas_method('__getitem__', key)
def sort_index(self, *args, **kwargs):
return self._wrapped_pandas_method('sort_index', *args, **kwargs)
def take(self, *args, **kwargs):
return self._wrapped_pandas_method('take', *args, **kwargs)
def select(self, *args, **kwargs):
return self._wrapped_pandas_method('select', *args, **kwargs)
@property
def _can_hold_na(self):
return False
def __finalize__(self, other, method=None, **kwargs):
""" propagate metadata from other to self """
# NOTE: backported from pandas master (upcoming v0.13)
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def copy(self, order='C'):
"""
Make a copy of this GeoSeries object
Parameters
----------
        order : {'C', 'F'}, default 'C'
            Memory layout order to use when copying the underlying values
Returns
-------
copy : GeoSeries
"""
# FIXME: this will likely be unnecessary in pandas >= 0.13
return GeoSeries(self.values.copy(order), index=self.index,
name=self.name).__finalize__(self)
def isnull(self):
"""Null values in a GeoSeries are represented by empty geometric objects"""
non_geo_null = super(GeoSeries, self).isnull()
val = self.apply(_is_empty)
return np.logical_or(non_geo_null, val)
def fillna(self, value=None, method=None, inplace=False,
**kwargs):
"""Fill NA/NaN values with a geometry (empty polygon by default).
"method" is currently not implemented for pandas <= 0.12.
"""
if value is None:
value = Point()
if not OLD_PANDAS:
return super(GeoSeries, self).fillna(value=value, method=method,
inplace=inplace, **kwargs)
else:
# FIXME: this is an ugly way to support pandas <= 0.12
if method is not None:
raise NotImplementedError('Fill method is currently not implemented for GeoSeries')
if isinstance(value, BaseGeometry):
result = self.copy() if not inplace else self
mask = self.isnull()
result[mask] = value
if not inplace:
return GeoSeries(result)
else:
raise ValueError('Non-geometric fill values not allowed for GeoSeries')
def align(self, other, join='outer', level=None, copy=True,
fill_value=None, **kwargs):
if fill_value is None:
fill_value = Point()
left, right = super(GeoSeries, self).align(other, join=join,
level=level, copy=copy,
fill_value=fill_value,
**kwargs)
if isinstance(other, GeoSeries):
return GeoSeries(left), GeoSeries(right)
else: # It is probably a Series, let's keep it that way
return GeoSeries(left), right
def __contains__(self, other):
"""Allow tests of the form "geom in s"
Tests whether a GeoSeries contains a geometry.
Note: This is not the same as the geometric method "contains".
"""
if isinstance(other, BaseGeometry):
return np.any(self.geom_equals(other))
else:
return False
def plot(self, *args, **kwargs):
return plot_series(self, *args, **kwargs)
#
# Additional methods
#
def to_crs(self, crs=None, epsg=None):
"""Transform geometries to a new coordinate reference system
This method will transform all points in all objects. It has
        no notion of projecting entire geometries. All segments
joining points are assumed to be lines in the current
projection, not geodesics. Objects crossing the dateline (or
other projection boundary) will have undesirable behavior.
"""
from fiona.crs import from_epsg
if self.crs is None:
raise ValueError('Cannot transform naive geometries. '
'Please set a crs on the object first.')
if crs is None:
try:
crs = from_epsg(epsg)
except TypeError:
raise TypeError('Must set either crs or epsg for output.')
proj_in = pyproj.Proj(preserve_units=True, **self.crs)
proj_out = pyproj.Proj(preserve_units=True, **crs)
project = partial(pyproj.transform, proj_in, proj_out)
result = self.apply(lambda geom: transform(project, geom))
result.__class__ = GeoSeries
result.crs = crs
result._invalidate_sindex()
return result
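    # Hypothetical usage of to_crs (EPSG codes are illustrative): reproject a
    # series that already carries a CRS to Web Mercator.
    #
    #     gs.crs = {'init': 'epsg:4326'}
    #     gs_merc = gs.to_crs(epsg=3857)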
#
# Implement standard operators for GeoSeries
#
def __xor__(self, other):
"""Implement ^ operator as for builtin set type"""
return self.symmetric_difference(other)
def __or__(self, other):
"""Implement | operator as for builtin set type"""
return self.union(other)
def __and__(self, other):
"""Implement & operator as for builtin set type"""
return self.intersection(other)
def __sub__(self, other):
"""Implement - operator as for builtin set type"""
return self.difference(other)
GeoSeries._create_indexer('cx', _CoordinateIndexer)
| bsd-3-clause |
OGGM/oggm | oggm/shop/histalp.py | 2 | 6251 | import logging
import warnings
# External libs
import numpy as np
import pandas as pd
from scipy import stats
# Optional libs
try:
import salem
except ImportError:
pass
# Locals
from oggm import cfg
from oggm import utils
from oggm import entity_task
from oggm.exceptions import InvalidParamsError
# Module logger
log = logging.getLogger(__name__)
HISTALP_SERVER = 'http://www.zamg.ac.at/histalp/download/grid5m/'
def set_histalp_url(url):
"""If you want to use a different server for HISTALP (for testing, etc)."""
global HISTALP_SERVER
HISTALP_SERVER = url
@utils.locked_func
def get_histalp_file(var=None):
"""Returns a path to the desired HISTALP baseline climate file.
If the file is not present, download it.
Parameters
----------
var : str
'tmp' for temperature
'pre' for precipitation
Returns
-------
str
path to the file
"""
# Be sure input makes sense
if var not in ['tmp', 'pre']:
raise InvalidParamsError('HISTALP variable {} '
'does not exist!'.format(var))
# File to look for
if var == 'tmp':
bname = 'HISTALP_temperature_1780-2014.nc'
else:
bname = 'HISTALP_precipitation_all_abs_1801-2014.nc'
h_url = HISTALP_SERVER + bname + '.bz2'
return utils.file_extractor(utils.file_downloader(h_url))
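# Hedged usage sketch: the helper downloads (and caches) the requested HISTALP
# grid on first use and returns the extracted local path afterwards.
#
#     tmp_path = get_histalp_file('tmp')  # temperature grid
#     pre_path = get_histalp_file('pre')  # precipitation grid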
@entity_task(log, writes=['climate_historical'])
def process_histalp_data(gdir, y0=None, y1=None, output_filesuffix=None):
"""Processes and writes the HISTALP baseline climate data for this glacier.
Extracts the nearest timeseries and writes everything to a NetCDF file.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
y0 : int
the starting year of the timeseries to write. The default is to take
1850 (because the data is quite bad before that)
y1 : int
        the ending year of the timeseries to write. The default is to take
the entire time period available in the file, but with this kwarg
you can shorten it (to save space or to crop bad data)
output_filesuffix : str
        this adds a suffix to the output file (useful to avoid overwriting
previous experiments)
"""
if cfg.PATHS.get('climate_file', None):
warnings.warn("You seem to have set a custom climate file for this "
"run, but are using the default HISTALP climate file "
"instead.")
if cfg.PARAMS['baseline_climate'] != 'HISTALP':
raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
"set to HISTALP.")
# read the time out of the pure netcdf file
ft = get_histalp_file('tmp')
fp = get_histalp_file('pre')
with utils.ncDataset(ft) as nc:
vt = nc.variables['time']
assert vt[0] == 0
assert vt[-1] == vt.shape[0] - 1
t0 = vt.units.split(' since ')[1][:7]
time_t = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
with utils.ncDataset(fp) as nc:
vt = nc.variables['time']
assert vt[0] == 0.5
assert vt[-1] == vt.shape[0] - .5
t0 = vt.units.split(' since ')[1][:7]
time_p = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
# Now open with salem
nc_ts_tmp = salem.GeoNetcdf(ft, time=time_t)
nc_ts_pre = salem.GeoNetcdf(fp, time=time_p)
# Some default
if y0 is None:
y0 = 1850
# set temporal subset for the ts data (hydro years)
# the reference time is given by precip, which is shorter
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts_pre.time.year
y0 = yrs[0] if y0 is None else y0
y1 = yrs[-1] if y1 is None else y1
nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts_pre.time
ny, r = divmod(len(time), 12)
assert r == 0
# Units
assert nc_ts_tmp._nc.variables['HSURF'].units.lower() in ['m', 'meters',
'meter',
'metres',
'metre']
assert nc_ts_tmp._nc.variables['T_2M'].units.lower() in ['degc', 'degrees',
'degrees celcius',
'degree', 'c']
assert nc_ts_pre._nc.variables['TOT_PREC'].units.lower() in ['kg m-2',
'l m-2', 'mm',
'millimeters',
'millimeter']
# geoloc
lon = gdir.cenlon
lat = gdir.cenlat
nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# read the data
temp = nc_ts_tmp.get_vardata('T_2M')
prcp = nc_ts_pre.get_vardata('TOT_PREC')
hgt = nc_ts_tmp.get_vardata('HSURF')
ref_lon = nc_ts_tmp.get_vardata('lon')
ref_lat = nc_ts_tmp.get_vardata('lat')
source = nc_ts_tmp._nc.title[:7]
nc_ts_tmp._nc.close()
nc_ts_pre._nc.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(temp):
slope, _, _, p_val, _ = stats.linregress(hgt.flatten(),
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, prcp[:, 1, 1], temp[:, 1, 1],
hgt[1, 1], ref_lon[1], ref_lat[1],
gradient=igrad,
filesuffix=output_filesuffix,
source=source)
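# Hedged usage sketch (assumes the usual OGGM entity-task workflow; names
# outside this module, such as `gdirs`, are illustrative):
#
#     cfg.PARAMS['baseline_climate'] = 'HISTALP'
#     from oggm import workflow
#     workflow.execute_entity_task(process_histalp_data, gdirs, y0=1850)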
| bsd-3-clause |
sorend/fylearn | fylearn/fpcga.py | 1 | 14513 | # -*- coding: utf-8 -*-
"""Fuzzy pattern classifier with genetic algorithm based methods
The module structure is the following:
- The "FuzzyPatternClassifierGA" is a FPC where the membership
functions are learned using genetic algorithms in global scheme [1]
- The "FuzzyPatternClassifierLGA" also learns mus using a GA, but in
local scheme [1].
References:
-----------
[1] S. A. Davidsen, E. Sreedevi, and M. Padmavathamma, "Local and global genetic fuzzy pattern
classifiers," In Proc. Machine Learning and Data Mining in Pattern Recognition, pp. 55-69,
2015. url: https://link.springer.com/chapter/10.1007/978-3-319-21024-7_4
"""
import logging
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_array
from sklearn.metrics import accuracy_score, mean_squared_error
import fylearn.fuzzylogic as fl
from fylearn.ga import GeneticAlgorithm, helper_fitness, UniformCrossover, helper_n_generations
from fylearn.ga import UnitIntervalGeneticAlgorithm
#
# Authors: Søren Atmakuri Davidsen <[email protected]>
#
# default aggregation rules to use
AGGREGATION_RULES = (fl.prod, fl.mean)
# requires 1 gene
def build_aggregation(X, y, rules, chromosome, idx):
i = int(chromosome[idx] * len(rules))
if i < 0:
i = 0
if i >= len(rules):
i = len(rules) - 1
return rules[i](X, y)
# requires 3 genes
def build_pi_membership(chromosome, idx):
a, r, b = sorted(chromosome[idx:idx + 3])
return fl.PiSet(a=a, r=r, b=b)
# requires 4 genes
def build_trapezoidal_membership(chromosome, idx):
a, b, c, d = sorted(chromosome[idx:idx + 4])
return fl.TrapezoidalSet(a, b, c, d)
def build_t_membership(chromosome, idx):
a, b, c = sorted(chromosome[idx:idx + 3])
return fl.TriangularSet(a, b, c)
class StaticFunction:
def __call__(self, X):
return 0.5
def __str__(self):
return "S(0.5)"
# requires 0 genes
def build_static_membership(chromosome, idx):
return StaticFunction()
# default definition of membership function factories
MEMBERSHIP_FACTORIES = (build_pi_membership,)
# requires 1 gene
def build_membership(mu_factories, chromosome, idx):
i = int(chromosome[idx] * len(mu_factories))
if i < 0:
i = 0
if i >= len(mu_factories):
i = len(mu_factories) - 1
return mu_factories[i](chromosome, idx + 1)
# decodes aggregation and protos from chromosome
def _decode(m, X, y, aggregation_rules, mu_factories, classes, chromosome):
aggregation = build_aggregation(X, y, aggregation_rules, chromosome, 0)
protos = {}
for i in range(len(classes)):
protos[i] = [ build_membership(mu_factories, chromosome, 2 + (i * m * 5) + (j * 4)) for j in range(m) ]
return aggregation, protos
def _predict_one(prototype, aggregation, X):
Mus = np.zeros(X.shape)
for i in range(X.shape[1]):
Mus[:, i] = prototype[i](X[:, i])
return aggregation(Mus)
def _predict(prototypes, aggregation, classes, X):
Mus = np.zeros(X.shape)
R = np.zeros((X.shape[0], len(classes))) # holds output for each class
attribute_idxs = range(X.shape[1])
# class_idx has class_prototypes membership functions
for class_idx, class_prototypes in prototypes.items():
for i in attribute_idxs:
Mus[:, i] = class_prototypes[i](X[:, i])
R[:, class_idx] = aggregation(Mus)
return classes.take(np.argmax(R, 1))
logger = logging.getLogger("fpcga")
class AggregationRuleFactory:
pass
class DummyAggregationRuleFactory(AggregationRuleFactory):
def __init__(self, aggregation_rule):
self.aggregation_rule = aggregation_rule
def __call__(self, X, y):
return self.aggregation_rule
class FuzzyPatternClassifierGA(BaseEstimator, ClassifierMixin):
def get_params(self, deep=False):
return {"iterations": self.iterations,
"epsilon": self.epsilon,
"mu_factories": self.mu_factories,
"aggregation_rules": self.aggregation_rules,
"random_state": self.random_state}
def set_params(self, **kwargs):
for key, value in kwargs.items():
            setattr(self, key, value)
return self
def __init__(self, mu_factories=MEMBERSHIP_FACTORIES, aggregation_rules=AGGREGATION_RULES,
iterations=10, epsilon=0.0001, random_state=None):
if mu_factories is None or len(mu_factories) == 0:
raise ValueError("no mu_factories specified")
if aggregation_rules is None or len(aggregation_rules) == 0:
raise ValueError("no aggregation_rules specified")
if iterations <= 0:
raise ValueError("iterations must be > 0")
self.mu_factories = mu_factories
self.iterations = iterations
self.epsilon = epsilon
self.aggregation_rules = aggregation_rules
self.random_state = random_state
def fit(self, X, y_orig):
def as_factory(r):
return r if isinstance(r, AggregationRuleFactory) else DummyAggregationRuleFactory(r)
self.aggregation_rules__ = [ as_factory(r) for r in self.aggregation_rules ]
X = check_array(X)
self.classes_, _ = np.unique(y_orig, return_inverse=True)
self.m = X.shape[1]
if np.nan in self.classes_:
raise Exception("nan not supported for class values")
self.build_with_ga(X, y_orig)
return self
def predict(self, X):
"""
Predict outputs given examples.
Parameters:
-----------
X : the examples to predict (array or matrix)
Returns:
--------
y_pred : Predicted values for each row in matrix.
"""
if self.protos_ is None:
raise Exception("Prototypes not initialized. Perform a fit first.")
X = check_array(X)
# predict
return _predict(self.protos_, self.aggregation, self.classes_, X)
def build_with_ga(self, X, y):
# accuracy fitness function
def accuracy_fitness_function(chromosome):
# decode the class model from gene
aggregation, mus = _decode(self.m, X, y, self.aggregation_rules__,
self.mu_factories, self.classes_, chromosome)
y_pred = _predict(mus, aggregation, self.classes_, X)
return 1.0 - accuracy_score(y, y_pred)
# number of genes (2 for the aggregation, 4 for each attribute)
n_genes = 2 + (self.m * 5 * len(self.classes_))
logger.info("initializing GA %d iterations" % (self.iterations,))
# initialize
ga = GeneticAlgorithm(fitness_function=helper_fitness(accuracy_fitness_function),
scaling=1.0,
crossover_function=UniformCrossover(0.5),
# crossover_points=range(2, n_genes, 5),
elitism=5, # no elitism
n_chromosomes=100,
n_genes=n_genes,
p_mutation=0.3,
random_state=self.random_state)
last_fitness = None
#
for generation in range(self.iterations):
ga.next()
logger.info("GA iteration %d Fitness (top-4) %s" % (generation, str(np.sort(ga.fitness_)[:4])))
chromosomes, fitnesses = ga.best(10)
aggregation, protos = _decode(self.m, X, y, self.aggregation_rules__,
self.mu_factories, self.classes_, chromosomes[0])
self.aggregation = aggregation
self.protos_ = protos
# check stopping condition
new_fitness = np.mean(fitnesses)
if last_fitness is not None:
d_fitness = last_fitness - new_fitness
if self.epsilon is not None and d_fitness < self.epsilon:
logger.info("Early stop d_fitness %f" % (d_fitness,))
break
last_fitness = new_fitness
# print learned.
logger.info("+- Final: Aggregation %s" % (str(self.aggregation),))
for key, value in self.protos_.items():
logger.info("`- Class-%d" % (key,))
logger.info("`- Membership-fs %s" % (str([ x.__str__() for x in value ]),))
def __str__(self):
if self.protos_ is None:
return "Not trained"
else:
return str(self.aggregation) + str({ "class-" + str(k): v for k, v in self.protos_ })
class FuzzyPatternClassifierLGA(FuzzyPatternClassifierGA):
def decode(self, chromosome):
return [ build_membership(self.mu_factories, chromosome, i * 5) for i in range(self.m) ]
def build_for_class(self, X, y, class_idx):
y_target = np.zeros(y.shape) # create the target of 1 and 0.
y_target[class_idx] = 1.0
n_genes = 5 * self.m
def rmse_fitness_function(chromosome):
proto = self.decode(chromosome)
y_pred = _predict_one(proto, self.aggregation, X)
return mean_squared_error(y_target, y_pred)
logger.info("initializing GA %d iterations" % (self.iterations,))
# initialize
ga = GeneticAlgorithm(fitness_function=helper_fitness(rmse_fitness_function),
scaling=1.0,
crossover_function=UniformCrossover(0.5),
# crossover_points=range(0, n_genes, 5),
elitism=5, # no elitism
n_chromosomes=100,
n_genes=n_genes,
p_mutation=0.3)
# print "population", ga.population_
# print "fitness", ga.fitness_
chromosomes, fitnesses = ga.best(10)
last_fitness = np.mean(fitnesses)
proto = None
#
for generation in range(self.iterations):
ga.next()
logger.info("GA iteration %d Fitness (top-4) %s" % (generation, str(ga.fitness_[:4])))
chromosomes, fitnesses = ga.best(10)
proto = self.decode(chromosomes[0])
# check stopping condition
new_fitness = np.mean(fitnesses)
d_fitness = last_fitness - new_fitness
if self.epsilon is not None and d_fitness < self.epsilon:
logger.info("Early stop d_fitness %f" % (d_fitness,))
break
last_fitness = new_fitness
return proto
def build_with_ga(self, X, y):
self.aggregation = self.aggregation_rules__[0](X, y)
self.protos_ = {}
for class_no, class_value in enumerate(self.classes_):
class_idx = np.array(y == class_value)
proto = self.build_for_class(X, y, class_idx)
self.protos_[class_no] = proto
# print learned.
logger.info("+- Final: Aggregation %s" % (str(self.aggregation),))
for key, value in self.protos_.items():
logger.info("`- Class-%d" % (key,))
logger.info("`- Membership-fs %s" % (str([ x.__str__() for x in value ]),))
class SEFuzzyPatternClassifier(FuzzyPatternClassifierGA):
def get_params(self, deep=False):
return {"iterations": self.iterations,
"aggregation": self.aggregation,
"adjust_center": self.adjust_center}
def set_params(self, **kwargs):
for key, value in kwargs.items():
            setattr(self, key, value)
return self
def __init__(self, aggregation=fl.prod, iterations=25, adjust_center=False):
"""
Constructs classifier
Parameters:
-----------
aggregation : fuzzy aggregation to use.
iterations : number of iterations for the GA.
adjust_center : Allow to adjust center of the membership function.
"""
self.aggregation = aggregation
self.iterations = iterations
self.adjust_center = adjust_center
assert iterations > 0
def build_for_class(self, X, y, class_idx):
# take column-wise min/mean/max for class
mins = np.nanmin(X[class_idx], 0)
means = np.nanmean(X[class_idx], 0)
maxs = np.nanmax(X[class_idx], 0)
ds = (maxs - mins) / 2.0
n_genes = 2 * self.m # adjustment for r and shrinking/expanding value for p/q
B = np.ones(n_genes)
def decode_with_shrinking_expanding(C):
def dcenter(j):
return min(1.0, max(0.0, C[j])) - 0.5 if self.adjust_center else 1.0
return [ fl.PiSet(r=means[j] * dcenter(j),
p=means[j] - (ds[j] * C[j + 1]),
q=means[j] + (ds[j] * C[j + 1]))
for j in range(self.m) ]
y_target = np.zeros(y.shape) # create the target of 1 and 0.
y_target[class_idx] = 1.0
def rmse_fitness_function(chromosome):
proto = decode_with_shrinking_expanding(chromosome)
y_pred = _predict_one(proto, self.aggregation, X)
return mean_squared_error(y_target, y_pred)
logger.info("initializing GA %d iterations" % (self.iterations,))
# initialize
ga = UnitIntervalGeneticAlgorithm(fitness_function=helper_fitness(rmse_fitness_function),
crossover_function=UniformCrossover(0.5),
elitism=3,
n_chromosomes=100,
n_genes=n_genes,
p_mutation=0.3)
ga = helper_n_generations(ga, self.iterations)
chromosomes, fitnesses = ga.best(1)
return decode_with_shrinking_expanding(chromosomes[0]), decode_with_shrinking_expanding(B)
def build_with_ga(self, X, y):
self.protos_ = {}
self.bases_ = {}
for class_no, class_value in enumerate(self.classes_):
class_idx = np.array(y == class_value)
proto, base = self.build_for_class(X, y, class_idx)
self.protos_[class_no] = proto
self.bases_[class_no] = base
def toggle_base(self):
if hasattr(self, "backups_"):
self.protos_ = self.backups_
del self.backups_
else:
self.backups_ = self.protos_
self.protos_ = self.bases_
return self
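# Hedged usage sketch (the data set and the scaling step are illustrative; the
# Pi-set membership functions are parameterised on roughly the unit interval,
# so features are rescaled first):
#
#     from sklearn.datasets import load_iris
#     from sklearn.preprocessing import MinMaxScaler
#     X, y = load_iris(return_X_y=True)
#     X = MinMaxScaler().fit_transform(X)
#     clf = FuzzyPatternClassifierGA(iterations=10).fit(X, y)
#     print(clf.predict(X[:5]))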
| mit |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and drawing them to a matplotlib backend.
For a tutorial of its usage see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts, and STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
can be a single unicode character, a TeX command (i.e. r'\pi'), or a
Type1 symbol name (i.e. 'phi').
"""
# From UTF #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
try:# This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
try:# Is symbol a TeX symbol (i.e. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
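# Illustrative return values (assuming the standard tex2uni table shipped with
# matplotlib):
#
# get_unicode_index(u'a') -> 0x61
# get_unicode_index(r'\pi') -> 0x3c0
# get_unicode_index('-') -> 0x2212 (minus sign, see the note above)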
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
- :meth:`render_filled_rect`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
def render_filled_rect(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.ymax, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
#print self.depth
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = unichr(info.num)
self.svg_glyphs.append(
(info.font, info.fontsize, thetext, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathtextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance between *sym1* and *sym2* for the given fonts.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax*: the ink rectangle of the glyph
- *iceberg*: the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*: TODO
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
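# A rough sketch of how a concrete Fonts subclass is driven: the layout engine
# calls get_metrics()/get_kern() while building the box tree, then
# set_canvas_size() with the final width/height/depth, render_glyph() and
# render_rect_filled() during ship-out, and finally get_results() to hand the
# backend-specific data back to the caller.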
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
# metrics, but that information is just too unreliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
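# TruetypeFonts._get_info() memoizes per-glyph metrics in self.glyphd, keyed by
# (fontname, font_class, sym, fontsize, dpi), so repeated symbols in an
# expression only hit FT2Font once per size/dpi combination.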
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
fontmap = {}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
# The fourth size of '[' is mysteriously missing from the BaKoMa
# font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
for alias, target in [(r'\leftparen', '('),
(r'\rightparen', ')'),
(r'\leftbrace', '{'),
(r'\rightbrace', '}'),
(r'\leftbracket', '['),
(r'\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
fontmap = {}
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
new_fontname = fontname
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname == 'it' and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s'" %
(fontname, sym.encode('ascii', 'backslashreplace')),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSize1',
2 : 'STIXSize2',
3 : 'STIXSize3',
4 : 'STIXSize4',
5 : 'STIXSize5'
}
fontmap = {}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if self._sans and mapping is None:
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping[font_class]
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = 'it'
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr(uniindex)))
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard PostScript fonts for rendering to backend_ps.
Unlike the other font classes, such as BakomaFonts and UnicodeFonts,
this one requires the PS backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm')
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts are raised above the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
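# Worked example of the shrink rules below (illustrative numbers): a Char at
# 12pt that is shrunk twice ends up at 12 * SHRINK_FACTOR**2 = 5.88pt; because
# shrink() only rescales while self.size < NUM_SIZE_LEVELS, only the first
# NUM_SIZE_LEVELS - 1 shrink() calls actually change the dimensions.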
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to look up the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and TrueType fonts, which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
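# Kerning sketch: for two adjacent Chars, get_kerning() returns
# (advance - width) of the first glyph plus the font's pair kern from
# Fonts.get_kern(); Hlist.kern() below turns any non-zero result into an
# explicit Kern node between the two characters.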
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Underfull")
else:
self._set_glue(x, -1, total_shrink, "Overfull")
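# Glue-setting example for hpack()/vpack() (illustrative numbers): if the
# natural width is 10 and hpack(13, 'exactly') is requested, x = 3 > 0, so the
# list stretches; with a total 'fil' stretch of 2.0 at the highest glue order,
# _set_glue() records glue_sign = 1 and glue_set = 3 / 2.0 = 1.5, and ship-out
# later widens each stretchable glue by glue_set times its stretch amount
# (up to rounding).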
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Underfull")
else:
self._set_glue(x, -1, total_shrink, "Overfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
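# The named glue specs above encode TeX's orders of infinity: 'fil', 'fill'
# and 'filll' stretch at orders 1, 2 and 3 respectively, the 'neg_*' variants
# shrink instead, and 'ss' can both stretch and shrink, which is how the
# HCentered/VCentered helpers below center their contents.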
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
:class:`SubSuperCluster` is a sort of hack to get around the fact
that this code does not do a two-pass parse like TeX. This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected; otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected; otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
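# Both Auto*Char helpers above walk the pre-sized alternatives returned by
# get_sized_alternatives_for_symbol(), stop at the first glyph at least as
# large as the target, and then rescale that glyph's fontsize so the final
# character matches the requested height/depth or width exactly.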
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
self.cur_v = base_line + rule_depth
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
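# The module-level `ship` instance is what the MathtextBackend*.get_results()
# methods above invoke as ship(0, -self.depth, box): y grows downward here, and
# __call__() adds box.height to oy to form off_v before handing the box to
# hlist_out(), which then recurses through nested Hlists and Vlists.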
##############################################################################
# PARSER
def Error(msg):
"""
Helper function to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
placeable <<(accent
^ function
^ (c_over_c | symbol)
^ group
^ frac
^ sqrt
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
autoDelim
^ OneOrMore(simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
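# Rough flow of the grammar built above: the expression alternates non_math
# runs with $-delimited math; most rules' parse actions below either return
# Node subclasses (Char, Hlist, Vlist, Kern, ...) or manipulate the state
# stack, so a successful parseString() leaves the finished box tree in
# self._expr via finish().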
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('it', 'rm', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = 'it'
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, 'it', 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
# The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
r'AA' : ( ('rm', 'A', 1.0), (None, '\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent'
}
_wide_accents = set(r"widehat widetilde".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height + hlist.depth + rule_thickness * 2.0
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth * 0.5
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width) + thickness * 10.
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state), # rule
Vbox(0, thickness * 4.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, 'it', '=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
hlist = Hlist([vlist, Hbox(thickness * 2.)])
return [hlist]
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle.asList())
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
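    # A minimal usage sketch (illustrative only; the file name and expression
    # below are hypothetical):
    # >>> parser = MathTextParser('bitmap')
    # >>> depth = parser.to_png('half.png', r'$\frac{1}{2}$', dpi=120,
    # ...                       fontsize=14)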
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
| agpl-3.0 |
mac389/petulant-network | src/latent-schmidt.py | 1 | 1534 | import random, itertools,json
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
def jaccard_similarity(a,b):
a = set(a)
b = set(b)
return len(a&b)/float(len(a|b))
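# A quick illustrative check of the helper above (hypothetical word lists, not
# part of the original pipeline): two four-word lists sharing two words give
# 2 / 6. Note that passing a raw string compares characters, since set() of a
# string yields its characters.
# >>> jaccard_similarity(['cell', 'gene', 'blood', 'stem'],
# ...                    ['gene', 'blood', 'mouse', 'marrow'])
# 0.3333333333333333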
'''
1. Get topics from corpus using gensim
'''
topics = json.load(open('../data/lda_topics.json','rb'))
'''
2. Project texts onto topics using Jaccard similarity
'''
corpus = [line.strip() for line in open('../data/sample_topics','rb').read().splitlines()]
topic_topic_similarities = np.array([[jaccard_similarity(topics[topic_A],topics[topic_B]) for topic_A in topics] for topic_B in topics])
corpus_topic_similarities = np.array([[jaccard_similarity(text,topics[topic]) for text in corpus] for topic in topics])
'''
3. Gram-Schmidt orthogonalize (SciPy) and identify clusters in this space
--generalized eigenvalues are better
'''
eigenvalues,eigenvectors = LA.eig(topic_topic_similarities) #symmetric left and right eigenvectors are the same
projections = eigenvectors.T.dot(corpus_topic_similarities).T #ijth entry denotes the projection of the ith piece of text onto the jth topic
'''
4. Visualize
'''
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(projections,interpolation='nearest',aspect='auto')
ax.set_xticks(range(projections.shape[1]))
ax.set_yticks(range(projections.shape[0]))
ax.set_xlabel('Principal components')
ax.set_ylabel('Piece of text')
cbar = plt.colorbar(cax)
cbar.set_label('Strength of projection')
plt.tight_layout()
plt.savefig('../data/lda_projections.png',dpi=300) | apache-2.0 |
fionahamey/Pseudotime-network-inference | encodingFunctions.py | 1 | 9319 | #!/usr/bin/env python
"""encodingFunctions.py: Provides encoding functions for finding boolean rules governing gene expression with booleanRules.py script."""
__author__ = "Fiona Hamey"
import pandas as pd
import sys
sys.path.append('/Users/fiona/z3/build')
import z3
expressionFile = sys.argv[2]
networkFile = sys.argv[7]
# Read in expression matrix
expression = pd.read_table(expressionFile, sep = "\t",
index_col = 0, header = 0)
# Read in network
network = pd.read_table(networkFile, sep = "\t", header = 0)
# How many genes are there
allGenes = list(expression.columns)
numGenes = len(allGenes)
# Define numbers corresponding to gates, genes and nothing variable
AND = 0
OR = 1
NOTHING = numGenes + 2
# Circuits are encoded as bitvectors
def makeCircuitVar(name):
return z3.BitVec(name, 32)
def makeEnforcedVar(name):
return z3.BitVec(name, 32)
# For each element in bitvector which gene/gate values is it allowed to take
def variableDomains(var, booleanAllowed, possibleInputs):
if booleanAllowed == True:
allowedValues = [0, 1] + possibleInputs
else:
allowedValues = possibleInputs
return z3.Or([var == allowedValues[i] for i in range(len(allowedValues))])
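# Illustrative example (hypothetical gene indices): with booleanAllowed=True
# and possibleInputs=[2, 3], the constraint built above is equivalent to
# z3.Or(var == 0, var == 1, var == 2, var == 3), i.e. the node may be an AND
# gate (0), an OR gate (1), or one of the two genes.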
# If a node is NOTHING then its parent is not allowed to be a gate
def parentsOfNothingArentGates(a, r):
def f(c1,c2,p):
return z3.Implies(z3.Or((c1 == NOTHING), (c2 == NOTHING)), z3.And(p != AND, p != OR))
aParents = z3.And(z3.Implies(z3.Or(a[1] == NOTHING, a[2] == NOTHING), z3.And(a[0] != AND, a[0] != OR, a[0] != NOTHING)), \
f(a[3], a[4], a[1]), \
f(a[5], a[6], a[2]))
rParents = z3.And(f(r[1], r[2], r[0]), \
f(r[3], r[4], r[1]), \
f(r[5], r[6], r[2]))
return z3.And(aParents, rParents)
# If a node is a gene then it must have a gate as a parent
def parentsOfRestAreGates(a, r):
def f(c1,c2,p):
return z3.Implies(z3.Or((c1 != NOTHING), (c2 != NOTHING)), z3.Or(p == AND, p == OR))
aParents = z3.And(f(a[1], a[2], a[0]), \
f(a[3], a[4], a[1]), \
f(a[5], a[6], a[2]))
rParents = z3.And(f(r[1], r[2], r[0]), \
f(r[3], r[4], r[1]), \
f(r[5], r[6], r[2]))
return z3.And(aParents, rParents)
# Can't have a gene more than once in the relation
def variablesDoNotAppearMoreThanOnce(symVars):
def isVar(v):
return z3.And(v != NOTHING, v != AND, v != OR)
def notEqual(v, vars):
return z3.And([v != i for i in vars if not z3.eq(v,i)])
def doesNotAppearMoreThanOnce(v):
return z3.Implies(isVar(v), notEqual(v, symVars))
return z3.And([ doesNotAppearMoreThanOnce(j) for j in symVars])
# Break symmetry with lexicographic orderings so equivalent circuits are not enumerated more than once
def enforceSiblingLexigraphicalOrdering(v1, v2):
return (v1 <= v2)
def enforceLexigraphicalOrderingBetweenBranches(p1, p2, c1, c2):
return z3.Implies(p1 == p2, c1 <= c2)
def enforceLexigraphicalOrderingNaryGate(vars):
return z3.Implies(vars[0] == vars[1], vars[2] <= vars[3])
# Store the activator and repressor variables in a list
activatorVars = ["a" + str(i) for i in xrange(7)]
repressorVars = ["r" + str(i) for i in xrange(7)]
circuitVars = activatorVars + repressorVars
# Depending on maximum number of inputs may want fewer nodes
def fixMaxInputs(v, max):
if max == 0:
return makeCircuitVar(v+"0") == NOTHING
elif max == 1:
return makeCircuitVar(v+"2") == NOTHING
elif max == 2:
return makeCircuitVar(v+"4") == NOTHING
elif max == 3:
return makeCircuitVar(v+"6") == NOTHING
else:
return True
def fixMaxActivators(max):
return fixMaxInputs("a", max)
def fixMaxRepressors(max):
return fixMaxInputs("r", max)
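# For example, fixMaxActivators(1) returns the constraint a2 == NOTHING;
# combined with the parent constraints above, this collapses the activator
# tree to the single root node a0, i.e. at most one activator gene.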
# This encodes the allowed update functions for a gene
def encodeUpdateFunction(gene, genes, maxActivators, maxRepressors, possAct, possRep):
# Check all inputs are of right form
assert (gene in genes and maxActivators > 0 and maxActivators <= 4 and maxRepressors >= 0 and maxRepressors <= 4), \
"Incorrect arguments to encodeUpdateFunction"
a = [makeCircuitVar("a%i" %i) for i in xrange(7)]
r = [makeCircuitVar("r%i" %i) for i in xrange(7)]
circuitEncoding = z3.And(variableDomains(a[0], True, possAct), \
variableDomains(r[0], True, possRep + [NOTHING]), \
variableDomains(a[1], True, possAct + [NOTHING]), variableDomains(a[2], True, possAct + [NOTHING]), \
variableDomains(r[1], True, possRep + [NOTHING]), variableDomains(r[2], True, possRep + [NOTHING]), \
variableDomains(a[3], False, possAct+ [NOTHING]), variableDomains(a[4], False, possAct + [NOTHING]), \
variableDomains(a[5], False, possAct + [NOTHING]), variableDomains(a[6], False, possAct + [NOTHING]), \
variableDomains(r[3], False, possRep + [NOTHING]), variableDomains(r[4], False, possRep + [NOTHING]), \
variableDomains(r[5], False, possRep + [NOTHING]), variableDomains(r[6], False, possRep + [NOTHING]), \
parentsOfNothingArentGates(a, r), \
parentsOfRestAreGates(a, r), \
variablesDoNotAppearMoreThanOnce(a + r), \
z3.And([enforceSiblingLexigraphicalOrdering(a[i], a[i+1]) for i in [1,3,5]]), \
z3.And([enforceSiblingLexigraphicalOrdering(r[i], r[i+1]) for i in [1,3,5]]), \
enforceLexigraphicalOrderingBetweenBranches(a[1], a[2], a[3], a[5]), \
enforceLexigraphicalOrderingBetweenBranches(r[1], r[2], r[3], r[5]), \
enforceLexigraphicalOrderingNaryGate(a), \
enforceLexigraphicalOrderingNaryGate(r), \
fixMaxActivators(maxActivators), \
fixMaxRepressors(maxRepressors))
return (circuitEncoding, a, r)
# Given input gene values, evaluates the encoded update function
def evaluateUpdateFunction(aVars, rVars, geneValues, counter):
i = counter
intermediateValueVariablesA = [ z3.Bool("va%i_%i" % (j, i)) for j in range(7)]
intermediateValueVariablesR = [ z3.Bool("vr%i_%i" % (j, i)) for j in range(7)]
def andConstraints(symVars, variables, pi, c1i, c2i):
return z3.Implies(symVars[pi] == z3.BitVecVal(AND,32), variables[pi] == z3.And(variables[c1i], variables[c2i]))
def orConstraints(symVars, variables, pi, c1i, c2i):
return z3.Implies(symVars[pi] == z3.BitVecVal(OR,32), variables[pi] == z3.Or(variables[c1i], variables[c2i]))
def variableConstraints(symVars, intermediateVars):
def f(symVar, i):
return z3.And([z3.Implies(symVar == v, intermediateVars[i] == z3.BoolVal(geneValues[v-2])) for v in range(2, NOTHING)])
return z3.And([f(symVars[i], i) for i in range(7)])
circuitVal = z3.Bool("circuit_%i" % i)
def circuitValue():
noRepressors = rVars[0] == NOTHING
return z3.If(noRepressors, intermediateValueVariablesA[0], \
z3.And(intermediateValueVariablesA[0], z3.Not(intermediateValueVariablesR[0])))
return (z3.And([variableConstraints(aVars, intermediateValueVariablesA), \
variableConstraints(rVars, intermediateValueVariablesR), \
andConstraints(aVars, intermediateValueVariablesA, 0, 1, 2), \
andConstraints(aVars, intermediateValueVariablesA, 1, 3, 4), \
andConstraints(aVars, intermediateValueVariablesA, 2, 5, 6), \
andConstraints(rVars, intermediateValueVariablesR, 0, 1, 2), \
andConstraints(rVars, intermediateValueVariablesR, 1, 3, 4), \
andConstraints(rVars, intermediateValueVariablesR, 2, 5, 6), \
orConstraints(aVars, intermediateValueVariablesA, 0, 1, 2), \
orConstraints(aVars, intermediateValueVariablesA, 1, 3, 4), \
orConstraints(aVars, intermediateValueVariablesA, 2, 5, 6), \
orConstraints(rVars, intermediateValueVariablesR, 0, 1, 2), \
orConstraints(rVars, intermediateValueVariablesR, 1, 3, 4), \
orConstraints(rVars, intermediateValueVariablesR, 2, 5, 6), \
circuitVal == circuitValue()]), circuitVal)
def circuitEvaluatesTo(gene, aVars, rVars, input, output, counter):
outValue = output[gene-2]
inValues = input
evaluationEncoding, circuitVal = evaluateUpdateFunction(aVars, rVars, inValues, counter)
return (evaluationEncoding, circuitVal == z3.BoolVal(outValue))
| apache-2.0 |
andrewnc/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
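# X_transformed is a sparse binary matrix with one column per leaf over all
# trees: with 10 trees of depth at most 3 it has at most 10 * 2**3 = 80
# columns, and each row has exactly 10 non-zero entries (one leaf per tree).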
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision boundary in the original space. For that, we will assign a
# color to each point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/manifold/t_sne.py | 106 | 20057 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
    distances : array, shape (n_samples, n_samples)
        Pairwise distances between samples as a full, square matrix
        (``fit`` passes squared euclidean distances by default); the joint
        probabilities are returned in condensed one-dimensional form.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
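# Illustrative call on hypothetical toy data (not part of the library): a
# square matrix of squared distances goes in, a condensed probability vector
# comes out.
# >>> X = np.random.RandomState(0).randn(5, 3)
# >>> D = pairwise_distances(X, squared=True)              # shape (5, 5)
# >>> P = _joint_probabilities(D, 2.0, verbose=0)
# >>> P.shape
# (10,)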
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)  # clip in place so the minimum gain is enforced
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
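# Illustrative call on hypothetical toy data: an embedding identical to the
# original data preserves every neighbourhood, so the score is exactly 1.
# >>> X = np.random.RandomState(0).randn(20, 5)
# >>> trustworthiness(X, X.copy(), n_neighbors=1)
# 1.0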
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
keflavich/scikit-image | skimage/transform/tests/test_radon_transform.py | 16 | 14464 | from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_raises
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
radon_image_200 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
reconstructed = iradon(radon_image_200)
delta_200 = np.mean(abs(_rescale_intensity(image) - _rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta)
reconstructed = iradon(sinogram, theta)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2])
iradon(p, theta=[0, 1, 2])
assert_raises(ValueError, iradon, p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
assert_raises(ValueError, radon, a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
argmax_shape = lambda a: np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square)
== argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
    assert np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause |
calebfoss/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 11 | 12252 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering.
This goes on top of the skflow API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import TransformerMixin
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
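# A minimal usage sketch of the class defined below (hypothetical data; the
# float32 cast and step count are illustrative assumptions):
# >>> x = np.random.randn(500, 2).astype(np.float32)
# >>> gmm = GMM(num_clusters=3, steps=20)
# >>> _ = gmm.fit(x)
# >>> assignments = gmm.predict(x)   # cluster id per row
# >>> centers = gmm.clusters()       # array of shape (3, 2)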
class GMM(estimator_lib.Estimator, TransformerMixin):
"""GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
batch_size=128,
steps=10,
continue_training=False,
config=None,
verbose=1):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
batch_size: See Estimator
steps: See Estimator
continue_training: See Estimator
config: See Estimator
verbose: See Estimator
"""
super(GMM, self).__init__(model_dir=model_dir, config=config)
self.batch_size = batch_size
self.steps = steps
self.continue_training = continue_training
self.verbose = verbose
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
"""Trains a GMM clustering on x.
Note: See Estimator for logic for continuous training and graph
construction across multiple calls to fit.
Args:
x: training input matrix of shape [n_samples, n_features].
y: labels. Should be None.
monitors: List of `Monitor` objects to print training progress and
invoke early stopping.
logdir: the directory to save the log file that can be used for optional
visualization.
steps: number of training steps. If not None, overrides the value passed
in constructor.
Returns:
Returns self.
"""
if logdir is not None:
self._model_dir = logdir
self._data_feeder = data_feeder.setup_train_data_feeder(x, None,
self._num_clusters,
self.batch_size)
_legacy_train_model( # pylint: disable=protected-access
self,
input_fn=self._data_feeder.input_builder,
feed_fn=self._data_feeder.get_feed_dict_fn(),
steps=steps or self.steps,
monitors=monitors,
init_feed_fn=self._data_feeder.get_feed_dict_fn())
return self
def predict(self, x, batch_size=None):
"""Predict cluster id for each element in x.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, containing cluster ids.
"""
return np.array([
prediction[GMM.ASSIGNMENTS]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def score(self, x, batch_size=None):
"""Predict total sum of distances to nearest clusters.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Total score.
"""
return np.sum(self.evaluate(x=x, batch_size=batch_size)[GMM.SCORES])
def transform(self, x, batch_size=None):
"""Transforms each element in x to distances to cluster centers.
Args:
x: 2-D matrix or iterator.
batch_size: size to use for batching up x for querying the model.
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
return np.array([
prediction[GMM.ALL_SCORES]
for prediction in super(GMM, self).predict(
x=x, batch_size=batch_size, as_iterable=True)
])
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())], 1)
return features
def _get_train_ops(self, features, _):
(_, _, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
return training_op, loss
def _get_predict_ops(self, features):
(all_scores, model_predictions, _, _) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
return {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
def _get_eval_ops(self, features, _, unused_metrics):
(_,
_,
losses,
_) = gmm_ops.gmm(
self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters,
self._random_seed,
self._covariance_type,
self._params)
return {GMM.SCORES: _streaming_sum(math_ops.reduce_sum(losses))}
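# Illustrative sketch (not part of the original file): rough end-to-end usage
# of the GMM estimator defined above. The constructor keyword names
# (num_clusters, steps, covariance_type) are assumed from the docstring above,
# so treat this as a hypothetical example rather than the canonical API; the
# helper is never executed on import.
def _demo_gmm_usage():
  x = np.random.randn(200, 2).astype(np.float32)   # toy 2-D data
  gmm = GMM(num_clusters=3, steps=10, covariance_type='diag')  # assumed kwargs
  gmm.fit(x)                     # trains weights, means and covariances
  assignments = gmm.predict(x)   # cluster id for each row of x
  centers = gmm.clusters()       # cluster centers read from the checkpoint
  return assignments, centers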
# TODO(xavigonzalvo): delete this after implementing model-fn based Estimator.
def _legacy_train_model(estimator,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
"""Legacy train function of Estimator."""
if hasattr(estimator.config, 'execution_mode'):
if estimator.config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
estimator.config.training_worker_max_startup_secs,
estimator.config.task_id *
estimator.config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
estimator.config.task_id)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or estimator._device_fn # pylint: disable=protected-access
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed_lib.set_random_seed(estimator.config.tf_random_seed)
global_step = framework.create_global_step(g)
features, labels = input_fn()
estimator._check_inputs(features, labels) # pylint: disable=protected-access
# The default return type of _get_train_ops is ModelFnOps. But there are
# some subclasses of tf.contrib.learn.Estimator which override this
# method and use the legacy signature, namely _get_train_ops returns a
# (train_op, loss) tuple. The following else-statement code covers these
# cases, but will soon be deleted after the subclasses are updated.
# TODO(b/32664904): Update subclasses and delete the else-statement.
train_ops = estimator._get_train_ops(features, labels) # pylint: disable=protected-access
if isinstance(train_ops, model_fn_lib.ModelFnOps): # Default signature
train_op = train_ops.train_op
loss_op = train_ops.loss
if estimator.config.is_chief:
hooks = train_ops.training_chief_hooks + train_ops.training_hooks
else:
hooks = train_ops.training_hooks
else: # Legacy signature
if len(train_ops) != 2:
raise ValueError('Expected a tuple of train_op and loss, got {}'.format(
train_ops))
train_op = train_ops[0]
loss_op = train_ops[1]
hooks = []
hooks += monitor_lib.replace_monitors_with_hooks(monitors, estimator)
ops.add_to_collection(ops.GraphKeys.LOSSES, loss_op)
return graph_actions._monitored_train( # pylint: disable=protected-access
graph=g,
output_dir=estimator.model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=estimator.config.is_chief,
supervisor_master=estimator.config.master,
supervisor_save_model_secs=estimator.config.save_checkpoints_secs,
supervisor_save_model_steps=estimator.config.save_checkpoints_steps,
supervisor_save_summaries_steps=estimator.config.save_summary_steps,
keep_checkpoint_max=estimator.config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
estimator.config.keep_checkpoint_every_n_hours),
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
hooks=hooks,
max_steps=max_steps)
| apache-2.0 |
argriffing/numpy | numpy/lib/polynomial.py | 30 | 38012 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
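# Illustrative sketch (not part of the original module): for a monic
# polynomial, `poly` and `roots` are inverse operations up to ordering and
# floating-point error.  The helper is never called on import.
def _demo_poly_roots_roundtrip():
    zeros = [-0.5, 0.0, 0.5]
    c = poly(zeros)                # array([ 1.  ,  0.  , -0.25,  0.  ])
    return sort_complex(roots(c))  # ~ the original zeros, as complex values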
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
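# Illustrative sketch (not in the original source): the companion matrix that
# `roots` builds internally, written out explicitly so the Notes above can be
# checked by hand.  The helper is never executed on import.
def _demo_companion_matrix():
    p = array([3.2, 2.0, 1.0])
    A = diag(NX.ones((len(p) - 2,), p.dtype), -1)  # ones on the sub-diagonal
    A[0, :] = -p[1:] / p[0]                        # first row from the coefficients
    return eigvals(A), roots(p)                    # same values, possibly reordered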
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
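# Illustrative sketch (not part of the original module): differentiating the
# antiderivative recovers the original polynomial, the defining property
# stated in the docstring above.  Never executed on import.
def _demo_polyint_inverse():
    p = [1.0, 2.0, 3.0]
    P = polyint(p, m=1, k=5.0)   # antiderivative coefficients, constant term 5
    return polyder(P), p         # polyder(P) has p's coefficients again, up to rounding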
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
    V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
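# Illustrative sketch (not part of the original module): fitting a noiseless
# quadratic recovers its coefficients, and full=True also exposes the rank of
# the scaled Vandermonde matrix discussed in the Notes above.  Never run on
# import.
def _demo_polyfit():
    x = NX.arange(6.0)
    y = 2.0 * x ** 2 - 3.0 * x + 1.0
    c = polyfit(x, y, 2)                            # ~ array([ 2., -3.,  1.])
    _, _, rank, _, _ = polyfit(x, y, 2, full=True)  # (coeffs, residuals, rank, sv, rcond)
    return c, rank                                  # rank == 3 == deg + 1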
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
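# Illustrative sketch (not in the original source): the Horner recurrence that
# `polyval` applies, unrolled for a scalar so the loop above is easy to
# follow.  Never executed on import.
def _demo_horner(coeffs=(3.0, 0.0, 1.0), x=5.0):
    acc = 0.0
    for c in coeffs:
        acc = acc * x + c                  # same update as the loop in polyval
    return acc, polyval(list(coeffs), x)   # both evaluate to 76.0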
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
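# Illustrative sketch (not part of the original module): the quotient and
# remainder returned by `polydiv` satisfy u == q*v + r, checked here with the
# other helpers defined above.  Never executed on import.
def _demo_polydiv_roundtrip():
    u = [3.0, 5.0, 2.0]
    v = [2.0, 1.0]
    q, r = polydiv(u, v)              # (array([ 1.5 ,  1.75]), array([ 0.25]))
    return polyadd(polymul(q, v), r)  # ~ array([ 3.,  5.,  2.]), i.e. u again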
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
    desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
    drawing. Use matplotlib.widgets.lock(someobj) to reserve the lock for someobj.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
    The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
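# Illustrative sketch (not part of the original module): wiring a Button to a
# callback, in the same pylab style the docstrings in this file use.  Needs an
# interactive matplotlib backend; the helper is never executed on import.
def _demo_button():
    from pylab import axes, show
    bax = axes([0.4, 0.45, 0.2, 0.1])      # a small Axes to hold the button
    button = Button(bax, 'Press me')
    def on_press(event):                   # receives the button_press_event
        print 'button clicked at', event.x, event.y
    cid = button.on_clicked(on_press)
    show()
    return button, cid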
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
        slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
        When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
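# Illustrative sketch (not part of the original module): a Slider driving a
# simple callback, again in pylab style.  Needs an interactive backend; never
# executed on import.
def _demo_slider():
    from pylab import axes, show
    sax = axes([0.25, 0.1, 0.6, 0.05])
    freq = Slider(sax, 'freq', 0.1, 10.0, valinit=1.0)
    def update(val):                       # called with the new slider value
        print 'slider moved to', val
    freq.on_changed(update)
    show()
    return freq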
class CheckButtons(Widget):
"""
    A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
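# Illustrative sketch (not part of the original module): RadioButtons calling
# back with the text of the selected label, pylab style as elsewhere in this
# file.  Needs an interactive backend; never executed on import.
def _demo_radio_buttons():
    from pylab import axes, show
    rax = axes([0.05, 0.4, 0.2, 0.3])
    radio = RadioButtons(rax, ('red', 'green', 'blue'), active=0)
    def on_select(label):                  # receives the chosen button's text
        print 'selected', label
    radio.on_clicked(on_select)
    show()
    return radio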
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of fig
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
        toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
    A horizontal and vertical line that span the axes and move with
    the pointer. You can turn off the hline or vline respectively with
    the attributes
    horizOn =True|False: controls visibility of the horizontal line
    vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
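# Illustrative sketch (not part of the original module): attaching a Cursor to
# an Axes; the extra keyword arguments are ordinary Line2D properties that are
# passed through to axhline/axvline.  Needs an interactive backend; never
# executed on import.
def _demo_cursor():
    from pylab import subplot, show
    ax = subplot(111)
    ax.plot(np.arange(10), np.arange(10) ** 2)
    cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
    show()
    return cursor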
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
    Select a rectangular region of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
        Create a selector in ax.  When a selection is made, clear
        the drawn box/line and call onselect with the press and
        release events,
          onselect(eclick, erelease)
        where eclick and erelease are the matplotlib mouse events at
        press and release; their xdata/ydata attributes hold the
        corner coordinates of the selection.
        If minspanx is not None then events smaller than minspanx
        in the x direction are ignored (the same applies to minspany
        in the y direction).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
        Use drawtype if you want the mouse to draw a line, a box or nothing
        between click and actual position, by setting
        drawtype = 'line', drawtype='box' or drawtype = 'none'.
        spancoords is one of 'data' or 'pixels'. If 'data', minspanx
        and minspany will be interpreted in the same coordinates as
        the x and y axis; if 'pixels', they are in pixels.
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
        # make the drawn box/line visible and get the click
        # coordinates, button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
        # calculate dimensions of box or line and get the values in
        # the right order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
        if (self.drawtype=='box') and (xproblems or yproblems):
            # the drawn box is too small in the x or y direction
            return
        if (self.drawtype=='line') and (xproblems and yproblems):
            # the drawn line is too small in both the x and y direction
            return
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
        self.eventrelease = None              # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
        x, y = event.xdata, event.ydata           # actual position (with
                                                  #  button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
            from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
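# ----------------------------------------------------------------------
# A minimal usage sketch for the Lasso widget (illustrative only; the
# scatter data, the callback and the __main__ guard below are placeholder
# assumptions, not an official example).
if __name__ == '__main__':
    import numpy as np
    from pylab import figure, show
    fig = figure()
    ax = fig.add_subplot(111)
    ax.scatter(np.random.rand(30), np.random.rand(30))
    def lasso_callback(verts):
        # verts is the list of (x, y) vertices traced with the mouse;
        # keep them around so they can be inspected after the selection
        lasso_callback.verts = verts
    def on_press(event):
        if event.inaxes is None:
            return
        # keep a reference so the Lasso is not garbage collected mid-drag
        on_press.lasso = Lasso(event.inaxes, (event.xdata, event.ydata),
                               lasso_callback)
    fig.canvas.mpl_connect('button_press_event', on_press)
    show()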
| agpl-3.0 |
edhuckle/statsmodels | statsmodels/examples/example_functional_plots.py | 33 | 1367 | '''Functional boxplots and rainbow plots
see docstrings for an explanation
Author: Ralf Gommers
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
#Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea
#surface temperature data.
data = sm.datasets.elnino.load()
#Create a functional boxplot:
#We see that the years 1982-83 and 1997-98 are outliers; these are
#the years where El Nino (a climate pattern characterized by warming
#up of the sea surface and higher air pressures) occurred with unusual
#intensity.
fig = plt.figure()
ax = fig.add_subplot(111)
res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
labels=data.raw_data[:, 0].astype(int),
ax=ax)
ax.set_xlabel("Month of the year")
ax.set_ylabel("Sea surface temperature (C)")
ax.set_xticks(np.arange(13, step=3) - 1)
ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
ax.set_xlim([-0.2, 11.2])
#Create a rainbow plot:
fig = plt.figure()
ax = fig.add_subplot(111)
res = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)
ax.set_xlabel("Month of the year")
ax.set_ylabel("Sea surface temperature (C)")
ax.set_xticks(np.arange(13, step=3) - 1)
ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
ax.set_xlim([-0.2, 11.2])
plt.show()
| bsd-3-clause |
jjx02230808/project0223 | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: as they are only based
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
dfm/pcp | demo.py | 2 | 1640 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from PIL import Image
from pcp import pcp
def bitmap_to_mat(bitmap_seq):
"""from blog.shriphani.com"""
matrix = []
shape = None
for bitmap_file in bitmap_seq:
img = Image.open(bitmap_file).convert("L")
if shape is None:
shape = img.size
assert img.size == shape
img = np.array(img.getdata())
matrix.append(img)
return np.array(matrix), shape[::-1]
def do_plot(ax, img, shape):
ax.cla()
ax.imshow(img.reshape(shape), cmap="gray", interpolation="nearest")
ax.set_xticklabels([])
ax.set_yticklabels([])
if __name__ == "__main__":
import sys
import glob
import matplotlib.pyplot as pl
if "--test" in sys.argv:
M = (10*np.ones((10, 10))) + (-5 * np.eye(10))
L, S, svd = pcp(M, verbose=True, svd_method="exact")
assert np.allclose(M, L + S), "Failed"
print("passed")
sys.exit(0)
M, shape = bitmap_to_mat(glob.glob("test_data/Escalator/*.bmp")[:2000:2])
print(M.shape)
L, S, (u, s, v) = pcp(M, maxiter=50, verbose=True, svd_method="exact")
fig, axes = pl.subplots(1, 3, figsize=(10, 4))
fig.subplots_adjust(left=0, right=1, hspace=0, wspace=0.01)
for i in range(min(len(M), 500)):
do_plot(axes[0], M[i], shape)
axes[0].set_title("raw")
do_plot(axes[1], L[i], shape)
axes[1].set_title("low rank")
do_plot(axes[2], S[i], shape)
axes[2].set_title("sparse")
fig.savefig("results/{0:05d}.png".format(i))
| mit |
VGonPa/datasets-poses2012 | notebooks/hdf_converter.py | 1 | 1256 | import pandas as pd
import user_data_loader as udl
from itertools import product
def make_filename(dir_, experiment, user):
''' Produces a filename from input arguments'''
return dir_ + experiment + '-' + user + '.arff'
#def get_filenames(dir_, experiments, users):
# ''' Produces a generator with all the filenames '''
# for exp, user in it.product(experiments, users):
# yield make_filename(dir_, exp, user)
def build_store(storename, data_dir, experiments, users):
''' Builds an HDF store from the data in arff format
@param storename: the filename of the HDFStore
@param data_dir: directory where the arff files are located
@param experiments: list containing the experiment names
@type experiments: list of strings
@param users: list with the user ids
@type users: list of strings
@return: the hdfstore object with all the datasets from the users
@rtype: pandas.HDFStore
'''
store = pd.HDFStore(storename)
for exp, user in product(experiments, users):
filename = make_filename(data_dir, exp, user)
print filename,
df = udl.load_user_file(filename)
store.put(exp + '/' + user, df, format='table')
return store
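# ---------------------------------------------------------------------------
# A minimal usage sketch for build_store (illustrative only; the store name,
# data directory, experiment names and user ids are made-up placeholders).
if __name__ == '__main__':
    example_experiments = ['exp01', 'exp02']
    example_users = ['user01', 'user02']
    store = build_store('poses_example.h5', './arff_data/',
                        example_experiments, example_users)
    print store
    store.close()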
| gpl-3.0 |
ch3ll0v3k/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
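# A small worked example of the min-max formula above (illustration only):
# for a single feature column [1., 3., 5.] with the default
# feature_range=(0, 1), data_min is 1 and the data range is 4, so
#     MinMaxScaler().fit_transform(np.array([[1.], [3.], [5.]]))
# yields [[0.], [0.5], [1.]].  A constant column has its zero range replaced
# by 1 (see _handle_zeros_in_scale) and is therefore mapped to 0.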
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
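# A small worked example (illustration only): for the single feature column
# [2., 4., 6.] the fitted mean_ is 4.0 and std_ is sqrt(8 / 3) ~= 1.63, so
#     StandardScaler().fit_transform(np.array([[2.], [4.], [6.]]))
# yields approximately [[-1.22], [0.], [1.22]] (zero mean, unit variance).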
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
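# A small worked example (illustration only): the scaler only divides each
# column by its maximum absolute value, e.g.
#     MaxAbsScaler().fit_transform(np.array([[1., -2.], [2., 4.]]))
# yields [[0.5, -0.5], [1., 1.]], and zeros stay zero so sparsity is kept.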
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
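# A small worked example (illustration only) of why the robust statistics
# help with outliers: for the single feature column [1., 2., 3., 4., 100.]
# the median (center_) is 3 and the interquartile range (scale_) is
# 4 - 2 = 2, so
#     RobustScaler().fit_transform(np.array([[1.], [2.], [3.], [4.], [100.]]))
# yields [[-1.], [-0.5], [0.], [0.5], [48.5]]: the bulk of the data ends up
# in a small range while the outlier remains clearly separated.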
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
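# A small worked example (illustration only) of the three norms applied to
# the single sample [[3., 4.]]:
#     normalize([[3., 4.]], norm='l2')  -> [[0.6, 0.8]]              (divide by 5)
#     normalize([[3., 4.]], norm='l1')  -> [[0.4286..., 0.5714...]]  (divide by 7)
#     normalize([[3., 4.]], norm='max') -> [[0.75, 1.]]              (divide by 4)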
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
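# A small worked example (illustration only): with the default threshold of
# 0.0,
#     binarize([[1.5, -2., 0.]])
# yields [[1., 0., 0.]] -- strictly positive entries become 1 and everything
# less than or equal to the threshold becomes 0.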
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
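# A minimal usage sketch (illustrative only; `_demo_kernel_centerer` is a
# hypothetical helper, not part of the scikit-learn API, and the data are made
# up). It builds a small linear kernel and centers it; the centered kernel has
# (numerically) zero row and column means.
def _demo_kernel_centerer():
    X_demo = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = np.dot(X_demo, X_demo.T)  # linear kernel on three samples
    return KernelCenterer().fit(K).transform(K)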
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
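# A minimal usage sketch (illustrative only; `_demo_transform_selected` is a
# hypothetical helper, not part of the scikit-learn API, and the array is made
# up). Only column 0 is passed to the transform; column 1 is stacked back
# unchanged on the right.
def _demo_transform_selected():
    X_demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
    return _transform_selected(X_demo, lambda A: A * 2, selected=[0])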
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during
        # fit, i.e. those less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
gweidner/systemml | src/main/python/systemml/converters.py | 5 | 14040 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = [
'getNumCols',
'convertToMatrixBlock',
'convert_caffemodel',
'convert_lmdb_to_jpeg',
'convertToNumPyArr',
'convertToPandasDF',
'SUPPORTED_TYPES',
'convertToLabeledDF',
'convertImageToNumPyArr',
'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014': [103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
    w = tensor.reshape(tensor.shape[0], -1)  # flatten the incoming tensor to 2D
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write(
'\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file,
output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
    This method does not require caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError(
'The format ' +
str(format) +
' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(
net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(
net.params[layerName][0].data,
os.path.join(
output_dir,
layerName +
'_weight.mtx'),
shouldTranspose)
save_tensor_csv(
net.params[layerName][1].data,
os.path.join(
output_dir,
layerName +
'_bias.mtx'),
shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(
net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(
net.params[layerName][0].data,
os.path.join(
output_dir,
layerName +
'_weight.mtx'),
shouldTranspose)
else:
raise ValueError(
'Unsupported number of parameters:' +
str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(
sc._jsc,
deploy_file,
caffemodel_file,
output_dir,
format)
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb
import caffe
import cv2
    lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1, 2, 0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
pdf = pd.DataFrame(X)
inputColumns = ['C' + str(i) for i in pdf.columns]
outputColumns = inputColumns
assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')
out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))
if y is not None:
return out.select('features', 'label')
else:
return out.select('features')
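# A minimal usage sketch (illustrative only; `_demo_convertToLabeledDF` is a
# hypothetical helper, not part of the SystemML API). It assumes the caller
# passes in a live SparkSession; the feature matrix and labels are made up.
def _demo_convertToLabeledDF(sparkSession):
    X_demo = np.array([[1.0, 2.0], [3.0, 4.0]])
    y_demo = np.array([0, 1])
    # Returns a Spark DataFrame with 'features' (vector) and 'label' columns.
    return convertToLabeledDF(sparkSession, X_demo, y_demo)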
def _convertSPMatrixToMB(sc, src):
src = coo_matrix(src, dtype=np.float64)
numRows = src.shape[0]
numCols = src.shape[1]
data = src.data
row = src.row.astype(np.int32)
col = src.col.astype(np.int32)
nnz = len(src.col)
buf1 = bytearray(data.tostring())
buf2 = bytearray(row.tostring())
buf3 = bytearray(col.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(
buf1, buf2, buf3, numRows, numCols, nnz)
def _convertDenseMatrixToMB(sc, src):
numCols = getNumCols(src)
numRows = src.shape[0]
src = np.asarray(src, dtype=np.float64) if not isinstance(src, np.ndarray) else src
# data_type: 0: int, 1: float and 2: double
if src.dtype is np.dtype(np.int32):
arr = src.ravel().astype(np.int32)
dataType = 0
elif src.dtype is np.dtype(np.float32):
arr = src.ravel().astype(np.float32)
dataType = 1
else:
arr = src.ravel().astype(np.float64)
dataType = 2
buf = bytearray(arr.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(
buf, numRows, numCols, dataType)
def _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):
rowIndex = int(i / numRowsPerBlock)
tmp = src[i:min(i + numRowsPerBlock, rlen), ]
mb = _convertSPMatrixToMB(
sc,
tmp) if isinstance(
src,
spmatrix) else _convertDenseMatrixToMB(
sc,
tmp)
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(
mb, rowIndex, ret, numRowsPerBlock, rlen, clen)
return i
def convertToMatrixBlock(sc, src, maxSizeBlockInMB=128):
if not isinstance(sc, SparkContext):
raise TypeError('sc needs to be of type SparkContext')
if isinstance(src, spmatrix):
isSparse = True
else:
isSparse = False
src = np.asarray(src, dtype=np.float64) if not isinstance(src, np.ndarray) else src
if len(src.shape) != 2:
src_type = str(type(src).__name__)
raise TypeError('Expected 2-dimensional ' +
src_type +
', instead passed ' +
str(len(src.shape)) +
'-dimensional ' +
src_type)
worstCaseSizeInMB = (8*(src.getnnz()*3 if isSparse else src.shape[0]*src.shape[1])) / 1000000
# Ignoring sparsity for computing numRowsPerBlock for now
numRowsPerBlock = int(
math.ceil((maxSizeBlockInMB * 1000000) / (src.shape[1] * 8)))
if worstCaseSizeInMB <= maxSizeBlockInMB:
return _convertSPMatrixToMB(
sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)
else:
# Since coo_matrix does not have range indexing
src = csr_matrix(src) if isSparse else src
rlen = int(src.shape[0])
clen = int(src.shape[1])
ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(
rlen, clen, isSparse)
[_copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen)
for i in range(0, src.shape[0], numRowsPerBlock)]
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(
ret)
return ret
def convertToNumPyArr(sc, mb):
if isinstance(sc, SparkContext):
numRows = mb.getNumRows()
numCols = mb.getNumColumns()
createJavaObject(sc, 'dummy')
buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(
mb)
return np.frombuffer(buf, count=numRows * numCols,
dtype=np.float64).reshape((numRows, numCols))
else:
# TODO: We can generalize this by creating py4j gateway ourselves
raise TypeError('sc needs to be of type SparkContext')
# Returns the mean of a model if defined otherwise None
def getDatasetMean(dataset_name):
"""
Parameters
----------
    dataset_name: Name of the dataset used to train the model. This is an artificial name based on the training dataset.
Returns
-------
    mean: Mean value for the model if it is defined in DATASET_MEAN, else None.
"""
try:
mean = DATASET_MEAN[dataset_name.upper()]
except BaseException:
mean = None
return mean
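# A minimal usage sketch (illustrative only; `_demo_getDatasetMean` is a
# hypothetical helper, not part of the SystemML API). The lookup is
# case-insensitive and unknown dataset names fall back to None.
def _demo_getDatasetMean():
    assert getDatasetMean('vgg_ilsvrc_19_2014') == [103.939, 116.779, 123.68]
    assert getDatasetMean('some_unknown_model') is None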
# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
# The above call returns a numpy array of shape (6, 50176) in NCHW format
def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,
color_mode='RGB', mean=None):
# Input Parameters
    # color_mode: VGG models expect image data in BGR format rather than the RGB
    # format used by most other models; set color_mode='BGR' to reorder the channels.
    # mean: per-channel mean value subtracted from every pixel. By default it is
    # None, so no mean is subtracted.
if img_shape is not None:
num_channels = img_shape[0]
size = (img_shape[1], img_shape[2])
else:
num_channels = 1 if im.mode == 'L' else 3
size = None
if num_channels != 1 and num_channels != 3:
raise ValueError('Expected the number of channels to be either 1 or 3')
from PIL import Image
if size is not None:
im = im.resize(size, Image.LANCZOS)
expected_mode = 'L' if num_channels == 1 else 'RGB'
    if expected_mode != im.mode:
im = im.convert(expected_mode)
def _im2NumPy(im):
if expected_mode == 'L':
return np.asarray(im.getdata()).reshape((1, -1))
else:
im = (np.array(im).astype(np.float))
# (H,W,C) -> (C,H,W)
im = im.transpose(2, 0, 1)
            # RGB -> BGR (the array is now in (C,H,W) order, so flip the channel axis)
            if color_mode == 'BGR':
                im = im[::-1, :, :]
            # Subtract the per-channel mean
            if mean is not None:
                for c in range(3):
                    im[c, :, :] = im[c, :, :] - mean[c]
# (C,H,W) --> (1, C*H*W)
return im.reshape((1, -1))
ret = _im2NumPy(im)
if add_rotated_images:
ret = np.vstack(
(ret, _im2NumPy(
im.rotate(90)), _im2NumPy(
im.rotate(180)), _im2NumPy(
im.rotate(270))))
if add_mirrored_images:
ret = np.vstack(
(ret, _im2NumPy(
im.transpose(
Image.FLIP_LEFT_RIGHT)), _im2NumPy(
im.transpose(
Image.FLIP_TOP_BOTTOM))))
return ret
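# A minimal usage sketch (illustrative only; `_demo_convertImageToNumPyArr` is a
# hypothetical helper, not part of the SystemML API). A synthetic RGB image is
# resized to 224x224 and flattened into a single (1, 3*224*224) row in NCHW order.
def _demo_convertImageToNumPyArr():
    from PIL import Image
    im = Image.new('RGB', (300, 200), color=(128, 64, 32))
    arr = convertImageToNumPyArr(im, img_shape=(3, 224, 224))
    assert arr.shape == (1, 3 * 224 * 224)
    return arr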
def convertToPandasDF(X):
if not isinstance(X, pd.DataFrame):
return pd.DataFrame(X, columns=['C' + str(i)
for i in range(getNumCols(X))])
return X
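# A minimal usage sketch (illustrative only; `_demo_convertToPandasDF` is a
# hypothetical helper, not part of the SystemML API). A NumPy array gains
# generated column names C0, C1, ...; an existing DataFrame is returned as-is.
def _demo_convertToPandasDF():
    df = convertToPandasDF(np.array([[1, 2, 3], [4, 5, 6]]))
    assert list(df.columns) == ['C0', 'C1', 'C2']
    return df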
| apache-2.0 |
RachitKansal/scikit-learn | sklearn/svm/tests/test_sparse.py | 70 | 12992 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
michaelpacer/networkx | examples/graph/atlas.py | 54 | 2609 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas=graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
| bsd-3-clause |
SwissTPH/TBRU_serialTB | scripts/variant_processing/serial_functions.py | 1 | 62017 | #!/usr/bin/env python
#Custom analysis functions for processing data from Serial Isolates of TB patients
#NB written for Python 3.5+
################
# DEPENDENCIES #
################
import numpy as np
import sklearn.utils as sku
import scipy.stats as ss
import matplotlib.pyplot as plt  # required by the plotting helpers below
from collections import Counter
import random
#############
# FUNCTIONS #
#############
def populate_poly(data_dict, patient='Patient02'):
"""
Extract SNP frequencies from raw data.
INPUTS:
-------
patient: str, dict key for data_dict
data_dict: dict, {PatientID: [['timepoint', (locus, frequency)]...]}
OUTPUT:
-------
nd.array, first column are loci, sequenital columns are frequencies
"""
_polys = np.zeros((2,8)) #Need to start with a 2x8 array to be able to iterate correctly.
_timepoints = ['00','02','04','06','08','16','24']
_values = data_dict[patient][1:]
for _v in _values:
for (_locus,_freq) in _v[1:]:
_ind = _timepoints.index(_v[0])
if float(_locus) in _polys[:,0]:
_mapper = list(_polys[:,0])
_bob = _mapper.index(float(_locus))
_polys[_bob,_ind+1]+=float(_freq)
if float(_locus) not in _polys[:,0]:
_new = np.zeros(8)
_new[0]+=int(_locus)
_new[_ind+1]+=float(_freq)
_polys = np.vstack((_polys, _new))
_polys = _polys[2:] #Remove first two rows.
return _polys
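# A minimal usage sketch (illustrative only; `_demo_populate_poly` is a
# hypothetical helper and the patient ID, loci and frequencies are made up).
# A header entry is included because populate_poly() skips the first element
# of each patient's list.
def _demo_populate_poly():
    toy_data = {'PatientXX': ['header',
                              ['00', ('761155', '0.12')],
                              ['02', ('761155', '0.08'), ('1473246', '0.03')]]}
    # Column 0 holds loci; columns 1-7 hold frequencies at weeks 0,2,4,6,8,16,24.
    return populate_poly(toy_data, patient='PatientXX')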
def polymorphic_sites_plot(data, cutoff=0.005, colour='red', return_data=False, savefig=False):
"""Plot of v-SNP counts.
INPUT:
------
data: 2D-array generated from populate_poly()
cutoff: float, minimal frequency required for counting
colour: color of line, can take any value accepted by matplotlib.
return_data: Boolean, return the plotted table
savefig: give string with filename to generate if you want to save the figure.
"""
_timepoints = np.array([0,2,4,6,8,16,24])
_polymorphic_sites = np.array([len(np.where(data[:,x]>cutoff)[0]) for x in np.arange(1,8)])
#only plot (consider) timepoints for which we have data.
_to_consider = np.nonzero(np.array([len(np.where(data[:,x]>0)[0]) for x in np.arange(1,8)]))
_x, _y = _timepoints[_to_consider], _polymorphic_sites[_to_consider]
c_label = 'Cutoff = %.1f' %(cutoff*100)+'%'
plt.plot(_x, _y, color=colour, label=c_label,lw=1.5)
if savefig!=False:
plt.savefig(savefig)
if return_data!=False:
return _x, _y
def SFS(data, title='Patient__', plots=[0,2,4,6], grid=(2,2)):
_timepoints = [0,2,4,6,8,16,24]
for _sn,_pl in enumerate(plots):
_i = _timepoints.index(_pl)
if grid!=False:
plt.subplot(grid[0],grid[1],_sn+1)
plt.title('Week %s' %_timepoints[_i])
_to_plot = data[:,_i+1][data[:,_i+1]>0]
plt.hist(_to_plot,bins=np.arange(0,1.01,0.01), label='Week %s' %_timepoints[_i], alpha=0.5)
plt.xlim(0,1)
def SNP_fate(data, cutoff=0.005, method='imshow'):
_a = np.zeros(np.shape(data[:,1:]))
_a[data[:,1:]>cutoff]+=1
_relevant = np.where(_a.sum(axis=1)>1)
_labs = data[:,0][_relevant]
_tm = np.array([0,2,4,6,8,16,24])
if method=='imshow':
plt.imshow(_a[_relevant], interpolation='none',cmap='Greys')
plt.yticks(np.arange(1,len(_relevant)),_labs)
if method=='plot':
if len(_relevant)>0:
for _ind,_ys in enumerate(data[:,1:][_relevant]):
plt.plot(_tm[_a.sum(axis=0)>0], _ys[_a.sum(axis=0)>0], label='SNP at %i' %data[:,0][_relevant][_ind],lw=1.5)
def SNP_fate_NSI(data, annotation_dict, cutoff=0.005, method='imshow'):
_a = np.zeros(np.shape(data[:,1:]))
_a[data[:,1:]>cutoff]+=1
_relevant = np.where(_a.sum(axis=1)>1)
_labs = data[:,0][_relevant]
_tm = np.array([0,2,4,6,8,16,24])
if method=='imshow':
plt.imshow(_a[_relevant], interpolation='none',cmap='Greys')
plt.yticks(np.arange(1,len(_relevant)),_labs)
if method=='plot':
if len(_relevant)>0:
for _ind,_ys in enumerate(data[:,1:][_relevant]):
_mutation_type_color = {'synonymous':'grey', 'nonsynonymous':'red','IGR':'dodgerblue'}
_mutation_type = _mutation_type_color.get(annotation_dict[str(int(data[:,0][_relevant][_ind]))][0][1],'yellow')
plt.plot(_tm[_a.sum(axis=0)>0], _ys[_a.sum(axis=0)>0],color=_mutation_type, label='SNP at %i' %data[:,0][_relevant][_ind],lw=1.5,alpha=0.8)
def SNP_fate_DR(data, annotation_dict, cutoff=0.005, method='imshow'):
_a = np.zeros(np.shape(data[:,1:]))
_a[data[:,1:]>cutoff]+=1
_relevant = np.where(_a.sum(axis=1)>1)
_labs = data[:,0][_relevant]
_tm = np.array([0,2,4,6,8,16,24])
if method=='imshow':
plt.imshow(_a[_relevant], interpolation='none',cmap='Greys')
plt.yticks(np.arange(1,len(_relevant)),_labs)
if method=='plot':
if len(_relevant)>0:
for _ind,_ys in enumerate(data[:,1:][_relevant]):
_mutation_type_color = ['grey','red']
_mutation_type = _mutation_type_color[int(annotation_dict[str(int(data[:,0][_relevant][_ind]))][0][0] in DR_set)]
_mutation_type_style = {'nonsynonymous':'solid', 'synonymous':'dashed','IGR':'dotted'}
_mutation_style = _mutation_type_style.get(annotation_dict[str(int(data[:,0][_relevant][_ind]))][0][1],'dashdot')
plt.plot(_tm[_a.sum(axis=0)>0], _ys[_a.sum(axis=0)>0],color=_mutation_type, linestyle=_mutation_style,label='SNP at %i' %data[:,0][_relevant][_ind],lw=1.5,alpha=0.6)
def confidence_interval(data, CI=0.95, normal=False):
"""Calculate the confidence interval for a dataset
which is distribution blind.
Can be used in the case of a normal distribution expectation.
INPUT:
------
data: 1D np.array
CI: float < 1.0, confidence interval of interest
expects 0.95 for 95% CI, 0.9 for 90% CI...
normal: bool, determine CI based on normal distribution.
OUTPUT:
-------
lower_CI, upper_CI: float
NOTES:
------
Designed to be used in conjuncion with bootstrapping
Adapted from:
http://stackoverflow.com/questions/19124239/scikit-learn-roc-curve-with-confidence-intervals
http://adventuresinpython.blogspot.ch/2012/12/confidence-intervals-in-python.html
"""
_CI = (1.+CI)/2.
_lower_CI = sorted(data)[int((1.-_CI)*len(data))]
_upper_CI = sorted(data)[int((_CI)*len(data))]
_R = (_lower_CI, _upper_CI)
if normal!=False:
_n, _min_max, _mean, _var, _skew, _kurt = ss.describe(data)
_std=np.sqrt(_var)
_R = ss.norm.interval(1-_CI,loc=_mean,scale=_std)
return _R
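# A minimal usage sketch (illustrative only; `_demo_confidence_interval` is a
# hypothetical helper). For a large standard-normal sample the distribution-free
# 95% CI bounds land near (-1.96, 1.96).
def _demo_confidence_interval():
    draws = np.random.normal(loc=0.0, scale=1.0, size=10000)
    lower, upper = confidence_interval(draws, CI=0.95)
    return lower, upper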
def simpson_index(data):
"""Calculate Simpson's index from data.
In ecology this index corresponds to the
    probability that two individuals drawn at
random from a population belong to the
same group.
Index takes a value between 0 and 1. The
greater the value, the more homogeneous a
sample is.
1-SI = Simpson's index of diversity (SID)
Probability of two individuals belonging
to different groups.
1/SI = Simpson's reciprocal index
Effective number of groups in a population.
This parameters takes a value between 1 and
K where K is the actual number of groups in
the sample.
INPUT:
------
data: 1d array (sums to 1.)
OUTPUT:
-------
Simpason's index: float
NOTES:
------
Based on http://en.wikipedia.org/wiki/Diversity_index and
http://www.countrysideinfo.co.uk/simpsons.htm.
"""
return np.sum(data**2)
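# A minimal usage sketch (illustrative only; `_demo_simpson_index` is a
# hypothetical helper with made-up frequencies summing to 1).
def _demo_simpson_index():
    freqs = np.array([0.7, 0.2, 0.1])
    si = simpson_index(freqs)   # 0.54: probability of drawing the same group twice
    sid = 1. - si               # Simpson's index of diversity
    effective_groups = 1. / si  # Simpson's reciprocal index (~1.85 effective groups)
    return si, sid, effective_groups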
def euclidean_distance(data1, data2):
"""Calculate the euclidian distance between two points.
INPUTS:
-------
data1, data2: 1D-array of equal length
OUTPUT:
-------
float
NOTES:
------
Based on http://en.wikipedia.org/wiki/Euclidean_distance
"""
return np.sqrt(np.sum((np.array(data1)-np.array(data2))**2))
def heterozygosity(het_calls, genome_size):
"""Calcualtes the heterozygosity of a sample from
the relative frequencies of the heterozygous alleles
in the population.
In this iteration it assumes two alleles per position.
So Fa = frequency of allele 1, while 1-Fa is frequency
of allele 2.
INPUTS:
-------
het_calls: 1-D array, values range 0-1.
genome_size: float, size of genome in question.
OUTPUT:
-------
float
NOTES:
------
Based on the method for determining heterozygosity in
Cuevas et al, MBE 2015
http://www.ncbi.nlm.nih.gov/pubmed/25660377
"""
_N = len(het_calls)
_het1 = het_calls
_het2 = np.ones(_N)-het_calls
_hets = np.sum(np.ones(_N)-(_het1**2+_het2**2))
return _hets/float(genome_size)
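# A minimal usage sketch (illustrative only; `_demo_heterozygosity` is a
# hypothetical helper). Two made-up heterozygous calls are scaled by an
# approximate M. tuberculosis H37Rv genome size.
def _demo_heterozygosity():
    calls = np.array([0.5, 0.1])
    return heterozygosity(calls, 4411532.)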
def codon_counter(annotation_file):
"""Calcuates codon frequencies in
an annotated genome.
Assumes a file where:
>gene_annotation
TTGACCGATGACCCCGGTTC...
the coding sequence is given in a
single line.
ARGUMENT:
---------
Text file formatted as described above
OUTPUT:
-------
codon_freq: codon counts, dict
total_codons: absolute codon count, float
total_features: absolute feature count, float
"""
_codon_dict = {}
_codon_clicker = 0.
_feature_clicker = 0.
for _line in open(annotation_file):
if _line[0]!='>':
_feature_clicker+=1.
_sequence = _line.strip()
for i in range(0,len(_sequence),3):
_codon = _sequence[i:i+3]
_codon_clicker+=1.
if _codon in _codon_dict:
_codon_dict[_codon]+=1
if _codon not in _codon_dict:
_codon_dict[_codon]=1
return _codon_dict, _codon_clicker, _feature_clicker
def generate_codon_pool(codon_dict, N):
"""Generate a weighted pool of codons to
be used for codon resampling from a fake
genome-defined average gene.
ARGUMENTS:
----------
codon_dict: dictionary of codon counts in
a genome from codon_counter()
N: number of genes in the genome.
total_features from codon_counter()
OUTPUT:
-------
codon_pool: list
NOTES:
------
Assumes numpy is imported
"""
_codon_pool = []
for _key in sorted(codon_dict.keys()):
weight = int(np.ceil(codon_dict[_key]/N))
_codon_pool+=[_key,]*weight
return _codon_pool
def codon_mutate(codon):
"""Mutates codon at one position.
Checks whether the mutations was
synonymous or nonsynonymous.
ARGUMENTS:
----------
codon: str (3 bases)
OUTPUT:
-------
0,1 for S or NS respectively
"""
codon_table = {'AAA': 'K', 'AAC': 'N', 'AAG': 'K', 'AAU': 'N', 'ACA': 'T',
'ACC': 'T', 'ACG': 'T', 'ACU': 'T', 'AGA': 'R', 'AGC': 'S', 'AGG': 'R',
'AGU': 'S', 'AUA': 'I', 'AUC': 'I', 'AUG': 'M', 'AUU': 'I', 'CAA': 'Q',
'CAC': 'H', 'CAG': 'Q', 'CAU': 'H', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P',
'CCU': 'P', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGU': 'R', 'CUA': 'L',
'CUC': 'L', 'CUG': 'L', 'CUU': 'L', 'GAA': 'E', 'GAC': 'D', 'GAG': 'E',
'GAU': 'D', 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCU': 'A', 'GGA': 'G',
'GGC': 'G', 'GGG': 'G', 'GGU': 'G', 'GUA': 'V', 'GUC': 'V', 'GUG': 'V',
'GUU': 'V', 'UAA': 'STOP', 'UAC': 'Y', 'UAG': 'STOP', 'UAU': 'Y',
'UCA': 'S', 'UCC': 'S', 'UCG': 'S', 'UCU': 'S', 'UGA': 'STOP', 'UGC': 'C',
'UGG': 'W', 'UGU': 'C', 'UUA': 'L', 'UUC': 'F', 'UUG': 'L', 'UUU': 'F'}
    #Format the sequence.
codon = codon.upper()
if 'T' in codon:
codon = codon.replace('T', 'U')
_bases = ['A', 'C', 'G', 'U'] #all bases
_mut_pos = np.random.randint(3) #pick codon position to mutate
_bases.remove(codon[_mut_pos]) #remove the ancestral base
_mut_base = _bases[np.random.randint(3)] #pick random base
_alt_codon = codon[:_mut_pos]+_mut_base+codon[_mut_pos+1:] #mutate codon
if codon_table[codon]==codon_table[_alt_codon]: return 0
if codon_table[codon]!=codon_table[_alt_codon]: return 1
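# A minimal usage sketch (illustrative only; `_demo_codon_mutate` is a
# hypothetical helper). ATG (Met) has no synonymous single-base neighbours,
# so a random mutation of it is always scored as nonsynonymous (1).
def _demo_codon_mutate():
    assert codon_mutate('ATG') == 1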
def codon_mutate_TiTv_GC(codon, gc_content=0.5, ti=0.5):
"""Mutates codon at one position.
Checks whether the mutations was
synonymous or nonsynonymous.
ARGUMENTS:
----------
codon: str (3 bases)
gc_content: float, if specified will be used for
for calculating the substitution_matrix.
otherwise will assume 0.5.
ti: float, transition probability
OUTPUT:
-------
0,1 for S or NS respectively
NOTES:
------
Substitution matrix calculated based on
Jukes-Cantor 1969 in the basic (default) case.
Changing 'ti' modifies the approach according to
Kimura 1980. Changing 'gc_content' modifies it based
on Felsenstein 1981. Changing both 'ti' and 'gc_content'
approximates Hasegawa, Kishino and Yano 1985.
"""
codon_table = {'AAA': 'K', 'AAC': 'N', 'AAG': 'K', 'AAU': 'N', 'ACA': 'T',
'ACC': 'T', 'ACG': 'T', 'ACU': 'T', 'AGA': 'R', 'AGC': 'S', 'AGG': 'R',
'AGU': 'S', 'AUA': 'I', 'AUC': 'I', 'AUG': 'M', 'AUU': 'I', 'CAA': 'Q',
'CAC': 'H', 'CAG': 'Q', 'CAU': 'H', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P',
'CCU': 'P', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGU': 'R', 'CUA': 'L',
'CUC': 'L', 'CUG': 'L', 'CUU': 'L', 'GAA': 'E', 'GAC': 'D', 'GAG': 'E',
'GAU': 'D', 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCU': 'A', 'GGA': 'G',
'GGC': 'G', 'GGG': 'G', 'GGU': 'G', 'GUA': 'V', 'GUC': 'V', 'GUG': 'V',
'GUU': 'V', 'UAA': 'STOP', 'UAC': 'Y', 'UAG': 'STOP', 'UAU': 'Y',
'UCA': 'S', 'UCC': 'S', 'UCG': 'S', 'UCU': 'S', 'UGA': 'STOP', 'UGC': 'C',
'UGG': 'W', 'UGU': 'C', 'UUA': 'L', 'UUC': 'F', 'UUG': 'L', 'UUU': 'F'}
    #Format the sequence.
codon = codon.upper()
if 'T' in codon:
codon = codon.replace('T', 'U')
_pa = _pt = (1-gc_content)/2. #genomewide probability of base being A/T
_pg = _pc = gc_content/2. #genomewide probability of base being C/G
_a = ti #null expectation for transition
_b = 1-ti #null expectation for transversion
_sub_matrix = {
'A':[_b*_pc/((2*_b+_a)*_pc), _a*_pg/((2*_b+_a)*_pg), _b*_pt/((2*_b+_a)*_pt)],
'C':[_b*_pa/((2*_b+_a)*_pa), _b*_pg/((2*_b+_a)*_pg), _a*_pt/((2*_b+_a)*_pt)],
'G':[_a*_pa/((2*_b+_a)*_pa), _b*_pc/((2*_b+_a)*_pc), _b*_pt/((2*_b+_a)*_pt)],
'U':[_b*_pa/((2*_b+_a)*_pa), _a*_pc/((2*_b+_a)*_pc), _b*_pg/((2*_b+_a)*_pg)]
}
_bases = ['A', 'C', 'G', 'U'] #all bases
_mut_pos = np.random.randint(3) #pick codon position to mutate
_sub_params = _sub_matrix[codon[_mut_pos]] #Get substitution probabilities
_bases.remove(codon[_mut_pos]) #remove the ancestral base
_adjusted_bases = []
for _ind,_val in enumerate(_bases):
_expansion = [_val,]*int(_sub_params[_ind]*100)
_adjusted_bases += _expansion
_mut_base = _adjusted_bases[np.random.randint(len(_adjusted_bases))] #pick random base
_alt_codon = codon[:_mut_pos]+_mut_base+codon[_mut_pos+1:] #mutate codon
if codon_table[codon]==codon_table[_alt_codon]: return 0
if codon_table[codon]!=codon_table[_alt_codon]: return 1
def calculate_NS(sequence):
"""Calculates the synonymous/nonsynonymous score
for a sequence.
It takes the sequence, generates all codons and
calculates all that are 1 mutation away. Then it
scores the resulting population for mutational
potential.
Used for simplified dN/dS calculations.
ARGUMENT:
---------
sequence : any nucleotide sequence, at the core should
be a codon.
OUTPUT:
-------
[(N,S)]
N : nonsynonymous score per codon
S : synonymous score per codon
Note, the function will return a list of tuples if multiple codons are present
"""
codon_table = {'AAA': 'K', 'AAC': 'N', 'AAG': 'K', 'AAT': 'N', 'ACA': 'T',
'ACC': 'T', 'ACG': 'T', 'ACT': 'T', 'AGA': 'R', 'AGC': 'S', 'AGG': 'R',
'AGT': 'S', 'ATA': 'I', 'ATC': 'I', 'ATG': 'M', 'ATT': 'I', 'CAA': 'Q',
'CAC': 'H', 'CAG': 'Q', 'CAT': 'H', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P',
'CCT': 'P', 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R', 'CTA': 'L',
'CTC': 'L', 'CTG': 'L', 'CTT': 'L', 'GAA': 'E', 'GAC': 'D', 'GAG': 'E',
'GAT': 'D', 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A', 'GGA': 'G',
'GGC': 'G', 'GGG': 'G', 'GGT': 'G', 'GTA': 'V', 'GTC': 'V', 'GTG': 'V',
'GTT': 'V', 'TAA': 'STOP', 'TAC': 'Y', 'TAG': 'STOP', 'TAT': 'Y',
'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S', 'TGA': 'STOP', 'TGC': 'C',
'TGG': 'W', 'TGT': 'C', 'TTA': 'L', 'TTC': 'F', 'TTG': 'L', 'TTT': 'F'}
    #Format the sequence. The codon_table above uses DNA (T-based) codons, so
    #normalise any RNA input back to DNA rather than converting T to U.
    sequence = sequence.upper()
    if 'U' in sequence:
        sequence = sequence.replace('U', 'T')
    bases = ['A', 'C', 'G', 'T'] #all bases
sequence_len = len(sequence)
recipient_list = []
for codon in [sequence[c:c+3] for c in np.arange(0,len(sequence),3)]: #codonise
original_aa = codon_table[codon] #expected codon
codon_N = 0.
codon_S = 0.
for ind,base in enumerate(codon):
codon_marked = codon[:ind]+'X'+codon[ind+1:] #Put 'X' in place of the base to be mutated, this is to avoid replacing all the bases of a certain type in a string...
alt_codons = [codon_marked.replace('X',b) for b in [x for x in bases if x not in base]] # nested list comprehension, the first gets all other bases, the second generates all codons one mutation apart at given position
syn = [codon_table[ac] for ac in alt_codons].count(original_aa)/3. #translate alt_codons and see how many match the original aa
nsyn = 1.-syn #how many dont match
codon_N+=nsyn
codon_S+=syn
recipient_list.append((codon_N, codon_S))
if len(recipient_list)==1:
return recipient_list[0][0], recipient_list[0][1]
if len(recipient_list)>1:
return recipient_list
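# A minimal usage sketch (illustrative only; `_demo_calculate_NS` is a
# hypothetical helper). Every single-base change to ATG is nonsynonymous,
# so the codon scores N=3, S=0.
def _demo_calculate_NS():
    n_score, s_score = calculate_NS('ATG')
    assert (n_score, s_score) == (3.0, 0.0)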
def drift_simulation(codon_pool, N_codons, N=1000, method='binomial', pars=False):
"""Simulate a resamplng of a codon pool N times
each time fishing out N_codons. A binomial random
draw is used to "mutate" the codon to model ranom
mutations. The wildtype codons are concatenated
and the expected number of synonymous and non-
synonymous mutations is calculated using
calculate_NS().
ARGUMENTS:
----------
codon_pool: weighted list of codons done with
generate_codon_pool()
N_codons: number of codons to fish out
N: number of iterations
method ['derived'|'binomial']: codon mutation model
pars: return dN and dS separately - in case there are too
mutations to effectively calculate dN/dS
OUTPUT:
-------
dN/dS values, np.array
NOTES:
------
This function has many dependencies, ensure that
all the relevant functions are defined first.
The probabilites of N/S are defined by _pr_N
"""
#Define NS probabilities for binomial draws.
#This is based on the codon table.
_pr_N = {'AAA': 0.888888888888889, 'AAC': 0.888888888888889, 'AAG': 0.888888888888889, 'AAT': 0.888888888888889,
'ACA': 0.6666666666666666, 'ACC': 0.6666666666666666, 'ACG': 0.6666666666666666, 'ACT': 0.6666666666666666,
'AGA': 0.7777777777777778, 'AGC': 0.888888888888889, 'AGG': 0.7777777777777778, 'AGT': 0.888888888888889,
'ATA': 0.7777777777777778, 'ATC': 0.7777777777777778, 'ATG': 1.0, 'ATT': 0.7777777777777778,
'CAA': 0.888888888888889, 'CAC': 0.888888888888889, 'CAG': 0.888888888888889, 'CAT': 0.888888888888889,
'CCA': 0.6666666666666666, 'CCC': 0.6666666666666666, 'CCG': 0.6666666666666666, 'CCT': 0.6666666666666666,
'CGA': 0.5555555555555556, 'CGC': 0.6666666666666666, 'CGG': 0.5555555555555556, 'CGT': 0.6666666666666666,
'CTA': 0.5555555555555556, 'CTC': 0.6666666666666666, 'CTG': 0.5555555555555556, 'CTT': 0.6666666666666666,
'GAA': 0.888888888888889, 'GAC': 0.888888888888889, 'GAG': 0.888888888888889, 'GAT': 0.888888888888889,
'GCA': 0.6666666666666666, 'GCC': 0.6666666666666666, 'GCG': 0.6666666666666666, 'GCT': 0.6666666666666666,
'GGA': 0.6666666666666666,'GGC': 0.6666666666666666, 'GGG': 0.6666666666666666, 'GGT': 0.6666666666666666,
'GTA': 0.6666666666666666, 'GTC': 0.6666666666666666, 'GTG': 0.6666666666666666, 'GTT': 0.6666666666666666,
'TAA': 0.7777777777777778, 'TAC': 0.888888888888889, 'TAG': 0.888888888888889, 'TAT': 0.888888888888889,
'TCA': 0.6666666666666666, 'TCC': 0.6666666666666666, 'TCG': 0.6666666666666666, 'TCT': 0.6666666666666666,
'TGA': 0.888888888888889, 'TGC': 0.888888888888889, 'TGG': 1.0, 'TGT': 0.888888888888889,
'TTA': 0.7777777777777778, 'TTC': 0.888888888888889, 'TTG': 0.7777777777777778, 'TTT': 0.888888888888889}
#define recipient variables
_dNdS = []
_dNdS_par = []
for _trial in range(N):#loop over number of trials
_Ns = 0. #recipient for Nonsynonymous mutations
_CONCATENATED_SEQUENCE = '' #recipient for concatenated sequence
for _fish in np.random.randint(0,len(codon_pool),N_codons): #select random codon weighted by genomewide codon usage
_fished_codon = codon_pool[_fish] #define the fished codon
_CONCATENATED_SEQUENCE+=_fished_codon #add fished codon to the concatenated sequence
if method=='derived': _Ns+=codon_mutate(_fished_codon) #use the codon_mutate() to model mutation
if method=='binomial': _Ns+=np.random.binomial(1, _pr_N[_fished_codon]) #use a binomial random trial to determine if the mutation is Ns
_eNS = calculate_NS(_CONCATENATED_SEQUENCE)
_eN = 0.
_eS = 0.
for (_a,_b) in _eNS:
_eN+=_a
_eS+=_b
_dNdS.append((_Ns/_eN)/((N_codons-_Ns)/_eS))
_dNdS_par.append((_Ns/_eN,(N_codons-_Ns)/_eS))
if pars: return _dNdS_par
else: return _dNdS
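# A minimal usage sketch (illustrative only; `_demo_drift_simulation` is a
# hypothetical helper and the codon counts are made up - a real analysis would
# build them with codon_counter() from an annotated genome file). It draws a
# null dN/dS distribution for a hypothetical 50-codon gene.
def _demo_drift_simulation():
    toy_codon_counts = {'ATG': 4000., 'GCC': 8000., 'CTG': 6000.}
    pool = generate_codon_pool(toy_codon_counts, 4000.)
    return drift_simulation(pool, 50, N=20, method='binomial')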
def drift_null_Mtb(sequence, N=1000, method='binomial', pars=False, gc=0.656, ti=0.729):
"""Generate an estimate of the null expectation for
the pNS of a given Mtb sequence.
ARGUMENTS:
----------
sequence: str, concatenated codons of interest
N: number of iterations
method ['derived'|'binomial']: codon mutation model
pars: return dN and dS separately - in case there are too
mutations to effectively calculate dN/dS
gc: float, gc content of the genome
ti: float, estimated probability of transitions.
OUTPUT:
-------
expected pNS value under drift, np.array
NOTES:
------
This function has many dependencies, ensure that
all the relevant functions are defined first.
The probabilites of N/S are defined by _pr_N
"""
#Define NS probabilities for binomial draws.
#This is based on the codon table.
_pr_N = {'ACC': 0.66410000000000013, 'ATG': 1.0, 'AAG': 0.80871999999999999,
'AAA': 0.80523999999999984, 'ATC': 0.73629999999999984, 'AAC': 0.80698000000000003,
'ATA': 0.85933999999999988, 'AGG': 0.73813999999999991, 'CCT': 0.66512000000000004,
'CTC': 0.66899999999999993, 'AGC': 0.81028000000000011, 'ACA': 0.66819999999999991,
'AGA': 0.7396600000000001, 'CAT': 0.80928000000000011, 'AAT': 0.80779999999999996,
'ATT': 0.73552000000000017, 'CTG': 0.47383999999999993, 'CTA': 0.47404000000000002,
'ACT': 0.66971999999999998, 'CAC': 0.80871999999999999, 'ACG': 0.66917999999999989,
'CAA': 0.80668000000000006, 'AGT': 0.80940000000000001, 'CAG': 0.80868000000000007,
'CCG': 0.6644199999999999, 'CCC': 0.6670799999999999, 'TAT': 0.81141999999999992,
'GGT': 0.66510000000000002, 'TGT': 0.80779999999999996, 'CGA': 0.59169999999999989,
'CCA': 0.66690000000000016, 'CGC': 0.66578000000000004, 'GAT': 0.8096399999999998,
'CGG': 0.59478000000000009, 'CTT': 0.66659999999999997, 'TGC': 0.80824000000000007,
'GGG': 0.6641800000000001, 'TAG': 0.80645999999999995, 'GGA': 0.66880000000000006,
'TAA': 0.61687999999999998, 'GGC': 0.66660000000000008, 'TAC': 0.80803999999999998,
'GAG': 0.81069999999999998, 'TCG': 0.66408, 'TTA': 0.61682000000000003,
'GAC': 0.80948000000000009, 'CGT': 0.6698400000000001, 'TTT': 0.80830000000000002,
'TCA': 0.66878000000000004, 'GCA': 0.66555999999999993, 'GTA': 0.66721999999999992,
'GCC': 0.6660600000000001, 'GTC': 0.66648000000000007, 'GCG': 0.66559999999999997,
'GTG': 0.66546000000000005, 'TTC': 0.80668000000000006, 'GTT': 0.66598000000000002,
'GCT': 0.66961999999999999, 'TGA': 0.80687999999999993, 'TTG': 0.61943999999999999,
'TCC': 0.66835999999999995, 'TGG': 1.0, 'GAA': 0.80825999999999998,
'TCT': 0.66670000000000007}
_N_codons = len(sequence)/3
#define recipient variables
_dNdS = []
_dNdS_par = []
for _trial in range(N):#loop over number of trials
_Ns = 0. #recipient for Nonsynonymous mutations
#range of codon steps along the given sequence
for _step in np.arange(0,len(sequence),3):
#define the fished codon
_fished_codon = sequence[_step:_step+3]
#use the codon_mutate() to model mutation
if method=='derived': _Ns+=codon_mutate_TiTv_GC(_fished_codon,
gc_content=gc,
ti=ti)
#use a binomial random trial to determine if the mutation is Ns
if method=='binomial': _Ns+=np.random.binomial(1, _pr_N[_fished_codon])
_eNS = calculate_NS(sequence)
_eN = 0.
_eS = 0.
for (_a,_b) in _eNS:
_eN+=_a
_eS+=_b
try:
_dNdS.append((_Ns/_eN)/((_N_codons-_Ns)/_eS))
_dNdS_par.append((_Ns/_eN,(_N_codons-_Ns)/_eS))
except:
pass
if pars: return _dNdS_par
else: return _dNdS
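# Hedged usage sketch (defined but not called): draw a small drift-only pNS null
# for a toy concatenated sequence and summarise it with empirical percentiles.
# The sequence and iteration count are made up for illustration; calculate_NS()
# and numpy (np) are assumed to be available as elsewhere in this module.
def _demo_drift_null_Mtb():
    _toy_sequence = 'ATGGCTGAACTG' * 25  # 100 illustrative codons
    _null = drift_null_Mtb(_toy_sequence, N=200, method='binomial')
    return np.percentile(_null, [2.5, 50.0, 97.5])  # empirical 95% band and median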
def get_dNdS(annotated_dataset, gene_list=None, het_list=None, pars=False):
"""Calculate dNdS for a the annotated dataset.
Same can be done to a subset of genes speicified
by 'gene_list'.
Assumes a specific data format - see below.
ARGUMENTS:
----------
annotated_dataset: annotation dict,
example of value[0] in dict:
['Rv3885c', 'synonymous', '39', 'Val/V-Val/V', 'GTC-GTT']
gene_list: list of genes of interest
het_list: list of heterozygous positions of interest
    pars: return dN and dS separately - in case there are too few
          mutations to effectively calculate dN/dS
OUTPUT:
-------
dNdS: calculated dN/dS,float
N: number of codons used for the calculation, float
NOTES:
------
    First version. The format in which the data is input will
    probably change.
Requires the calculate_NS() to be defined.
"""
_concatenation = ''
_N = 0.
_S = 0.
_dict_of_interest = {}
if het_list!=None:
for _het in het_list:
_het = str(_het)
if _het in annotated_dataset:
_dict_of_interest[_het] = annotated_dataset[_het]
if het_list==None:
_dict_of_interest = annotated_dataset
if gene_list==None:
for _val in annotated_dataset.values():
_info = _val[0]
if _info[1]=='nonsynonymous':
_N+=1.
_codon = _info[4][:3]
_concatenation+=_codon
if _info[1]=='synonymous':
_S+=1.
_codon = _info[4][:3]
_concatenation+=_codon
if gene_list!=None:
for _val in _dict_of_interest.values():
_info = _val[0]
if _info[1]=='nonsynonymous' and _info[0] in gene_list:
_N+=1.
_codon = _info[4][:3]
_concatenation+=_codon
if _info[1]=='synonymous' and _info[0] in gene_list:
_S+=1.
_codon = _info[4][:3]
_concatenation+=_codon
_NS = calculate_NS(_concatenation)
_eN = 0.
_eS = 0.
for (_a,_b) in _NS:
_eN+=_a
_eS+=_b
if pars: return _N/_eN, _S/_eS, _N+_S
else: return ((_N/_eN)/(_S/_eS)), _N+_S
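# Hedged usage sketch (defined but not called): a minimal two-SNP annotation dict
# in the format shown in the docstring above. The loci and codons are invented;
# calculate_NS() must already be defined for this to run.
def _demo_get_dNdS():
    _toy_annotation = {
        '1000': [['Rv0001', 'nonsynonymous', '12', 'Thr/T-Ala/A', 'ACC-GCC']],
        '2050': [['Rv0002', 'synonymous', '33', 'Val/V-Val/V', 'GTC-GTT']],
    }
    return get_dNdS(_toy_annotation)  # (dN/dS, number of mutations used)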
def pNS_CI(data, min_freq=0.005, N=1000, time=0, CI=95, codon_info='WT_CODON', parameters=False):
"""Calcualte pNS and their confidence intervals based on specific
parameters.
INPUTS:
-------
data: pd.DataFrame, like 'ALL'
min_freq: float, minimum frequency for the allele
N: int, number of bootstrapping iterations,
    time: 0|2|4|6|8, timepoint for which to calculate
CI: int, confidence interval
codon_info: column label containing codon data
parameters: bool, return pN and pS instead of pNS
OUTPUTS:
--------
(pNS,CI_L, CI_H)
NOTES:
------
    Depends on pandas as pd, scikit-learn utilities as sku,
    numpy as np, and calculate_NS()
"""
#define indices for empirical CI
_CI_IND_L, _CI_IND_H = int((100-CI)/200.*N-1),int(-(100-CI)/200.*N)
#define recipient variables
_eNeS_results = []
_oNoS_results = []
#populate recipient variables based on parts above
for trial in range(N):
_data = sku.resample(data[(data['FREQUENCY']>=min_freq)&
(data['TIME']==time)])
_eNeS_results.append(np.sum([[x[0],x[1]] for x in calculate_NS(''.join(_data[codon_info]))],0))
_oNoS_results.append(np.sum(_data[['TYPE_NSY','TYPE_SYN']],axis=0))
#reformat the output
_eNeS_results = np.array(_eNeS_results,dtype=float)
_oNoS_results = np.array(_oNoS_results,dtype=float)
    #calculate the pN and pS as well as pNS
_pN_pS = _oNoS_results/_eNeS_results
_pNS = _pN_pS[:,0]/_pN_pS[:,1]
#Get the specific indices for the measurements at CI edges
_CI_INDS = np.argsort(_pNS)[[_CI_IND_L, _CI_IND_H]]
if parameters:
return np.median(_pN_pS, axis=0), _pN_pS[_CI_INDS[0]], _pN_pS[_CI_INDS[1]]
else:
return np.median(_pNS, axis=0), _pNS[_CI_INDS[0]], _pNS[_CI_INDS[1]]
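# Hedged usage sketch (defined but not called): bootstrap pNS on a tiny synthetic
# frame whose column names mirror the ones this function expects ('FREQUENCY',
# 'TIME', 'WT_CODON', 'TYPE_NSY', 'TYPE_SYN'). The values are made up; pandas (pd),
# sklearn utilities (sku) and calculate_NS() are assumed to be importable as
# noted in the docstring above.
def _demo_pNS_CI():
    _toy = pd.DataFrame({
        'FREQUENCY': [0.02, 0.05, 0.10, 0.01],
        'TIME': [0, 0, 0, 0],
        'WT_CODON': ['ATG', 'GTC', 'ACC', 'TGG'],
        'TYPE_NSY': [1, 0, 1, 0],
        'TYPE_SYN': [0, 1, 0, 1],
    })
    return pNS_CI(_toy, min_freq=0.005, N=100, time=0)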
#MARKOV CHAINS
def transition_matrix_PA(patient_matrix, annotation_dict, steps='Auto', min_freq=0, matrix_print=False, exclude=None,mutation_type=None):
"""Count the different types of transitions
in our patient data and estimate transition
    probabilities.
First convert the matrix to a binary matrix
reflecting present/absent. The scoring depends
on doubling the first column and subtracting
the second. Possible outcomes are:
0 - 0->0
1 - 1->1
2 - 1->0
-1 - 0->1
Incidentally the above is the order in which
counts will emerge from "Counter".
The transition probabilities are calculated as:
p(0->0) = #0/(#0+#-1)
p(1->1) = #1/(#1+#2)
p(1->0) = #2/(#0+#-1)
p(0->1) = #-1/(#1+#2)
This calculation is done on the collated counts.
INPUTS:
------
patient_matrix: np.array <- populate_poly()
annotation_dict: dict, {locus:['LOCUS_ID','synonymous/nonsynonymous'...]}
steps: list, transitions to consider.
steps[0] start transition, steps[1], number
of steps to take.
If 'Auto' it will take all the transitions
for which there are data.
min_freq: minimum frequency
matrix_print: bool, print the transition matrix
exclude: timepoints to exclude (e.g. missing data)
Note that this index should reflect the
        position within the patient_matrix.
mutation_type ('synonymous'|'nonsynonymous'|'IGR'):
only take mutations of a specific type
OUTPUT:
-------
np.array(p(0->0), p(1->1), p(1->0), p(0->1))
NOTES:
------
This function is based on:
http://www.stat.cmu.edu/~cshalizi/462/lectures/06/markov-mle.pdf
"""
#Note that the patient matrix should be a
#2D-np.array with locus at positon 1 and
#then allele frequency in the following cols.
#Start by transforming the array into a
#Boolean array
if exclude:
patient_matrix = np.delete(patient_matrix,exclude,1)
if steps=='Auto':
#Just in case we want all the data for a patient
#determine earliest timepoint with data
_start = min(np.arange(1,8)[patient_matrix[:,1:].sum(0)>0])
#determine the latest timepoint with data and calculate the # of steps
n_steps = max(np.arange(1,8)[patient_matrix[:,1:].sum(0)>0])-_start
#Turn steps into a list, where [0] is the start position and
#[1] is the number of steps.
steps = [_start,n_steps]
_yesno = np.array(patient_matrix>min_freq,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]
_result = _yesno[:,:-1]*2-_yesno[:,1:]
if mutation_type: #filter by mutation type
_ref = [float(k) for k,v in annotation_dict.items() if v[0][1]==mutation_type]
_mtf = [_ind for _ind,_locus in enumerate(patient_matrix[:,0]) if _locus in _ref]
_result = _yesno[_mtf,:-1]*2-_yesno[_mtf,1:]
#Define count variables
_c = np.array([Counter(_result.flatten()).get(x,0) for x in [0,1,2,-1]], dtype=float) #counts of 0->0, 1->1, 1->0, 0->1
    _t = np.array(_c+[_c[i] for i in [3,2,1,0]]) #totals of transitions out of each start state (denominators)
if matrix_print:
print('Transition matrix\n\t0\t1\n0\t%.3f\t%.3f\n1\t%.3f\t%.3f\n' %tuple((_c/_t)[i] for i in [0,2,3,1]))
return _c/_t
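# Worked illustration of the present/absent scoring trick used above, on a toy
# boolean matrix (not real patient data): doubling the earlier column and
# subtracting the later one encodes each step as 0 (0->0), 1 (1->1), 2 (1->0)
# or -1 (0->1). Counter is assumed to be imported as it is used above.
def _demo_PA_encoding():
    _yesno = np.array([[0, 1, 1],
                       [1, 0, 0],
                       [0, 0, 1]])
    _result = _yesno[:, :-1] * 2 - _yesno[:, 1:]
    return _result, Counter(_result.flatten())  # the counts feed the _c/_t estimate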
def transition_matrix_PL(patient_matrix, annotation_dict, steps='Auto', min_freq=0, cutoff=0.01, matrix_print=False, exclude=None, mutation_type=None):
"""Count the different types of transitions
in our patient data and estimate transition
    probabilities.
First convert the matrix to a matrix
reflecting absent(0)/present<=1%(1)/present>1%(2).
The scoring depends on tripling the first column
and subtracting the second.
Possible outcomes are:
0 - 0 -> 0
1 - 1 -> 1+
2 - 1 -> 1
3 - 1 -> 0
4 - 1+ -> 1+
5 - 1+ -> 1
6 - 1+ -> 0
-1 - 0 -> 1
-2 - 0 -> 1+
Incidentally the above is the order in which
counts will emerge from "Counter".
The transition probabilities are calculated as:
p(0->X) = #Xs/(#0+#-1+#-2)
p(1->Y) = #Ys/(#1+#2+#3)
p(1+->Z) = #Zs/(#4+#5+#6)
This calculation is done on the collated counts.
INPUTS:
------
patient_matrix: np.array <- populate_poly()
annotation_dict: dict, {locus:['LOCUS_ID','synonymous/nonsynonymous'...]}
steps: list, transitions to consider.
steps[0] start transition, steps[1], number
of steps to take.
If 'Auto' it will take all the transitions
for which there are data.
min_freq: minimum frequency
cutoff: float, cutoff for inclusion into 1+
matrix_print: bool, print the transition matrix
exclude: timepoints to exclude (e.g. missing data)
Note that this index should reflect the
        position within the patient_matrix.
mutation_type ('synonymous'|'nonsynonymous'|'IGR'):
only take mutations of a specific type
OUTPUT:
-------
np.array(p(0->0), p(1->1+), p(1->1), p(1->0),p(1+->1+), p(1+->1), p(1+->0), p(0->1+), p(0->1))
NOTES:
------
This script is based on:
http://www.stat.cmu.edu/~cshalizi/462/lectures/06/markov-mle.pdf
"""
#Note that the patient matrix should be a
#2D-np.array with locus at positon 1 and
#then allele frequency in the following cols.
#Start by transforming the array into a
#Boolean array
if exclude:
patient_matrix = np.delete(patient_matrix,exclude,1)
if steps=='Auto':
#Just in case we want all the data for a patient
n_steps = list(np.array(patient_matrix>0,dtype=int).sum(axis=0)).index(0)-2
steps = [1,n_steps] #Turn steps into a list, where [0] is the start position and [1] is the number of steps.
_yesno = np.array(patient_matrix>min_freq,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]+np.array(patient_matrix>cutoff,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]
_result = _yesno[:,:-1]*3-_yesno[:,1:]
if mutation_type: #filter by mutation type
_ref = [float(k) for k,v in annotation_dict.items() if v[0][1]==mutation_type]
_mtf = [_ind for _ind,_locus in enumerate(patient_matrix[:,0]) if _locus in _ref]
_result = _yesno[_mtf,:-1]*3-_yesno[_mtf,1:]
#Define count variables
    _c = np.array([Counter(_result.flatten()).get(x,0) for x in [0,1,2,3,4,5,6,-2,-1]],dtype=float) #counts of 0->0, 1->1+, 1->1, 1->0, 1+->1+, 1+->1, 1+->0, 0->1+, 0->1
    _t = np.array(_c+[_c[i] for i in [7,2,3,1,5,6,4,8,0]] + [_c[i] for i in [8,3,1,2,6,4,5,0,7]]) #totals of transitions out of each start state (0, 1 or 1+)
if matrix_print:
print('Transition matrix\n\t0\t1\t2\n0\t%.3f\t%.3f\t%.3f\n1\t%.3f\t%.3f\t%.3f\n2\t%.3f\t%.3f\t%.3f\n' %tuple((_c/_t)[i] for i in [0,3,6,-1,2,5,-2,1,4]))
return _c/_t
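# Same idea for the three-state (0/1/1+) chain above: tripling the earlier
# column and subtracting the later one yields the nine codes listed in the
# docstring. Toy states only.
def _demo_PL_encoding():
    _states = np.array([[0, 1, 2],
                        [2, 1, 0]])
    return _states[:, :-1] * 3 - _states[:, 1:]  # [[-1, 1], [5, 3]]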
def MC_PA_CI(patient_matrix, annotation_dict, min_freq=0, steps='Auto', exclude=None, mutation_type=None):
"""Merge transition data to re-sample and
allow the calculation of transition
probability CI.
INPUTS:
-------
patient_matrix: np.array
annotation_dict: dict, {locus:['LOCUS_ID','synonymous/nonsynonymous'...]}
min_freq: minimum frequency threshold
steps: list of steps to take into consideration
exclude: define columns to exclude
mutation_type ('synonymous'|'nonsynonymous'|'IGR'):
only take mutations of a specific type
OUTPUT:
-------
flattened np.array of simple outputs
"""
if exclude: #exclude columns for which we don't have data
patient_matrix = np.delete(patient_matrix,exclude,1)
    if steps=='Auto': #Just in case we want all the data for a patient
#determine earliest timepoint with data
_start = min(np.arange(1,8)[patient_matrix[:,1:].sum(0)>0])
#determine the latest timepoint with data and calculate the # of steps
n_steps = max(np.arange(1,8)[patient_matrix[:,1:].sum(0)>0])-_start
#Turn steps into a list, where [0] is the start position and
#[1] is the number of steps.
steps = [_start,n_steps]
_yesno = np.array(patient_matrix>min_freq,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]
_result = _yesno[:,:-1]*2-_yesno[:,1:]
if mutation_type: #filter by mutation type
_ref = [float(k) for k,v in annotation_dict.items() if v[0][1]==mutation_type]
_mtf = [_ind for _ind,_locus in enumerate(patient_matrix[:,0]) if _locus in _ref]
_result = _yesno[_mtf,:-1]*2-_yesno[_mtf,1:]
return _result.flatten()
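# Hedged sketch of how the flattened codes returned above can be turned into
# bootstrap CIs: resample the codes with replacement, recompute the four
# probabilities each time, and take empirical percentiles. The codes below are
# invented; real ones would come from MC_PA_CI().
def _demo_MC_PA_bootstrap(codes=np.array([0, 0, 1, 2, -1, 0, 1, 1]), n_boot=200):
    _probs = []
    for _ in range(n_boot):
        _sample = np.random.choice(codes, size=len(codes), replace=True)
        _c = np.array([Counter(_sample).get(x, 0) for x in [0, 1, 2, -1]], dtype=float)
        with np.errstate(divide='ignore', invalid='ignore'):
            _probs.append(_c / (_c + _c[[3, 2, 1, 0]]))
    return np.nanpercentile(_probs, [2.5, 97.5], axis=0)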
def MC_PL_CI(patient_matrix, annotation_dict, steps='Auto', min_freq=0, cutoff=0.01, exclude=None, mutation_type=None):
"""Merge transition data to re-sample and
allow the calculation of transition
probability CI.
INPUTS:
-------
patient_matrix: np.array
annotation_dict: dict, {locus:['LOCUS_ID','synonymous/nonsynonymous'...]}
steps: list of steps to take into consideration
exclude: define columns to exclude
mutation_type ('synonymous'|'nonsynonymous'|'IGR'):
only take mutations of a specific type
min_freq: minimum allele frequency
OUTPUT:
-------
flattened np.array of simple outputs
"""
if exclude:
patient_matrix = np.delete(patient_matrix,exclude,1)
if steps=='Auto':
#Just in case we want all the data for a patient
n_steps = list(np.array(patient_matrix>0,dtype=int).sum(axis=0)).index(0)-2
steps = [1,n_steps] #Turn steps into a list, where [0] is the start position and [1] is the number of steps.
_yesno = np.array(patient_matrix>min_freq,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]+np.array(patient_matrix>cutoff,dtype=int)[:,np.arange(steps[0],1+np.sum(steps))]
_result = _yesno[:,:-1]*3-_yesno[:,1:]
if mutation_type: #filter by mutation type
_ref = [float(k) for k,v in annotation_dict.items() if v[0][1]==mutation_type]
_mtf = [_ind for _ind,_locus in enumerate(patient_matrix[:,0]) if _locus in _ref]
_result = _yesno[_mtf,:-1]*3-_yesno[_mtf,1:]
return _result.flatten()
#EXCESS MUTATION ACCUMULATION
def selection_contingency_generator(genes,dataframe,exclude_check=False):
"""Generates a contingency table using a given set of
genes. It will return two tables, one representing
the status at time 0 and one covering the rest.
INPUTS:
-------
genes: list, genes of interest
dataframe: requires a df that has many of the
same categories as ALL.
exclude_check: False|list, check that all
problematic genes are removed.
OUTPUTS:
--------
nd.array, nd.array
the first array is the contingency table at t=0
    the second array is the contingency table at t!=0
NOTES:
------
Assumes the given data structure.
Return array: [[NS_in, S_in],[NS_not, S_not]]
EXAMPLE:
--------
ct0, ct1 = selection_contingency_generator(DR_set, ALL, exclude_check=excluded)
"""
_df = dataframe.copy()
if exclude_check:
genes = [x for x in genes if x not in exclude_check]
_df['INTEREST'] = [int(x in genes) for x in _df.GENE]
_a = len(np.unique(_df.LOCUS[(_df.TYPE=='NSY')&(_df.INTEREST==1)&(_df.TIME==0)]))
_a2 = len(np.unique(_df.LOCUS[(_df.TYPE=='NSY')&(_df.INTEREST==1)]))-_a
_b = len(np.unique(_df.LOCUS[(_df.TYPE=='NSY')&(_df.INTEREST==0)&(_df.TIME==0)]))
_b2 = len(np.unique(_df.LOCUS[(_df.TYPE=='NSY')&(_df.INTEREST==0)]))-_b
_c = len(np.unique(_df.LOCUS[(_df.TYPE=='SYN')&(_df.INTEREST==1)&(_df.TIME==0)]))
_c2 = len(np.unique(_df.LOCUS[(_df.TYPE=='SYN')&(_df.INTEREST==1)]))-_c
_d = len(np.unique(_df.LOCUS[(_df.TYPE=='SYN')&(_df.INTEREST==0)&(_df.TIME==0)]))
_d2 = len(np.unique(_df.LOCUS[(_df.TYPE=='SYN')&(_df.INTEREST==0)]))-_d
return np.array([[_a,_c],[_b,_d]]), np.array([[_a2,_c2],[_b2,_d2]])
def geneset_size(gene_list, locus_dict):
"""Gives you the cumulative size (in bp) of the specified
genes in the genome.
INPUTS:
-------
gene_list: list, genes of interest
locus_dict: dict, keys are genes in gene_list
value is gene length float.
OUTPUT:
-------
float, genomic size in bp.
NOTES:
------
This function only calculates the size of genes
that are present in the locus_dict, if a gene is
not present it will not contribute to the tally.
"""
total_size = 0.
for gene in gene_list:
total_size+=locus_dict.get(gene,0)
return total_size
def get_KEGG_pathway_genes(organism='mtu',pathway='mtu00010'):
"""Uses requests to query the KEGG REST API
and retrieve all the genes in a given pathway.
INPUTS:
-------
organism: str, KEGG organism code, mtu for H37Rv
pathway: str, e.g. mtu00100 is H37Rv glycolysis
OUTPUTS:
--------
list, genes in the pathway
NOTES:
------
Needs a connection and the requests module as rq
"""
_query = 'http://rest.kegg.jp/link/%s/%s' %(organism, pathway)
_fetch = rq.get(_query)
    _entries = _fetch.text.strip().split('\n')  # .text (str) rather than .content (bytes) so split('\n') works on Python 3
_genes = [x.split('\t')[-1].split(':')[-1] for x in _entries]
return _genes
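# Hedged usage sketch (defined but not called): pull the H37Rv glycolysis gene
# set. Needs a live network connection and the requests module imported as rq,
# as stated in the docstring above.
def _demo_kegg_pathway():
    return get_KEGG_pathway_genes(organism='mtu', pathway='mtu00010')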
def excess_mutation(genes,dataframe,genome_size,locus_dict,exclude_check=False,H37Rv=True):
"""Performs multiple tests to determine
whether or not the number of mutations in
a locus is higher than expected.
INPUTS:
-------
genes: list, genes of interest
dataframe: requires a df that has many of the
same categories as ALL.
genome_size: float, denominator
locus_dict: dict, keys are genes in gene_list
value is gene length float.
exclude_check: False|list, check that all
problematic genes are removed.
H37Rv: boolean, whether or not to expect
H37Rv identifiers.
OUTPUTS:
--------
dict of data and statistics
NOTES:
------
    This approach explicitly ignores mutations that fall
outside of coding regions for the sake of simplicity.
Also, the counts are nonredundant (i.e. each allele
is only counted once, even if it occurs at multiple
    timepoints).
EXAMPLE:
--------
excess_mutation(DR_set, ALL, len(MTB_GENOME), SIZE_DICT, exclude_check=excluded)
"""
_df = dataframe.copy()
if H37Rv:
genome_size = geneset_size([x for x in locus_dict.keys() if len(x) in [6,7]], locus_dict)
if exclude_check:
genes = [x for x in genes if x not in exclude_check]
genome_size-=geneset_size(exclude_check, locus_dict)
_p_hit = geneset_size(genes, locus_dict)/genome_size
_df['INTEREST'] = [int(x in genes) for x in _df.GENE]
_a = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='NSY')&(_df.INTEREST==1)&(_df.TIME==0)]))
_a2 = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='NSY')&(_df.INTEREST==1)]))-_a
_b = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='NSY')&(_df.INTEREST==0)&(_df.TIME==0)]))
_b2 = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='NSY')&(_df.INTEREST==0)]))-_b
_c = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='SYN')&(_df.INTEREST==1)&(_df.TIME==0)]))
_c2 = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='SYN')&(_df.INTEREST==1)]))-_c
_d = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='SYN')&(_df.INTEREST==0)&(_df.TIME==0)]))
_d2 = len(np.unique(_df.LOCUS[(_df.SNP_TYPE=='SYN')&(_df.INTEREST==0)]))-_d
_ct0, _ct1 = np.array([[_a,_c],[_b,_d]]), np.array([[_a2,_c2],[_b2,_d2]])
#_N = len(np.unique(_df.LOCUS))
_N = (_ct0+_ct1).sum()
_result = {}
    #Enrichment of mutations following treatment initiation, i.e. more mutations than expected once treatment has commenced
try:
_result['Enrichment'] = ss.chi2_contingency(np.vstack((_ct0.sum(1),_ct1.sum(1))),lambda_='log-likelihood')
except:
_result['Enrichment'] = ss.fisher_exact(np.vstack((_ct0.sum(1),_ct1.sum(1))))
#Excess of mutations within given region (G-test, based on probability null)
try:
_result['Excess_Gtest'] = ss.chi2_contingency(np.vstack(((_ct0+_ct1).sum(1), np.array([_N*_p_hit, _N*(1-_p_hit)]))),lambda_='log-likelihood')
except:
_result['Excess_Fisher'] = ss.fisher_exact(np.vstack(((_ct0+_ct1).sum(1), np.array([_N*_p_hit, _N*(1-_p_hit)]))))
    #Excess of mutations within given region (binomial test, based on probability null)
try:
_result['Excess_binomial'] = ss.binom_test([_a+_a2+_c+_c2,_N-(_a+_a2+_c+_c2)],p=_p_hit,alternative='greater')
except:
_result['Excess_binomial'] = 'Failed'
#Relative accumulation of nonsynonymous and synonymous polymorphisms
try:
_result['NS_binomial'] = ss.binom_test(_ct1[0],p=float(_ct1[1,0])/np.sum(_ct1[1]),alternative='greater')
except:
_result['NS_binomial'] = 'Failed'
try:
_result['Excess_binomial_treatment'] = ss.binom_test(sum(_ct1[0]), sum(_ct1),p=_p_hit,alternative='greater')
except:
_result['Excess_binomial_treatment'] = 'Failed'
try:
_result['NS_all'] = ss.chi2_contingency((_ct0+_ct1),lambda_='log-likelihood')
except:
_result['NS_all'] = ss.fisher_exact((_ct0+_ct1))
#Relative accumulation of nonsynonymous and synonymous polymorphisms following treatment
try:
_result['NS_treatment'] = ss.chi2_contingency(_ct1,lambda_='log-likelihood')
except:
_result['NS_treatment'] = ss.fisher_exact(_ct1)
#Relative accumulation of nonsynonymous and synonymous polymorphisms prior to treatment
try:
_result['NS_diagnosis'] = ss.chi2_contingency(_ct0,lambda_='log-likelihood')
except:
_result['NS_diagnosis'] = ss.fisher_exact(_ct0)
#Data used to calculate statistics
_result['Data'] = [_ct0, _ct1, _p_hit, _N, genome_size, geneset_size(genes, locus_dict)]
return _result
def ScoreData(data,mapper,output='sum'):
"""Scores a collection of data based on a score mapper.
INPUTS:
-------
data: 1darray-like
mapper: dict, items in 'data' are keys
values are int or float.
output: 'sum'|'mean'
OUTPUT:
--------
float
NOTES:
------
Expects the items in a list to be the same as keys in the
mapper. Uses dict.get() returning 0 for queries that are
not in the mapper dictionary.
EXAMPLE:
--------
E.g. a list of codons ('codons') needs to be scored for
the probability of being mutated to a nonsynonymous codon
('scores'):
codons = ['ATG','GGT']
scores = {'ATG': 1., 'GGT': 0.67}
ScoreData(codons, scores, output='mean')
>>> 0.835
"""
_score = sum([mapper.get(_x,0) for _x in data])
if output=='mean':
return float(_score)/len(data)
else:
return float(_score)
def BinomialExcess(genes,dataframe,genome_size,locus_dict,score_dict,exclude_check=False,H37Rv=True,codons='WT_CODON'):
"""Performs binomal tests to determine
whether or not the number of mutations in
a locus is higher than expected and whether
they are more likely than expected to be NSY.
INPUTS:
-------
genes: list, genes of interest
dataframe: requires a df that has many of the
same categories as ALL.
genome_size: float, denominator
locus_dict: dict, keys are genes in gene_list
value is gene length float.
score_dict: dict, keys are codons in dataframe[codons]
        value is probability of NSY float.
exclude_check: False|list, check that all
problematic genes are removed.
H37Rv: boolean, whether or not to expect
H37Rv identifiers.
codons: str, category in dataframe containing codon info.
OUTPUTS:
--------
list(p_excess_mutations[2-tail], p_excess_NSY[2-tail],
p_excess_mutations[1-tail], p_excess_NSY[1-tail])
NOTES:
------
    This approach explicitly ignores mutations that fall
outside of coding regions for the sake of simplicity.
Also, the counts are nonredundant (i.e. each allele
is only counted once, even if it occurs at multiple
    timepoints). It also needs a dataframe where wild type
codons are specified as it estimates the probability of
    observing a given number of NSY - different models of
substitution can be accounted for by using specific
score_dict.
EXAMPLE:
--------
BinomialExcess(DR_set, ALL, len(MTB_GENOME), SIZE_DICT, Pr_N, exclude_check=excluded)
"""
_df = dataframe.copy()
if H37Rv:
genome_size = geneset_size([x for x in locus_dict.keys() if len(x) in [6,7]], locus_dict)
if exclude_check:
genes = [x for x in genes if x not in exclude_check]
genome_size-=geneset_size(exclude_check, locus_dict)
_p_hit = geneset_size(genes, locus_dict)/genome_size
_df['INTEREST'] = [int(x in genes) for x in _df.GENE]
_a2 = len(np.unique(_df.LOCUS[(_df.TYPE=='NSY')&(_df.INTEREST==1)&(_df.TIME!=0)]))
_c2 = len(np.unique(_df.LOCUS[(_df.TYPE=='SYN')&(_df.INTEREST==1)&(_df.TIME!=0)]))
_l2 = len(np.unique(_df.LOCUS[(_df['TYPE'].isin(['NSY','SYN']))&(_df.TIME!=0)]))
_p_N = ScoreData(list(_df[_df['TYPE'].isin(['NSY','SYN'])].drop_duplicates('LOCUS')[codons]), score_dict, output='mean')
return [[_a2,_c2,_l2],ss.binom_test((_a2+_c2),_l2,p=_p_hit),
ss.binom_test(_a2,(_a2+_c2),p=_p_N),
ss.binom.sf((_a2+_c2)-1, _l2, _p_hit),
ss.binom.sf(_a2-1, (_a2+_c2), _p_N)]
#GENE ANNOTATION
def mutated_gene_id(SNP_position, gene_matrix):
"""Identify the location of a SNP.
INPUTS:
-------
SNP_position: locus, integer
gene_matrix: ND-array, start..stop
OUTPUT:
-------
int, upstream edge of mutated locus.
NOTES:
------
This was established to be used on our MTB_anc
    annotation. It will return the start position of
a gene to be used with REF_annotation.
REF_annotation={start:[end, orientation, locus_ID],...}
The rationale is: the only case where (start-SNP)*(end-SNP)
is negative will be when start<SNP<end. Both start<end<SNP
and SNP<start<end will produce positive values when multiplied.
NB. If the SNP falls exactly at the edge of a locus the product
will be zero. Therefore we're looking for non-positive products.
"""
return int(gene_matrix[np.product(gene_matrix-SNP_position, axis=1)<=0][0][0])
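# Tiny worked example of the sign trick described above, using toy coordinates
# rather than the real MTB_anc annotation: only the interval containing the SNP
# gives a non-positive (start-SNP)*(end-SNP) product.
def _demo_mutated_gene_id():
    _toy_matrix = np.array([[100, 200], [300, 400]])
    return mutated_gene_id(350, _toy_matrix)  # -> 300, the upstream edge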
def codon_fetch(SNP_position, gene_coordinates, genome, return_params=False):
"""Extracts the codon based on a SNP coordinate.
INPUTS:
-------
SNP_position: locus, integer
    gene_coordinates: tuple, pythonic indices
genome: string, fasta of the genome
return_params: bool, return position information
OUTPUT:
-------
codon: if coding, else IGR
NOTES:
------
This was established to be used on our MTB_anc
annotation
"""
    _gene_start = gene_coordinates[0]
    _gene_end = gene_coordinates[1]
if _gene_start<_gene_end: #Forward gene
_into_gene = SNP_position-_gene_start
_direction = 1 #set direction, 1 for F, -1 for R
if _gene_start>_gene_end: #Reverse gene
_into_gene = _gene_start-SNP_position
_direction = -1
    _codon_position = int(_into_gene)//3+1 #codon index (1-based); integer division keeps it whole
_position_in_codon = (int(_into_gene)%3+1)*_direction #base in codon [1,2,3] for F or [-1,-2,-3] for R
_slice = {1:(0,3), 2:(-1,2), 3:(-2,1), -1:(-2,1), -2:(-1,2), -3:(0,3)} #define codon locations
_begin = SNP_position+_slice[_position_in_codon][0] #where to begin slicing a genome
_end = SNP_position+_slice[_position_in_codon][1] #where to end slicing a genome
if _direction==1:
if return_params: return genome[_begin:_end], _codon_position, _position_in_codon
else: return genome[_begin:_end]
if _direction==-1:
#Need the reverse complement of the codon. Used the translate function from
#the string module.
xmz = 'ATGC' #the basis for the replacement
mzx = 'TACG'
complement=(genome[_begin:_end].translate(str.maketrans(xmz, mzx))) #generate complement
if return_params: return complement[::-1], _codon_position, _position_in_codon
else: return complement[::-1] #reverse complement
def mutation_classifier(reference_base, alternative_base):
"""
Determine mutation type: transition vs transversion
INPUTS:
-------
    reference_base: A|C|G|T reference base, upper case string
alternative_base: A|C|G|T mutated base, upper case string
OUTPUT:
-------
int: 0 for transversion, 1 for transition.
NOTES:
------
see: https://en.wikipedia.org/wiki/Transition_(genetics)
"""
_mutation_class = {'A':'G', 'C':'T', 'G':'A', 'T':'C'}
return int(_mutation_class[reference_base]==alternative_base)
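# Quick check of the coding above: A<->G and C<->T changes are transitions (1),
# anything else is a transversion (0).
def _demo_mutation_classifier():
    return mutation_classifier('A', 'G'), mutation_classifier('A', 'C')  # (1, 0)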
def SNP_annotate(SNP_position, wt_base, mut_base, locus_matrix, reference, genome, codon_table):
"""Generate a basic annotation for SNPs.
INPUTS:
-------
SNP_position: locus, integer
wt_base: str, reference base
mut_base: str, mutant base
locus_matrix: 2D-array, [[start,end],...]
reference: dict, {start:[end,direction,gene],...}
genome: genome sequence in FASTA
codon_table: {codon: AA,...}
OUTPUT:
-------
    list: [Rv, mutation_type, codon position, AA/AA, W_codon-M_codon, Ti]
if IGR: [Rv-Rv, 'IGR', -, ---, _N-M_, Ti]
if RNA: [name, 'RNA', '-', '---', W#posM, Ti]
NOTES:
------
This was established to be used on our MTB_anc
annotation and with the H37Rv_QC snp input data:
pos wt_base alt_base freq
"""
_gene_internal_id = mutated_gene_id(SNP_position, locus_matrix)
_gene = reference[_gene_internal_id][2]
if reference[_gene_internal_id][1]=='+':
_start, _end = _gene_internal_id,reference[_gene_internal_id][0] #define start and end of CDS
_wt_codon, _codon_pos, _pos_in_codon = codon_fetch(SNP_position, (_start,_end), genome, return_params=True) #get codon information
_mut_codon = _wt_codon[:abs(_pos_in_codon)-1]+mut_base+_wt_codon[abs(_pos_in_codon):] #derive the mutated codon, change the position affected by the mutation keeping the other two
#Call the mutation type
if codon_table[_wt_codon]==codon_table[_mut_codon]:
_mutation_type='synonymous'
if codon_table[_wt_codon]!=codon_table[_mut_codon]:
_mutation_type='nonsynonymous'
#Generate the information output
return [_gene, _mutation_type, _codon_pos, '%s/%s' %(codon_table[_wt_codon], codon_table[_mut_codon]), '%s-%s' %(_wt_codon, _mut_codon),mutation_classifier(wt_base, mut_base)]
if reference[_gene_internal_id][1]=='-':
_complementary_base = {'A':'T', 'C':'G', 'G':'C', 'T':'A'} #Assumes all SNP calls are done in the + orientation.
_start, _end = reference[_gene_internal_id][0]-1, _gene_internal_id+2 #define start and end of CDS, need to tweak a bit, to make sure we stay in frame. Keep an eye out.
        #The values used here were determined based on a manual inspection of one gene.
_wt_codon, _codon_pos, _pos_in_codon = codon_fetch(SNP_position, (_start,_end), genome, return_params=True) #get codon information
_mut_codon = _wt_codon[:abs(_pos_in_codon)-1]+_complementary_base[mut_base]+_wt_codon[abs(_pos_in_codon):] #derive the mutated codon, change the position affected by the mutation keeping the other two
#Call the mutation type
if codon_table[_wt_codon]==codon_table[_mut_codon]:
_mutation_type='synonymous'
if codon_table[_wt_codon]!=codon_table[_mut_codon]:
_mutation_type='nonsynonymous'
#Generate the information output
return [_gene, _mutation_type, _codon_pos, '%s/%s' %(codon_table[_wt_codon], codon_table[_mut_codon]), '%s-%s' %(_wt_codon, _mut_codon),mutation_classifier(wt_base, mut_base)]
if reference[_gene_internal_id][1]=='I':
_gene = reference[_gene_internal_id][2].split('_')[1] #Modify _gene to reflect only the position of the intergenic region.
_len_to_symbol = {3:'+',6:'+', 7:'-', 9:'..'} #define dict to be used for the construction of the IGR tag string. NB this trick does not work for CCDC5079. In that case '..' is added on both sides of the gene...
_up,_down = _len_to_symbol.get(len(_gene.split('-')[0]),'..'), _len_to_symbol.get(len(_gene.split('-')[1]),'..') #get +/- for F and R genes respectively
_IGR_tag = '%s%d-%d%s' %(_up,SNP_position-_gene_internal_id,reference[_gene_internal_id][0]-SNP_position,_down) #generate IGR tag: +/- denotes whether the upstream or downstream if F or R. Distance from start/end of neighbouring genes
return [_gene, 'IGR', '-', '---', _IGR_tag, mutation_classifier(wt_base, mut_base)] #define output list
if reference[_gene_internal_id][1]=='R':
_start, _end = _gene_internal_id,reference[_gene_internal_id][0] #get start and stop for ncRNA
_into_gene = SNP_position-_start #get position in ncRNA
_RNA_tag = '%s%d%s' %(wt_base, _into_gene, mut_base)
return [_gene, 'RNA', '-', '---', _RNA_tag, mutation_classifier(wt_base, mut_base)] #define output list
#Define functions
def logistic(x, K, r, N0):
"""Generate a logistic curve
Input
-------
x: 1D array
K: carrying capacity
r: Growth rate
N0: starting population
Output
-------
Calculates the population at time x given the parameters using a simple logistic model
adapted from wikipedia: http://en.wikipedia.org/wiki/Logistic_function
Notes
-------
Made to be used with scipy.optimize.curve_fit
"""
return (K*N0*np.exp(r*x))/(K+N0*(np.exp(r*x)-1))
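# Hedged sketch of the curve_fit pairing mentioned in the docstring: fit the
# logistic model to noisy synthetic growth data. scipy is assumed to be
# available; the parameter values and noise level are illustrative only.
def _demo_fit_logistic():
    from scipy.optimize import curve_fit
    _x = np.arange(50)
    _y = logistic(_x, 1e5, np.log(2), 100.) * np.random.normal(1.0, 0.02, len(_x))
    _popt, _pcov = curve_fit(logistic, _x, _y, p0=[1e5, 0.5, 100.])
    return _popt  # fitted (K, r, N0)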
def exp_decay(x, N, k):
"""Calculate the exponential decay of a number.
Input
-------
x: 1D array
N: Starting population
k: exponential decay constant
Output
-------
1D array
Notes
-------
    Adapted from: http://en.wikipedia.org/wiki/Exponential_decay
"""
return N*np.exp(-x/k)
def demo_dynamic(g, N0, r, K, k, point_A, point_S, r_=None, K_=None):
"""Demographic function of growth, cell death, sampling and
regrowth.
INPUT:
------
    g: number of generations to be considered
N0: starting population (logistic)
r: growth rate (logistic)
K: carrying capacity (logistic)
k: decay rate (exponential decay)
point_A: generation of effective treatment
point_S: generation of sampling
r_: growth rate of re-growth
K_: carrying capacity of re-growth
OUTPUT:
-------
Population size dynamics
NOTES:
------
Modelled on:
http://simupop.sourceforge.net/manual_release/build/userGuide_ch8_sec2.html
therefore expected to be called as:
demo_func = demo_dynamic(101, 100., np.log(2), 100000., 15., 30, 60)
demo_func(70) : population size at generation 70
"""
if r_==None: r_ = r
if K_==None: K_ = K
_unperturbed = logistic(np.arange(g), K, r, N0)
_decayed = exp_decay(np.arange(g), _unperturbed[point_A], k)
_regrown = logistic(np.arange(g), K_, r_, _decayed[point_S-point_A]*0.001) #1/1000 dilution
_overall = np.concatenate((_unperturbed[:point_A], _decayed[:point_S-point_A], _regrown))
def snipett(gen):
return _overall[gen]
return snipett
| gpl-3.0 |
liberatorqjw/scikit-learn | sklearn/feature_extraction/hashing.py | 29 | 5648 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : NumPy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
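# Small usage sketch (defined but not called): hash two dict samples into a
# 10-column sparse matrix with the "dict" input_type documented above. Note that
# value signs may be flipped unless non_negative=True.
def _demo_feature_hasher():
    h = FeatureHasher(n_features=10)
    X = h.transform([{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}])
    return X.toarray()  # shape (2, 10)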
| bsd-3-clause |
mahak/spark | python/pyspark/pandas/tests/test_dataframe_spark_io.py | 14 | 19999 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import glob
import os
import numpy as np
import pandas as pd
import pyarrow as pa
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class DataFrameSparkIOTest(PandasOnSparkTestCase, TestUtils):
"""Test cases for big data I/O using Spark."""
@property
def test_column_order(self):
return ["i32", "i64", "f", "bhello"]
@property
def test_pdf(self):
pdf = pd.DataFrame(
{
"i32": np.arange(20, dtype=np.int32) % 3,
"i64": np.arange(20, dtype=np.int64) % 5,
"f": np.arange(20, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=20).astype("O"),
},
columns=self.test_column_order,
index=np.random.rand(20),
)
return pdf
def test_parquet_read(self):
with self.temp_dir() as tmp:
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.parquet(tmp, mode="overwrite")
def check(columns, expected):
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp, columns=columns)
actual = ps.read_parquet(tmp, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
check(None, data)
check(["i32", "i64"], data[["i32", "i64"]])
check(["i64", "i32"], data[["i64", "i32"]])
if LooseVersion(pa.__version__) < LooseVersion("1.0.0"):
# TODO: `pd.read_parquet()` changed the behavior due to PyArrow 1.0.0.
# We might want to adjust the behavior. Let's see how pandas handles it.
check(("i32", "i64"), data[["i32", "i64"]])
check(["a", "b", "i32", "i64"], data[["i32", "i64"]])
check([], pd.DataFrame([]))
check(["a"], pd.DataFrame([]))
check("i32", pd.DataFrame([]))
check("float", data[["f"]])
# check with pyspark patch.
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp)
else:
expected = data
actual = ps.read_parquet(tmp)
self.assertPandasEqual(expected, actual.to_pandas())
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_parquet(tmp, index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_parquet_read_with_pandas_metadata(self):
with self.temp_dir() as tmp:
expected1 = self.test_pdf
path1 = "{}/file1.parquet".format(tmp)
expected1.to_parquet(path1)
self.assert_eq(ps.read_parquet(path1, pandas_metadata=True), expected1)
expected2 = expected1.reset_index()
path2 = "{}/file2.parquet".format(tmp)
expected2.to_parquet(path2)
self.assert_eq(ps.read_parquet(path2, pandas_metadata=True), expected2)
expected3 = expected2.set_index("index", append=True)
path3 = "{}/file3.parquet".format(tmp)
expected3.to_parquet(path3)
self.assert_eq(ps.read_parquet(path3, pandas_metadata=True), expected3)
def test_parquet_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_parquet(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_parquet(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
def test_table(self):
with self.table("test_table"):
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.spark.to_table("test_table", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_table("test_table", mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["bhello"])[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["bhello"])[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["i32", "bhello"])[["f", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["i32", "bhello"])[["f", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_spark_io(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_spark_io(tmp, format="json", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_spark_io(
tmp, format="json", mode="overwrite", partition_cols=["i32", "bhello"]
)
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(path=tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
col_order = ["f", "i32", "i64"]
expected_idx = expected.set_index("bhello")[col_order]
actual_idx = ps.read_spark_io(tmp, format="json", index_col="bhello")[col_order]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
@unittest.skip("openpyxl")
def test_read_excel(self):
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path1)
self.assert_eq(ps.read_excel(open(path1, "rb")), pd.read_excel(open(path1, "rb")))
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0),
pd.read_excel(open(path1, "rb"), index_col=0),
)
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
pd.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(path1), pd.read_excel(path1))
self.assert_eq(ps.read_excel(path1, index_col=0), pd.read_excel(path1, index_col=0))
self.assert_eq(
ps.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path1, index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(tmp), pd.read_excel(path1))
path2 = "{}/file2.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path2)
self.assert_eq(
ps.read_excel(tmp, index_col=0).sort_index(),
pd.concat(
[pd.read_excel(path1, index_col=0), pd.read_excel(path2, index_col=0)]
).sort_index(),
)
self.assert_eq(
ps.read_excel(tmp, index_col=0, squeeze=True).sort_index(),
pd.concat(
[
pd.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path2, index_col=0, squeeze=True),
]
).sort_index(),
)
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
with pd.ExcelWriter(path1) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
sheet_names = [["Sheet_name_1", "Sheet_name_2"], None]
pdfs1 = pd.read_excel(open(path1, "rb"), sheet_name=None, index_col=0)
pdfs1_squeezed = pd.read_excel(
open(path1, "rb"), sheet_name=None, index_col=0, squeeze=True
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(open(path1, "rb"), sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(
open(path1, "rb"), sheet_name=sheet_name, index_col=0, squeeze=True
)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
self.assert_eq(
ps.read_excel(tmp, index_col=0, sheet_name="Sheet_name_2"),
pdfs1["Sheet_name_2"],
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
path2 = "{}/file2.xlsx".format(tmp)
with pd.ExcelWriter(path2) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
pdfs2 = pd.read_excel(path2, sheet_name=None, index_col=0)
pdfs2_squeezed = pd.read_excel(path2, sheet_name=None, index_col=0, squeeze=True)
self.assert_eq(
ps.read_excel(tmp, sheet_name="Sheet_name_2", index_col=0).sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
self.assert_eq(
ps.read_excel(
tmp, sheet_name="Sheet_name_2", index_col=0, squeeze=True
).sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat([pdfs1["Sheet_name_1"], pdfs2["Sheet_name_1"]]).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_1"], pdfs2_squeezed["Sheet_name_1"]]
).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
def test_read_orc(self):
with self.temp_dir() as tmp:
path = "{}/file1.orc".format(tmp)
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.orc(path, mode="overwrite")
# `spark.write.orc` create a directory contains distributed orc files.
# But pandas only can read from file, not directory. Therefore, we need orc file path.
orc_file_path = glob.glob(os.path.join(path, "*.orc"))[0]
expected = data.reset_index()[data.columns]
actual = ps.read_orc(path)
self.assertPandasEqual(expected, actual.to_pandas())
# columns
columns = ["i32", "i64"]
expected = data.reset_index()[columns]
actual = ps.read_orc(path, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
# index_col
expected = data.set_index("i32")
actual = ps.read_orc(path, index_col="i32")
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])
actual = ps.read_orc(path, index_col=["i32", "f"])
self.assert_eq(actual, expected)
# index_col with columns
expected = data.set_index("i32")[["i64", "bhello"]]
actual = ps.read_orc(path, index_col=["i32"], columns=["i64", "bhello"])
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])[["bhello", "i64"]]
actual = ps.read_orc(path, index_col=["i32", "f"], columns=["bhello", "i64"])
self.assert_eq(actual, expected)
msg = "Unknown column name 'i'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns="i32")
msg = "Unknown column name 'i34'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns=["i34", "i64"])
def test_orc_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_orc(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_orc(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_spark_io import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
ronaldahmed/labor-market-demand-analysis | shallow parsing models/utils.py | 1 | 37005 | # -*- coding: utf-8 -*-
__author__ = 'ronald'
import os, sys
import time
import re, string
import pymongo
from pymongo import MongoClient
import nltk
from nltk import UnigramTagger, BigramTagger, TrigramTagger
from nltk.tag.hmm import HiddenMarkovModelTagger
from nltk.probability import *
import pickle
import ancora
import pdb, ipdb
EXT_CORPUS_PATH = '../../nlp/'
START = '_START_'
END = '_END_'
START_TAG = '<START>'
END_TAG = '<STOP>'
BR = '**'
RARE = "<RARE>"
NO_LABELS = [
START_TAG,
END_TAG,
BR,
RARE,
]
############################################################################################
GAZETTERS_DIR = 'external_gazetters/'
############################################################################################
vinetas = ['?',
           '…',  # add here + bullets that are regex special characters
           '+',
           '.',
           '~',
           '*',  # limit for regex-special bullets
           '•',
           'º',
           '»',
           '·',
           '°',
           '¬',
           'ª',
           '¨',
           '§',
           'ð',
           '–',
           '×',
           '-',  # ALWAYS LAST -- DO NOT MOVE
]
thr = vinetas.index('*')
norm_vineta = []
for (i, vin) in enumerate(vinetas):
if i > thr:
norm_vineta.append(r'\n\s*%s+' % vin)
else:
norm_vineta.append(r'\n\s*\%s+' % vin)
# norm_vineta.append(r'\n\s*[0-9]{0,2}[.)]-?')
norm_vineta.append(r'^ *\d+[.)-](?:\d[.)-]*)*') # match any number in the format 1.12.12.-
norm_vineta.append(r'^ *[a-zA-Z]\d*[.)-](?:[a-zA-Z]?\d*[.)-]+)*') # match a) A) a.1) a.2.- etc.
roman = r'(?:X(?:X(?:V(?:I(?:I?I)?)?|X(?:I(?:I?I)?)?|I(?:[VX]|I?I)?)?|V(?:I(?:I?I)?)?|I(?:[VX]|I?I)?)?|' \
r'V(?:I(?:I?I)?)?|I(?:[VX]|I?I)?)'
norm_vineta.append(r'^ *' + roman + r'[\da-zA-Z]{0,2}[.)-](' + roman + '?[.)-]*[\da-zA-Z]{0,2}[.)-]+)*')
# REGEX CASES
# Normalize bullet markers
norm_vineta = re.compile('|'.join(norm_vineta), re.UNICODE | re.MULTILINE)
# These characters are ALWAYS bullets when they appear
unicode_vineta = re.compile('[•º»·°¬ª¨§~ð–×]', re.UNICODE)
META_VINETA = '*'
# clean up blank lines
blank_line = re.compile(r'\n(\s*[.:,;-]*\s*\n)*', re.UNICODE)
# Money case
# money = re.compile(r'(?P<total>(?P<currency>[sS$])\s*/\s*\.?\s*(?P<number>[0-9]+))')
soles = r'(?P<totalSoles>(?P<currencySoles>[sS])(?=.*[.,/])\s*/?\s*\.?\s*/?\s*(?P<numberSoles>(\d+ ?[.,]? ?)+))'
dolar = r'(?P<totalDolar>(?P<currencyDolar>[$])\s*/?\s*\.?\s*/?\s*(?P<numberDolar>(\d+ ?[.,]? ?)+))'
money = re.compile(soles + r'|' + dolar)
dinero = re.compile(r'(?P<number>(\d+ ?[.,]? ?)+)')
hora = re.compile(r'(^|\s)\d{1,2} ?: ?\d{1,2}')
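# Hedged illustration of the currency pattern above on a made-up snippet; group
# names follow the regex definitions ('numberSoles' for the S/. branch).
def _demo_money_regex():
    _m = money.search('Sueldo: S/. 1500 mensuales')
    return _m.group('numberSoles').strip() if _m else None  # '1500' here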
# Normalize quotation marks
comillas = ['"', "'", '`', '“', '”', '«', '»', '´']  # '¨' is simply stripped
norm_comillas = re.compile(r'[%s]+' % ''.join(comillas), re.UNICODE)
META_COMILLA = '"'
# collapse duplicated punctuation
doubled = [',', ';', ':', '_', '!', '¡', '?', '¿'] + vinetas[:vinetas.index('+')] + vinetas[vinetas.index('+') + 1:]
clean_doubled = re.compile(r'(?P<multi>[%s]+)' % (''.join(doubled)), re.UNICODE)
# ACCENT (TILDE) CORRECTIONS
tildes = [
    ('à', 'á'),
    ('è', 'é'),
    ('ì', 'í'),
    ('ò', 'ó'),
    ('ù', 'ú'),
    ('À', 'Á'),
    ('È', 'É'),
    ('Ì', 'Í'),
    ('Ò', 'Ó'),
    ('Ù', 'Ú'),
    ('úu', 'ú'),
    ('çc', 'c'),
    ('étc', 'etc'),
    ('¸', ' '),
    ('®', ''),
    ('©', '@'),
    ('ä', 'a'),
    ('ë', 'e'),
    ('ï', 'i'),
    ('ö', 'o'),
    ('ü', 'u'),
    ('Ä', 'A'),
    ('Ë', 'E'),
    ('Ï', 'I'),
    ('Ö', 'O'),
    ('Ü', 'U'),
    ('ÿ', 'y'),
    ('â', 'á'),
    ('ê', 'é'),
    ('î', 'í'),
    ('ô', 'ó'),
    ('û', 'ú'),
    ('Â', 'Á'),
    ('Ê', 'É'),
    ('Î', 'Í'),
    ('Ô', 'Ó'),
    ('Û', 'Ú'),
]
########################################################################################################################################################################################
def separate_number(match):
for key in match.groupdict():
if key == 'prefix':
return match.group('prefix') + ' ' + match.group('number')
if len(match.group('number')) == 1 and len(match.group('word')) == 1:
return match.group('number') + match.group('word')
return match.group('number') + ' ' + match.group('word')
def separate_suffix(match):
return match.group('word') + ' '
def clean_space(text):
import re
text = re.sub('[ \r\t\f\v]+', ' ', text)
text = re.sub('(\n+ *)+', '\n', text)
return text
def corrections_tokenizer(text):
# replace dash punctuation – -> -
text = text.replace('–', '-')
text = text.replace('…', '.')
# accent-mark corrections
for wrong, good in tildes:
text = text.replace(wrong, good)
##########################################
pattern3 = re.compile(r'\b(?P<first>[0-9]{1,2})y(?P<second>[0-9]{1,2})\b') # 1y30, 08y30
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + match.group('first') + ':' + match.group('second') + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
##########################################
pat_number = re.compile(r'(?P<number>[\d]{1,2})(?P<word>[a-zA-Z]+)') # 6meses 1hora
text = pat_number.sub(separate_number, text)
text = clean_space(text)
#ipdb.set_trace()
##########################################
pattern3 = re.compile(r'\b[oó]\s*/\s*[yý]\b') # y/0
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + 'y/o' + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
##########################################
pattern3 = re.compile(r'\b[yeoý]\s*7\s*[opó0]\b')
# y/0
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + 'y/o' + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
##########################################
pattern3 = re.compile(r'\b[yeoý]\s*/\s*[pó0]\b') # y/0
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + 'y/o' + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
##########################################
pattern3 = re.compile(r'(?P<first>[a-z]) ?/ (?P<second>[a-z])') # y / o y/ o
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + match.group('first') + '/' + match.group('second') + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
pattern3 = re.compile(r'(?P<first>[a-z]) / ?(?P<second>[a-z])') # y / o y /o
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + match.group('first') + '/' + match.group('second') + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
############################################
# pat_parenthesis = re.compile(r'^[(] ?([aA][sS]|[eE][sS]|[aA]|[oO]|[oO][sS]) ?[)]$')
pat_parenthesis = re.compile(r'(?P<word>[a-zA-Z]{3,} ?)(?P<suffix>[(] ?[a-zA-z]{0,2} ?[)])')
text = pat_parenthesis.sub(separate_suffix, text)
text = clean_space(text)
pat_slash = re.compile(r'(?P<word>[a-zA-Z]{3,} ?)(?P<suffix>[/] ?([a-zA-z]{1,2}) )')
text = pat_slash.sub(separate_suffix, text)
text = clean_space(text)
###########################################
pat_number = re.compile(r'(?P<prefix>[nN]°)(?P<number>\d+)') # N°123
text = pat_number.sub(separate_number, text)
###########################################
pattern3 = re.compile(r'x{3,}') # xxxxxxxxxxxxxxxxxxxx
match = pattern3.search(text)
while match:
ntext = text[:match.start()] + 'xxx' + text[match.end():]
match = pattern3.search(ntext)
if text == ntext:
break
text = ntext
##########################################
text = clean_space(text)
return text
def specialCases(words):
ans = []
change = False
dolar = False
for (i, word) in enumerate(words):
# strip unintended acute accents
word = word.strip('´')
match = money.search(word)
if match: # is it currency?
curr = match.group('currencySoles')
if curr is None:
curr = match.group('currencyDolar')
if curr.lower() == 's' and '/.' not in word:
word = word.replace('/', '/.')
change = True
ans.append(word)
continue
if word == '$':
dolar = True
ans.append(word)
continue
if dolar:
match = dinero.search(word)
if match is None:
dolar = False
ans.append(word)
continue
ans[-1] += word
change = True
dolar = False
continue
if '#' == word:
if i - 1 >= 0 and len(ans) > 0:
if ans[-1] == 'c' or ans[-1] == 'C':
ans[-1] += word
change = True
else:
ans.append(word)
continue
if '*' in word and len(word) > 1:
splitWord = word.split('*')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('*')
change = True
ans.pop()
continue
if '¡' in word and len(word) > 1:
splitWord = word.split('¡')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('¡')
change = True
ans.pop()
continue
if '¿' in word and len(word) > 1:
splitWord = word.split('¿')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('¿')
change = True
ans.pop()
continue
if ':' in word and len(word) > 1:
splitWord = word.split(':')
match = hora.search(word)
if match:
ans.append(word)
continue
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append(':')
change = True
ans.pop()
continue
if '_' in word and len(word) > 1:
splitWord = word.split('_')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('_')
change = True
ans.pop()
continue
if '\\' in word and len(word) > 1:
splitWord = word.split('\\')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('\\')
change = True
ans.pop()
continue
if '\'' in word and len(word) > 1:
splitWord = word.split('\'')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('\'')
change = True
ans.pop()
continue
if '|' in word and len(word) > 1:
splitWord = word.split('|')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('|')
change = True
ans.pop()
continue
if '/' in word and len(word) > 1:
if word.count('/') >= 2:
splitWord = word.split('/')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('/')
change = True
ans.pop()
else:
slashPos = word.find('/')
if len(word[:slashPos]) > 1 and len(word[slashPos + 1:]) > 1:
ans.extend([word[:slashPos], '/', word[slashPos + 1:]])
change = True
elif len(word[:slashPos]) == 1 and len(word[slashPos + 1:]) == 1:
ans.append(word)
else:
if word[:slashPos]:
ans.append(word[:slashPos])
ans.append('/')
if word[slashPos + 1:]:
ans.append(word[slashPos + 1:])
change = True
continue
if ',' in word and len(word) > 1:
splitWord = word.split(',')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append(',')
change = True
ans.pop()
continue
if '-' in word and len(word) > 1:
splitWord = word.split('-')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('-')
change = True
ans.pop()
continue
if '+' in word and len(word) > 1:
if ('c++' in word) or ('C++' in word):
ans.append(word)
continue
splitWord = word.split('+')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('+')
change = True
ans.pop()
continue
if '.' in word and len(word) > 1:
#print('asdasd')
if word.count('.') > 1:
if any([word == '...',
word.lower() == 'a.m.',
word.lower() == 'p.m.',
]): # do not split these tokens
ans.append(word)
continue
splitWord = word.split('.')
for sw in splitWord:
if len(sw) > 0:
ans.append(sw)
ans.append('.')
change = True
ans.pop()
else:
#print('asdasd')
if word == 'm.' or '.net' == word:
ans.append(word)
continue
dotPos = word.find('.')
if len(word[:dotPos]) >= 1 and len(word[dotPos + 1:]) >= 1:
if word[:dotPos].isdigit() and word[dotPos + 1:].isdigit():
#print(word)
ans.append(word)
else:
ans.extend([word[:dotPos], '.', word[dotPos + 1:]])
#print(word[:dotPos], '.', word[dotPos + 1:])
change = True
elif dotPos == len(word) - 1:
ans.extend([word[:-1], '.'])
#print(word[:-1], '.')
change = True
else:
ans.extend(['.', word[1:]])
#print('.', word[1:])
change = True
continue
ans.append(word)
return (ans, change)
def tokenizer(text):
'''
:param text: raw text to tokenize
:return: list of tokenized sentences, each a list of words
'''
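# Usage sketch (illustrative input; the exact tokens depend on the regex rules
# defined above). Each returned element corresponds to one line/sentence and is
# itself a list of token strings:
# sents = tokenizer("Requisitos:\n- Ingles avanzado\n- Excel")
# for sent in sents:
#     print(sent)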
res = []
if type(text) != str:
text = text.decode('utf8')
text = clean_space(text)
# Clean up blank lines
text = blank_line.sub('\n', text)
text = list(text)
import unicodedata
for i in range(len(text)):
try:
text[i].encode('latin-1')
if text[i] != '\n' and unicodedata.category(text[i])[0] == 'C':
text[i] = '*'
except UnicodeEncodeError:
text[i] = '*'
if text[-1] == '*':
text.pop()
text = "".join(text)
text = corrections_tokenizer(text)
# case: duplicated bullet characters
match = clean_doubled.search(text)
text = list(text)
if text[-1] == '*':
text.pop()
text = "".join(text)
temp = ''
while (match):
multis = match.group('multi')
f = text.find(multis)
temp += text[:f] + multis[0]
text = text[f + len(multis):]
match = clean_doubled.search(text)
text = temp + text
# case: currency amounts
match = money.search(text)
temp = ''
while (match):
total = match.group('totalSoles')
if total is None:
total = match.group('totalDolar')
curr = match.group('currencySoles')
if curr is None:
curr = match.group('currencyDolar')
number = match.group('numberSoles')
if number is None:
number = match.group('numberDolar')
f = text.find(total)
sub_text = ''
if curr.lower() == 's':
sub_text = 'S/'
else:
sub_text = curr
sub_text += number.replace(' ', '')
temp += text[:f] + ' ' + sub_text + ' '
text = text[f + len(total):]
match = money.search(text)
text = temp + text
text = clean_space(text)
# normalize bullet characters
text = norm_vineta.sub('\n' + META_VINETA, text)
text = unicode_vineta.sub(META_VINETA, text) # always a bullet, but without a preceding \n
# normalize quotation marks
text = norm_comillas.sub(META_COMILLA, text)
# sentence tokenizer: split on \n
sents = [line + ' ' for line in text.split('\n') if len(line.strip(' ')) > 0]
for line in sents:
temp = []
for chunk in line.split(META_COMILLA):
temp.extend(nltk.word_tokenize(chunk))
temp.append(META_COMILLA)
temp.pop()
## special cases here
(temp, change) = specialCases(temp)
#print(temp)
while change:
(temp, change) = specialCases(temp)
res.append(temp)
res = [r for r in res if len(r) > 0]
return res
########################################################################################################################################################################################
def saveModel(model, name='model'):
with open(name + '_model.pickle', 'wb') as fd:
pickle.dump(model, fd, protocol=pickle.HIGHEST_PROTOCOL)
def uploadModel(name_model):
# Load tagger
with open(name_model + '.pickle', 'rb') as fd:
tagger = pickle.load(fd)
return tagger
########################################################################################################################################################################################
def simplifyTagset(data):
res = []
for doc in data:
new_sent = []
for meta in doc:
word = meta[0]
pos = meta[1].lower()
new_pos = pos
if pos == 'unk':
if word.isdigit():
new_pos = 'z'
else:
new_pos = 'np00'
if pos[0] == 'a': # adjetivo
new_pos = pos[:2] + pos[3:5]
elif pos[0] == 'd': # determinante
new_pos = pos[:5]
elif pos[0] == 'n': # sustantivo
new_pos = pos[:4]
elif pos[0] == 'v': # verbo
new_pos = pos[:3]
elif pos[0] == 'p': # pronombre
new_pos = pos[:5]
if pos[1] == 'e': # PE -> PT
new_pos = list(new_pos)
new_pos[1] = 't'
new_pos = ''.join(new_pos)
elif pos[0] == 's': # preposicion
new_pos = pos[:2]
elif pos[0] == 'f': # puntuacion
if pos[:2] == 'fa' or pos[:2] == 'fi':
new_pos = 'fa'
if (pos[:2] == 'fc' or pos[:2] == 'fl') and len(pos) > 2:
new_pos = 'fl'
if pos == 'fc' or pos == 'fx':
new_pos = 'fc'
if pos == 'fe' or pos[:2] == 'fr':
new_pos = 'fe'
elif pos[0] == 'z': # numerico
if pos == 'zd':
new_pos = 'z'
# split multi-word tokens
try:
words = word.split('_')
except:
pdb.set_trace()
for w in words:
new_sent.append(tuple([w, new_pos]))
# build new data
res.append(new_sent)
return res
def getDataAncora(max_docs='inf'):
# get data
# Data : list of documents
# doc : list of (word, tag) tuples | sentences separated by the tuple (BR, BR_LABEL)
reader = ancora.AncoraCorpusReader(EXT_CORPUS_PATH + 'ancora-2.0/')
docs = reader.tagged_sents(max_docs)
data = []
for doc in docs:
jd = []
doc = simplifyTagset(doc)
for sent in doc:
jd.extend(sent)
jd.append(tuple([BR, BR_LABEL]))
jd.pop()
data.append(jd)
return data
def getDataWikicorpus(max_docs='inf'):
res = []
sent = []
cont = 0
doc_cont = 0
with open(EXT_CORPUS_PATH + 'wikicorpus/data_wc') as file:
for line in file:
if '<doc' not in line and '</doc' not in line:
line = line.strip('\n')
if line:
(multiwords, lemma, pos, num) = line.strip('\n').split(' ')
for word in multiwords.split('_'):
sent.append(tuple([word, pos.lower()]))
elif len(sent) > 0:
res.append(list(sent))
doc_cont += 1
if cont % 10 == 0:
print("--->", cont)
cont += 1
if max_docs != 'inf' and doc_cont == max_docs:
break
sent = []
return res
########################################################################################################################################################################################
def train_and_test(train, test, est):
'''
:train : training dataset
:test : testing dataset
:est : NLTK Prob object
'''
tag_set = list(set([tag for sent in train for (word, tag) in sent]))
symbols = list(set([word for sent in train for (word, tag) in sent]))
# trainer HMM
trainer = nltk.HiddenMarkovModelTrainer(tag_set, symbols)
hmm = trainer.train_supervised(train, estimator=est)
res = 100 * hmm.evaluate(test)
return res
# probability estimators
mle = lambda fd, bins: MLEProbDist(fd)
laplace = LaplaceProbDist
ele = ELEProbDist
witten = WittenBellProbDist
gt = lambda fd, bins: SimpleGoodTuringProbDist(fd, bins=1e5)
def lidstone(gamma):
return lambda fd, bins: LidstoneProbDist(fd, gamma, bins)
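# Usage sketch for the estimators above (hypothetical corpus size and split,
# not part of the original experiments): any of these callables can be passed
# as `est` to train_and_test, e.g.
# data = getDataAncora(max_docs=100)
# cut = int(0.9 * len(data))
# acc = train_and_test(data[:cut], data[cut:], lidstone(0.1))
# print("HMM tagging accuracy: %.2f%%" % acc)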
########################################################################################################################################################################################
## EXTENDED FEATURES : GAZETTERS
from nltk.stem.snowball import SpanishStemmer
span_stemmer = SpanishStemmer()
def getCareerGazetter():
'''
:return: return list with STEMMED careers unigrams
'''
careers = [span_stemmer.stem(line.strip()) for line in open(os.path.join(GAZETTERS_DIR, 'carreras'))]
careers = [w for w in careers if len(w) > 3]
careers.extend(['mina', 'mecan', 'moda', 'ing'])
careers = list(set(careers))
return careers
def getPlacesGazetter():
return list(set([line.strip() for line in open(os.path.join(GAZETTERS_DIR, 'lugares'))]))
########################################################################################################################################################################################
import codecs
import gzip
from sequences.label_dictionary import *
from sequences.sequence import *
from sequences.sequence_list import *
from os.path import dirname
import numpy as np
from sklearn.cross_validation import train_test_split
## Directories where the data files are located.
careers_dir = "careers tagged/"
random_dir = "random/"
WINDOW = 3
TAGS_PROY = ['AREA', 'REQ', 'JOB', 'CARR']
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
class JobDBCorpus(object):
def __init__(self):
# Word dictionary.
self.word_dict = LabelDictionary()
self.pos_dict = LabelDictionary(['nc'])
self.ne_dict = LabelDictionary()
self.ne_dict.add(BR)
# Initialize sequence list.
self.sequence_list = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
# Initialize word & tag dicts
self.word_dict.add(START)
self.word_dict.add(END)
self.ne_dict.add(START_TAG)
self.ne_dict.add(END_TAG)
self.pos_dict.add(START_TAG)
self.pos_dict.add(END_TAG)
def read_sequence_list(self, target='BIO', START_END_TAGS=True, entities=TAGS_PROY):
'''
:param target: BIO : BIO tagging, Y = B,I,O
NE : Y = NE names
:return: SequenceList with one tagged sequence per document
'''
seq_list = []
file_ids = []
for i in range(1, 401):
sent_x = []
sent_y = []
sent_pos = []
if START_END_TAGS:
sent_x = [START, START]
sent_y = [START_TAG, START_TAG]
sent_pos = [START_TAG, START_TAG]
for line in open(careers_dir + str(i) + '.tsv'):
line = line.strip('\n')
x = ''
y = ''
pos = ''
if len(line) > 0:
temp = line.split('\t')
pos = temp[1]
x = temp[0]
if temp[-1][2:] in entities:
if target == 'BIO':
y = temp[-1][0]
else:
y = temp[-1] # temp[-1][2:]
else:
y = 'O'
else:
x, pos, y = (BR, BR, BR)
if x not in self.word_dict:
self.word_dict.add(x)
if y not in self.ne_dict:
self.ne_dict.add(y)
if pos not in self.pos_dict:
self.pos_dict.add(pos)
sent_x.append(x)
sent_y.append(y)
sent_pos.append(pos)
if sent_x[-1] == BR:
sent_x.pop()
sent_y.pop()
sent_pos.pop()
if START_END_TAGS:
sent_x.append(END)
sent_y.append(END_TAG)
sent_pos.append(END_TAG)
seq_list.append([sent_x, sent_y, sent_pos])
file_ids.append("car_tag_" + str(i))
for i in range(1, 401):
sent_x = []
sent_y = []
sent_pos = []
if START_END_TAGS:
sent_x = [START, START]
sent_y = [START_TAG, START_TAG]
sent_pos = [START_TAG, START_TAG]
for line in open(random_dir + str(i) + '.tsv'):
line = line.strip('\n')
x = ''
y = ''
pos = ''
if len(line) > 0:
temp = line.split('\t')
x = temp[0]
pos = temp[1]
if temp[-1][2:] in entities:
if target == 'BIO':
y = temp[-1][0]
else:
y = temp[-1] # temp[-1][2:]
else:
y = 'O'
else:
x, pos, y = (BR, BR, BR)
if x not in self.word_dict:
self.word_dict.add(x)
if y not in self.ne_dict:
self.ne_dict.add(y)
if pos not in self.pos_dict:
self.pos_dict.add(pos)
sent_x.append(x)
sent_y.append(y)
sent_pos.append(pos)
if sent_x[-1] == BR:
sent_x.pop()
sent_y.pop()
sent_pos.pop()
if START_END_TAGS:
sent_x.append(END)
sent_y.append(END_TAG)
sent_pos.append(END_TAG)
seq_list.append([sent_x, sent_y, sent_pos])
file_ids.append("random_" + str(i))
self.sequence_list = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
for i, (x, y, pos) in enumerate(seq_list):
self.sequence_list.add_sequence(x, y, pos, file_ids[i])
return self.sequence_list
def train_test_data(self, test_size=0.1):
train = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
test = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
tn, tt = train_test_split(self.sequence_list.seq_list, test_size=test_size)
train.seq_list = tn
test.seq_list = tt
return train, test
def TTCV_data(self, test_size=0.2, cv_size=0.2):
train = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
test = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
cv = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
tn, temp = train_test_split(self.sequence_list.seq_list, test_size=test_size + cv_size,
random_state=RANDOM_STATE)
tst, cvt = train_test_split(temp, test_size=cv_size / (cv_size + test_size), random_state=RANDOM_STATE)
train.seq_list = tn
test.seq_list = tst
cv.seq_list = cvt
return train, test, cv
## Dumps a corpus into a file
def save_corpus(self, name):
with open(name + '_corpus.pickle', 'wb') as fd:
pickle.dump(self, fd, protocol=pickle.HIGHEST_PROTOCOL)
## Loads a corpus from a file
def load_corpus(self, name):
with open(name + '_corpus.pickle', 'rb') as fd:
loaded = pickle.load(fd)
self.word_dict = loaded.word_dict
self.pos_dict = loaded.pos_dict
self.ne_dict = loaded.ne_dict
self.sequence_list = loaded.sequence_list
def trimTrain(self, data, percentage=1.0):
'''
:param data: SequenceList object
:param percentage: fraction of the data to keep
:return: SequenceList object
'''
train = SequenceList(self.word_dict, self.pos_dict, self.ne_dict)
res, _ = train_test_split(data.seq_list, test_size=1.0 - percentage, random_state=RANDOM_STATE)
train.seq_list = res
return train
##############################################################################################
########## NEC UTILS
from sklearn.decomposition import TruncatedSVD as SVD
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.lda import LDA
from scipy import sparse
import classifiers.id_features_NEC as featNEC
class Chunk:
def __init__(self, sequence_id, pos, length, entity):
self.sequence_id = sequence_id
self.pos = pos
self.length = length
self.entity = entity # entity NAME, not its id
def __repr__(self):
return "<\nSequence id:%i\n" \
"Pos inicial: %i\n" \
"Longitud: %i\n" \
"Entidad: %s\n>" % (self.sequence_id, self.pos, self.length, self.entity)
class ChunkSet:
def __init__(self, dataset):
self.dataset = dataset
self.chunk_list = []
self.entity_classes = LabelDictionary()
self.chunk_data()
def chunk_data(self):
for seq_id in range(len(self.dataset)):
sequence = self.dataset.seq_list[seq_id]
pos = 0
open = False
n = len(sequence.x)
ne = ''
for (i, w) in enumerate(sequence.x):
tag = self.dataset.y_dict.get_label_name(sequence.y[i])
if len(tag) > 1:
ne = tag[2:]
else:
ne = tag
if ne != 'O' and tag != START_TAG and tag != END_TAG and tag != BR:
self.entity_classes.add(ne)
prev_ne = ne
if i > 0:
prev_tag = self.dataset.y_dict.get_label_name(sequence.y[i - 1])
if len(prev_tag) > 1:
prev_ne = prev_tag[2:]
if tag.find('B') == 0:
if open and i > 0:
chunk = Chunk(sequence_id=seq_id, pos=pos, length=i - pos, entity=prev_ne)
self.chunk_list.append(chunk)
pos = i
open = True
elif tag.find('I') != 0 and open:
open = False
chunk = Chunk(sequence_id=seq_id, pos=pos, length=i - pos, entity=prev_ne)
self.chunk_list.append(chunk)
if open:
chunk = Chunk(sequence_id=seq_id, pos=pos, length=n - pos, entity=ne)
self.chunk_list.append(chunk)
def getStandart(chunks, feature_mapper, mode='BINARY'):
X = np.zeros((len(chunks.chunk_list), len(feature_mapper.feature_dict)))
Y = []
if mode == 'BINARY':
Y = np.zeros(len(chunks.chunk_list), dtype=np.int_)
else:
Y = np.zeros((len(chunks.chunk_list), len(chunks.entity_classes))) # one column per entity class (four entities)
for i, chunk in enumerate(chunks.chunk_list):
features = feature_mapper.get_features(chunk, chunks.dataset)
X[i, features] = 1
ne_id = chunks.entity_classes.get_label_id(chunk.entity)
if mode == 'BINARY':
Y[i] = ne_id
else:
Y[i, ne_id] = 1
return X, Y
def getData_NEC():
reader = JobDBCorpus()
data = reader.read_sequence_list(target='TODO')
np.seterr(all='ignore')
train, test = reader.train_test_data(test_size=0.2)
print("Reading chunks...")
chunks_train = ChunkSet(dataset=train)
chunks_test = ChunkSet(dataset=test)
print("Building features...")
idf = featNEC.IDFeatures(dataset=train, chunkset=chunks_train)
idf.build_features()
###############################################################################
print("Standarizing dataset...")
X_train, Y_train = getStandart(chunks_train, idf)
X_test, Y_test = getStandart(chunks_test, idf)
# sparse representation and normalize
X_train = sparse.csr_matrix(X_train)
X_train = normalize(X_train, copy=False)
X_test = sparse.csr_matrix(X_test)
X_test = normalize(X_test, copy=False)
return X_train, Y_train, X_test, Y_test, chunks_train
##############################################################################################
import classifiers.id_features_NERC as featNERC
def getStandart_NERC(data, feature_mapper):
BR_x_id = data.x_dict.get_label_id(BR)
n = 0
for sequence in data.seq_list:
for pos in range(2, len(sequence.x) - 1):
x = sequence.x[pos]
if x == BR_x_id:
continue
n += 1
n_features = feature_mapper.get_num_features()
row = []
col = []
values = []
Y = np.zeros(n, dtype=np.int_)
sample = 0
for sequence in data.seq_list:
for pos in range(2, len(sequence.x) - 1):
x = sequence.x[pos]
if x == BR_x_id:
continue
y_1 = sequence.y[pos - 1]
y_2 = sequence.y[pos - 2]
y = sequence.y[pos]
features = feature_mapper.get_features(sequence, pos, y_1, y_2)
row.extend([sample for i in range(len(features))])
col.extend(features)
Y[sample] = y
sample += 1
values = np.ones(len(row))
X = sparse.csr_matrix((values, (row, col)), shape=(n, n_features))
return X, Y
def toSeqList(data, seq_array):
ST_id = data.y_dict.get_label_id(START_TAG)
END_id = data.y_dict.get_label_id(END_TAG)
BR_id = data.y_dict.get_label_id(BR)
BR_x_id = data.x_dict.get_label_id(BR)
res = []
pos = 0
for sequence in data.seq_list:
y = [ST_id, ST_id]
for i in range(2, len(sequence.y) - 1):
id = sequence.y[i]
if id == BR_id:
y.append(BR_id)
else:
y.append(seq_array[pos])
pos += 1
y.append(END_id)
seq = sequence.copy_sequence()
seq.y = y
res.append(seq)
return res
| mit |
RiccardoPecora/MP | Lib/site-packages/numpy/doc/creation.py | 94 | 5411 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
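For example: ::
>>> np.ones((2, 3))
array([[ 1., 1., 1.], [ 1., 1., 1.]])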
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
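For instance, a function of the two index variables can be evaluated directly
on that grid: ::
>>> row, col = np.indices((3,3))
>>> row + 2*col
array([[0, 2, 4], [1, 3, 5], [2, 4, 6]])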
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard
to convert are libraries like PIL (able to read and write many image formats
such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
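As a small illustration (the file name here is only a placeholder), numpy's own
loadtxt can also parse simple comma-separated numeric text: ::
>>> arr = np.loadtxt('data.csv', delimiter=',') # assumes such a file exists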
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
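A minimal sketch of such a round trip with fromfile/tofile, using a placeholder
file name, looks like: ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('scratch.dat') # raw bytes, no header or byteorder info
>>> np.fromfile('scratch.dat', dtype=np.float64)
array([ 0., 1., 2., 3.])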
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
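For example (the values from the random call will differ from run to run): ::
>>> np.random.rand(2, 2) # random values, output omitted
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])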
"""
| gpl-3.0 |
subutai/NAB | tests/integration/corpuslabel_test.py | 7 | 7661 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-15, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import os
import pandas
import tempfile
import shutil
import unittest
import nab.corpus
import nab.labeler
from nab.util import strp
from nab.test_helpers import writeCorpus, writeCorpusLabel, generateTimestamps
class CorpusLabelTest(unittest.TestCase):
def setUp(self):
self.tempDir = os.path.join(tempfile.mkdtemp(), "test")
self.tempCorpusPath = os.path.join(self.tempDir, "data")
self.tempCorpusLabelPath = os.path.join(
self.tempDir, "labels", "label.json")
def tearDown(self):
shutil.rmtree(self.tempDir)
def testWindowTimestampsNotInDataFileThrowsError(self):
"""
A ValueError should be thrown when label windows contain timestamps
that do not exist in the data file.
"""
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2014-01-01"), None, 1)})
windows = [["2015-01-01", "2015-01-01"]]
writeCorpus(self.tempCorpusPath, {"test_data_file.csv" : data})
writeCorpusLabel(self.tempCorpusLabelPath, {"test_data_file.csv": windows})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
self.assertRaises(ValueError,
nab.labeler.CorpusLabel, self.tempCorpusLabelPath, corpus)
def testWindowTimestampsNonChronologicalThrowsError(self):
"""
A ValueError should be thrown when a label window's start and end
times are not in chronological order.
"""
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2014-01-01"),
datetime.timedelta(minutes=5), 10)})
# Windows both in and out of order
windows = [["2014-01-01 00:45", "2014-01-01 00:00"],
["2014-01-01 10:15", "2014-01-01 11:15"]]
writeCorpus(self.tempCorpusPath, {"test_data_file.csv" : data})
writeCorpusLabel(self.tempCorpusLabelPath, {"test_data_file.csv": windows})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
self.assertRaises(
ValueError, nab.labeler.CorpusLabel, self.tempCorpusLabelPath, corpus)
def testRowsLabeledAnomalousWithinAWindow(self):
"""
All timestamps labeled as anomalous should be within a label window.
"""
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2014-01-01"),
datetime.timedelta(minutes=5), 10)})
windows = [["2014-01-01 00:15", "2014-01-01 00:30"]]
writeCorpus(self.tempCorpusPath, {"test_data_file.csv": data})
writeCorpusLabel(self.tempCorpusLabelPath, {"test_data_file.csv": windows})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
corpusLabel = nab.labeler.CorpusLabel(self.tempCorpusLabelPath, corpus)
for relativePath, lab in corpusLabel.labels.iteritems():
windows = corpusLabel.windows[relativePath]
for row in lab[lab["label"] == 1].iterrows():
self.assertTrue(
all([w[0] <= row[1]["timestamp"] <= w[1] for w in windows]),
"The label at %s of file %s is not within a label window"
% (row[1]["timestamp"], relativePath))
def testNonexistentDatafileForLabelsThrowsError(self):
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2014-01-01"),
datetime.timedelta(minutes=5), 10)})
windows = [["2014-01-01 00:15", "2014-01-01 00:30"]]
writeCorpus(self.tempCorpusPath, {"test_data_file.csv": data})
writeCorpusLabel(self.tempCorpusLabelPath,
{"test_data_file.csv": windows, "non_existent_data_file.csv": windows})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
self.assertRaises(
KeyError, nab.labeler.CorpusLabel, self.tempCorpusLabelPath, corpus)
def testGetLabels(self):
"""
Labels dictionary generated by CorpusLabel.getLabels() should match the
label windows.
"""
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2014-01-01"),
datetime.timedelta(minutes=5), 10)})
windows = [["2014-01-01 00:00", "2014-01-01 00:10"],
["2014-01-01 00:10", "2014-01-01 00:15"]]
writeCorpus(self.tempCorpusPath, {"test_data_file.csv" : data})
writeCorpusLabel(self.tempCorpusLabelPath, {"test_data_file.csv": windows})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
corpusLabel = nab.labeler.CorpusLabel(self.tempCorpusLabelPath, corpus)
for relativePath, l in corpusLabel.labels.iteritems():
windows = corpusLabel.windows[relativePath]
for t, lab in corpusLabel.labels["test_data_file.csv"].values:
for w in windows:
if (w[0] <= t and t <= w[1]):
self.assertEqual(lab, 1,
"Incorrect label value for timestamp %r" % t)
def testRedundantTimestampsRaiseException(self):
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2015-01-01"),
datetime.timedelta(days=1), 365)})
dataFileName = "test_data_file.csv"
writeCorpus(self.tempCorpusPath, {dataFileName : data})
labels = ["2015-12-25 00:00:00",
"2015-12-26 00:00:00",
"2015-12-31 00:00:00"]
labelsDir = self.tempCorpusLabelPath.replace(
"/label.json", "/raw/label.json")
writeCorpusLabel(labelsDir, {dataFileName: labels})
corpus = nab.corpus.Corpus(self.tempCorpusPath)
labDir = labelsDir.replace("/label.json", "")
labelCombiner = nab.labeler.LabelCombiner(
labDir, corpus, 0.5, 0.10, 0.15, 0)
self.assertRaises(ValueError, labelCombiner.combine)
def testBucketMerge(self):
data = pandas.DataFrame({"timestamp" :
generateTimestamps(strp("2015-12-01"),
datetime.timedelta(days=1), 31)})
dataFileName = "test_data_file.csv"
writeCorpus(self.tempCorpusPath, {dataFileName : data})
rawLabels = (["2015-12-24 00:00:00",
"2015-12-31 00:00:00"],
["2015-12-01 00:00:00",
"2015-12-25 00:00:00",
"2015-12-31 00:00:00"],
["2015-12-25 00:00:00"])
for i, labels in enumerate(rawLabels):
labelsPath = self.tempCorpusLabelPath.replace(
os.path.sep+"label.json", os.path.sep+"raw"+os.path.sep+"label{}.json".format(i))
writeCorpusLabel(labelsPath, {"test_data_file.csv": labels})
labelsDir = labelsPath.replace(os.path.sep+"label{}.json".format(i), "")
corpus = nab.corpus.Corpus(self.tempCorpusPath)
labelCombiner = nab.labeler.LabelCombiner(
labelsDir, corpus, 0.5, 0.10, 0.15, 0)
labelCombiner.getRawLabels()
labelTimestamps, _ = labelCombiner.combineLabels()
expectedLabels = ['2015-12-25 00:00:00', '2015-12-31 00:00:00']
self.assertEqual(expectedLabels, labelTimestamps[dataFileName],
"The combined labels did not bucket and merge as expected.")
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gdk.py | 69 | 15968 | from __future__ import division
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as npy
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
# '<widget>.create_pango_layout(s)'
# and for the colormap lookup (see the cmap line below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(round(x)), int(round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
im.flipud_out()
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:,:,:] = image_array
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
# unflip
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
x, y = int(x), int(y)
if x <0 or y <0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = npy.zeros((N,1), npy.uint8)
image_str = font_image.as_str()
Xall[:,0] = npy.fromstring(image_str, npy.uint8)
# get the max alpha at each pixel
Xs = npy.amax(Xall,axis=1)
# convert it to it's proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
return w, h+1, h + 1
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(npy.asarray(dash_list))
dl = [max(1, int(round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGB=False):
GraphicsContextBase.set_foreground(self, fg, isRGB)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGDK(thisFig)
manager = FigureManagerBase(canvas, num)
# equals:
#manager = FigureManagerBase (FigureCanvasGDK (Figure(*args, **kwargs),
# num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(filename, format)
def get_default_filetype(self):
return 'png'
| agpl-3.0 |
jtwhite79/pyemu | pyemu/mc.py | 1 | 11486 | """pyEMU Monte Carlo module. Supports easy Monte Carlo
and GLUE analyses. The MonteCarlo class inherits from
pyemu.LinearAnalysis
"""
from __future__ import print_function, division
import os
import numpy as np
import warnings
from pyemu.la import LinearAnalysis
from pyemu.en import ObservationEnsemble, ParameterEnsemble
from pyemu.mat import Cov
from .pyemu_warnings import PyemuWarning
# from pyemu.utils.helpers import zero_order_tikhonov
class MonteCarlo(LinearAnalysis):
"""LinearAnalysis derived type for monte carlo analysis
Parameters
----------
**kwargs : dict
dictionary of keyword arguments. See pyemu.LinearAnalysis for
complete definitions
Attributes
----------
parensemble : pyemu.ParameterEnsemble
pyemu object derived from a pandas dataframe, the ensemble
of parameters from the PEST control file with associated
starting value and bounds. Object also exposes methods
relevant to the dataframe and parameters-- see documentation.
obsensemble : pyemu.ObservationEnsemble
pyemu object derived from a pandas dataframe, the ensemble
of observations from the PEST control file with associated
starting weights. Object also exposes methods
relevant to the dataframe and observations-- see documentation.
Returns
-------
MonteCarlo
pyEMU MonteCarlo object
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(pst="pest.pst")``
"""
def __init__(self, **kwargs):
warnings.warn(
"pyemu.MonteCarlo class is deprecated. "
+ "Please use the ensemble classes directly",
PyemuWarning,
)
super(MonteCarlo, self).__init__(**kwargs)
assert self.pst is not None, "monte carlo requires a pest control file"
self.parensemble = ParameterEnsemble(pst=self.pst)
self.obsensemble = ObservationEnsemble(pst=self.pst)
@property
def num_reals(self):
"""get the number of realizations in the parameter ensemble
Returns
-------
num_reals : int
"""
return self.parensemble.shape[0]
def get_nsing(self, epsilon=1.0e-4):
"""get the number of solution space dimensions given
a ratio between the largest and smallest singular values
Parameters
----------
epsilon: float
singular value ratio
Returns
-------
nsing : float
number of singular components above the epsilon ratio threshold
Note
-----
If nsing == nadj_par, then None is returned
"""
mx = self.xtqx.shape[0]
nsing = mx - np.searchsorted(
np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:, 0]), epsilon
)
if nsing == mx:
self.logger.warn("optimal nsing=npar")
nsing = None
return nsing
def get_null_proj(self, nsing=None):
"""get a null-space projection matrix of XTQX
Parameters
----------
nsing: int
optional number of singular components to use
If None, then nsing is determined from
call to MonteCarlo.get_nsing()
Returns
-------
v2_proj : pyemu.Matrix
the null-space projection matrix (V2V2^T)
"""
if nsing is None:
nsing = self.get_nsing()
if nsing is None:
raise Exception("nsing is None")
print("using {0} singular components".format(nsing))
self.log(
"forming null space projection matrix with "
+ "{0} of {1} singular components".format(nsing, self.jco.shape[1])
)
v2_proj = self.xtqx.v[:, nsing:] * self.xtqx.v[:, nsing:].T
self.log(
"forming null space projection matrix with "
+ "{0} of {1} singular components".format(nsing, self.jco.shape[1])
)
return v2_proj
def draw(
self,
num_reals=1,
par_file=None,
obs=False,
enforce_bounds=None,
cov=None,
how="gaussian",
):
"""draw stochastic realizations of parameters and
optionally observations, filling MonteCarlo.parensemble and
optionally MonteCarlo.obsensemble.
Parameters
----------
num_reals : int
number of realization to generate
par_file : str
parameter file to use as mean values. If None,
use MonteCarlo.pst.parameter_data.parval1.
Default is None
obs : bool
add a realization of measurement noise to observation values,
forming MonteCarlo.obsensemble.Default is False
enforce_bounds : str
enforce parameter bounds based on control file information.
options are 'reset', 'drop' or None. Default is None
how : str
type of distribution to draw from. Must be in ["gaussian","uniform"]
default is "gaussian".
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(pst="pest.pst")``
``>>>mc.draw(1000)``
"""
if par_file is not None:
self.pst.parrep(par_file)
how = how.lower().strip()
assert how in ["gaussian", "uniform"]
if cov is not None:
assert isinstance(cov, Cov)
if how == "uniform":
raise Exception(
"MonteCarlo.draw() error: 'how'='uniform',"
+ " 'cov' arg cannot be passed"
)
else:
cov = self.parcov
self.log("generating {0:d} parameter realizations".format(num_reals))
if how == "gaussian":
self.parensemble = ParameterEnsemble.from_gaussian_draw(
pst=self.pst,
cov=cov,
num_reals=num_reals,
use_homegrown=True,
enforce_bounds=False,
)
elif how == "uniform":
self.parensemble = ParameterEnsemble.from_uniform_draw(
pst=self.pst, num_reals=num_reals
)
else:
raise Exception(
"MonteCarlo.draw(): unrecognized 'how' arg: {0}".format(how)
)
# self.parensemble = ParameterEnsemble(pst=self.pst)
# self.obsensemble = ObservationEnsemble(pst=self.pst)
# self.parensemble.draw(cov,num_reals=num_reals, how=how,
# enforce_bounds=enforce_bounds)
if enforce_bounds is not None:
self.parensemble.enforce(enforce_bounds)
self.log("generating {0:d} parameter realizations".format(num_reals))
if obs:
self.log("generating {0:d} observation realizations".format(num_reals))
self.obsensemble = ObservationEnsemble.from_id_gaussian_draw(
pst=self.pst, num_reals=num_reals
)
self.log("generating {0:d} observation realizations".format(num_reals))
def project_parensemble(
self, par_file=None, nsing=None, inplace=True, enforce_bounds="reset"
):
"""perform the null-space projection operations for null-space monte carlo
Parameters
----------
par_file: str
an optional file of parameter values to use
nsing: int
number of singular values to in forming null subspace matrix
inplace: bool
overwrite the existing parameter ensemble with the
projected values
enforce_bounds: str
how to enforce parameter bounds. can be None, 'reset', or 'drop'.
Default is None
Returns
-------
par_en : pyemu.ParameterEnsemble
if inplace is False, otherwise None
Note
----
to use this method, the MonteCarlo instance must have been constructed
with the ``jco`` argument.
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``
``>>>mc.draw(1000)``
``>>>mc.project_parensemble(par_file="final.par",nsing=100)``
"""
assert self.jco is not None, (
"MonteCarlo.project_parensemble()" + "requires a jacobian attribute"
)
if par_file is not None:
assert os.path.exists(par_file), (
"monte_carlo.draw() error: par_file not found:" + par_file
)
self.parensemble.pst.parrep(par_file)
# project the ensemble
self.log("projecting parameter ensemble")
en = self.parensemble.project(
self.get_null_proj(nsing), inplace=inplace, log=self.log
)
self.log("projecting parameter ensemble")
return en
def write_psts(self, prefix, existing_jco=None, noptmax=None):
"""write parameter and optionally observation realizations
to a series of pest control files
Parameters
----------
prefix: str
pest control file prefix
existing_jco: str
filename of an existing jacobian matrix to add to the
pest++ options in the control file. This is useful for
NSMC since this jco can be used to get the first set of
parameter upgrades for free! It needs to be the path to the jco
file as seen from the location where pest++ will be run
noptmax: int
value of NOPTMAX to set in new pest control files
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``
``>>>mc.draw(1000, obs=True)``
``>>>mc.write_psts("mc_", existing_jco="pest.jcb", noptmax=1)``
"""
self.log("writing realized pest control files")
# get a copy of the pest control file
pst = self.pst.get(par_names=self.pst.par_names, obs_names=self.pst.obs_names)
if noptmax is not None:
pst.control_data.noptmax = noptmax
if existing_jco is not None:
pst.pestpp_options["BASE_JACOBIAN"] = existing_jco
# set the indices
pst.parameter_data.index = pst.parameter_data.parnme
pst.observation_data.index = pst.observation_data.obsnme
if self.parensemble.istransformed:
par_en = self.parensemble._back_transform(inplace=False)
else:
par_en = self.parensemble
for i in range(self.num_reals):
pst_name = prefix + "{0:d}.pst".format(i)
self.log("writing realized pest control file " + pst_name)
pst.parameter_data.loc[par_en.columns, "parval1"] = par_en.iloc[i, :].T
# reset the regularization
# if pst.control_data.pestmode == "regularization":
# pst.zero_order_tikhonov(parbounds=True)
# zero_order_tikhonov(pst,parbounds=True)
# add the obs noise realization if needed
if self.obsensemble.shape[0] == self.num_reals:
pst.observation_data.loc[
self.obsensemble.columns, "obsval"
] = self.obsensemble.iloc[i, :].T
# write
pst.write(pst_name)
self.log("writing realized pest control file " + pst_name)
self.log("writing realized pest control files")
| bsd-3-clause |
lazywei/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
Logistic regression is not a multiclass classifier out of the box; here it is
used either in a one-vs-rest scheme or with a multinomial loss so that all
three classes can be identified.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
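# Xfull enumerates a 100x100 grid over the first two iris features; each
# classifier's class probabilities are evaluated on this grid and rendered
# with imshow below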
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities:
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
gargleblaster/trading-with-python | lib/cboe.py | 76 | 4433 | # -*- coding: utf-8 -*-
"""
toolset working with cboe data
@author: Jev Kuznetsov
Licence: BSD
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index
from pandas.core import datetools
import numpy as np
import pandas as pd
def monthCode(month):
"""
perform month->code and back conversion
Input: either month nr (int) or month code (str)
Returns: code or month nr
"""
codes = ('F','G','H','J','K','M','N','Q','U','V','X','Z')
if isinstance(month,int):
return codes[month-1]
elif isinstance(month,str):
return codes.index(month)+1
else:
raise ValueError('Function accepts int or str')
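# Hedged usage sketch (not part of the original module):
#   monthCode(1)   -> 'F'
#   monthCode('F') -> 1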
def vixExpiration(year,month):
"""
    expiration date of a VX future
"""
t = datetime(year,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_exp = t_new-datetools.relativedelta(days=30)
return t_exp
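# Worked example (assuming pandas' Week(weekday=4) offset behaves as in the
# code above): vixExpiration(2011, 11) rolls forward to the third Friday of
# December 2011 (2011-12-16) and subtracts 30 days, giving the 2011-11-16
# settlement date.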
def getPutCallRatio():
""" download current Put/Call ratio"""
urlStr = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/totalpc.csv'
try:
lines = urllib2.urlopen(urlStr).readlines()
    except Exception, e:
        s = "Failed to download:\n{0}".format(e)
        print s
        return None
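    # the parser assumes two preamble lines: column names sit on line index 2
    # and the data rows follow immediately after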
headerLine = 2
header = lines[headerLine].strip().split(',')
data = [[] for i in range(len(header))]
for line in lines[(headerLine+1):]:
fields = line.rstrip().split(',')
data[0].append(datetime.strptime(fields[0],'%m/%d/%Y'))
for i,field in enumerate(fields[1:]):
data[i+1].append(float(field))
return DataFrame(dict(zip(header[1:],data[1:])), index = Index(data[0]))
def getHistoricData(symbols = ['VIX','VXV','VXMT','VVIX']):
''' get historic data from CBOE
return dataframe
'''
if not isinstance(symbols,list):
symbols = [symbols]
urls = {'VIX':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vixcurrent.csv',
'VXV':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/vxvdailyprices.csv',
'VXMT':'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/vxmtdailyprices.csv',
'VVIX':'http://www.cboe.com/publish/scheduledtask/mktdata/datahouse/VVIXtimeseries.csv'}
startLines = {'VIX':1,'VXV':2,'VXMT':2,'VVIX':1}
cols = {'VIX':'VIX Close','VXV':'CLOSE','VXMT':'Close','VVIX':'VVIX'}
data = {}
for symbol in symbols:
urlStr = urls[symbol]
print 'Downloading %s from %s' % (symbol,urlStr)
data[symbol] = pd.read_csv(urllib2.urlopen(urlStr), header=startLines[symbol],index_col=0,parse_dates=True)[cols[symbol]]
return pd.DataFrame(data)
#---------------------classes--------------------------------------------
class VixFuture(object):
"""
Class for easy handling of futures data.
"""
def __init__(self,year,month):
self.year = year
self.month = month
def expirationDate(self):
return vixExpiration(self.year,self.month)
def daysLeft(self,date):
""" business days to expiration date """
        from pandas import DateRange # this will cause a problem with pandas 0.14 and higher... Method is deprecated and replaced by DatetimeIndex
r = DateRange(date,self.expirationDate())
return len(r)
def __repr__(self):
return 'VX future [%i-%i %s] Exprires: %s' % (self.year,self.month,monthCode(self.month),
self.expirationDate())
#-------------------test functions---------------------------------------
def testDownload():
vix = getHistoricData('VIX')
vxv = getHistoricData('VXV')
vix.plot()
vxv.plot()
def testExpiration():
for month in xrange(1,13):
d = vixExpiration(2011,month)
print d.strftime("%B, %d %Y (%A)")
if __name__ == '__main__':
#testExpiration()
v = VixFuture(2011,11)
print v
print v.daysLeft(datetime(2011,11,10))
| bsd-3-clause |
flightgong/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
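# the two targets trace a circle of radius pi; noise is added to every 5th
# sample on the next line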
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
steffengraber/nest-simulator | pynest/nest/voltage_trace.py | 18 | 7432 | # -*- coding: utf-8 -*-
#
# voltage_trace.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to plot voltage traces.
"""
import nest
import numpy
__all__ = [
'from_device',
'from_file',
]
def from_file(fname, title=None, grayscale=False):
"""Plot voltage trace from file.
Parameters
----------
fname : str or list
Filename or list of filenames to load from
title : str, optional
Plot title
grayscale : bool, optional
Plot in grayscale
Raises
------
ValueError
"""
import matplotlib.pyplot as plt
if isinstance(fname, (list, tuple)):
data = None
for f in fname:
if data is None:
data = numpy.loadtxt(f)
else:
data = numpy.concatenate((data, numpy.loadtxt(f)))
else:
data = numpy.loadtxt(fname)
if grayscale:
line_style = "k"
else:
line_style = ""
if len(data.shape) == 1:
print("INFO: only found 1 column in the file. \
Assuming that only one neuron was recorded.")
plotid = plt.plot(data, line_style)
plt.xlabel("Time (steps of length interval)")
elif data.shape[1] == 2:
print("INFO: found 2 columns in the file. Assuming \
them to be node ID, pot.")
plotid = []
data_dict = {}
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[1]]
else:
data_dict[d[0]].append(d[1])
for d in data_dict:
plotid.append(
plt.plot(data_dict[d], line_style, label="Neuron %i" % d)
)
plt.xlabel("Time (steps of length interval)")
plt.legend()
elif data.shape[1] == 3:
plotid = []
data_dict = {}
g = data[0][0]
t = []
for d in data:
if not d[0] in data_dict:
data_dict[d[0]] = [d[2]]
else:
data_dict[d[0]].append(d[2])
if d[0] == g:
t.append(d[1])
for d in data_dict:
plotid.append(
plt.plot(t, data_dict[d], line_style, label="Neuron %i" % d)
)
plt.xlabel("Time (ms)")
plt.legend()
else:
raise ValueError("Inappropriate data shape %i!" % data.shape)
if not title:
title = "Membrane potential from file '%s'" % fname
plt.title(title)
plt.ylabel("Membrane potential (mV)")
plt.draw()
return plotid
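# from_file() accepts three column layouts (summarised from the branches above):
#   1 column  : membrane potential only (single neuron)
#   2 columns : node ID, membrane potential
#   3 columns : node ID, time (ms), membrane potential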
def from_device(detec, neurons=None, title=None, grayscale=False,
timeunit="ms"):
"""Plot the membrane potential of a set of neurons recorded by
the given voltmeter or multimeter.
Parameters
----------
detec : list
Global id of voltmeter or multimeter in a list, e.g. [1]
neurons : list, optional
        Indices of neurons to plot
title : str, optional
Plot title
grayscale : bool, optional
Plot in grayscale
timeunit : str, optional
Unit of time
Raises
------
nest.kernel.NESTError
        If the device is not a single voltmeter or multimeter measuring V_m,
        or if no events were recorded.
"""
import matplotlib.pyplot as plt
if len(detec) > 1:
raise nest.kernel.NESTError("Please provide a single voltmeter.")
type_id = nest.GetDefaults(detec.get('model'), 'type_id')
if type_id not in ('voltmeter', 'multimeter'):
raise nest.kernel.NESTError("Please provide a voltmeter or a \
multimeter measuring V_m.")
elif type_id == 'multimeter':
if "V_m" not in detec.get("record_from"):
raise nest.kernel.NESTError("Please provide a multimeter \
measuring V_m.")
elif (not detec.get("record_to") == "memory" and
len(detec.get("record_from")) > 1):
raise nest.kernel.NESTError("Please provide a multimeter \
measuring only V_m or record to memory!")
if detec.get("record_to") == "memory":
timefactor = 1.0
if not detec.get('time_in_steps'):
if timeunit == "s":
timefactor = 1000.0
else:
timeunit = "ms"
times, voltages = _from_memory(detec)
if not len(times):
            raise nest.kernel.NESTError("No events recorded!")
if neurons is None:
neurons = voltages.keys()
plotids = []
for neuron in neurons:
time_values = numpy.array(times[neuron]) / timefactor
if grayscale:
line_style = "k"
else:
line_style = ""
try:
plotids.append(
plt.plot(time_values, voltages[neuron],
line_style, label="Neuron %i" % neuron)
)
except KeyError:
print("INFO: Wrong ID: {0}".format(neuron))
if not title:
title = "Membrane potential"
plt.title(title)
plt.ylabel("Membrane potential (mV)")
if nest.GetStatus(detec)[0]['time_in_steps']:
plt.xlabel("Steps")
else:
plt.xlabel("Time (%s)" % timeunit)
plt.legend(loc="best")
plt.draw()
return plotids
elif detec.get("record_to") == "ascii":
fname = detec.get("filenames")
return from_file(fname, title, grayscale)
else:
raise nest.kernel.NESTError("Provided devices neither record to \
ascii file, nor to memory.")
def _from_memory(detec):
"""Get voltage traces from memory.
----------
detec : list
Global id of voltmeter or multimeter
"""
import array
ev = detec.get('events')
potentials = ev['V_m']
senders = ev['senders']
v = {}
t = {}
if 'times' in ev:
times = ev['times']
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = array.array('f')
v[currentsender].append(float(potentials[s]))
t[currentsender].append(float(times[s]))
else:
# reconstruct the time vector, if not stored explicitly
origin = detec.get('origin')
start = detec.get('start')
interval = detec.get('interval')
senders_uniq = numpy.unique(senders)
        num_intvls = len(senders) // len(senders_uniq)
times_s = origin + start + interval + \
interval * numpy.array(range(num_intvls))
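        # e.g. origin=0.0, start=0.0, interval=1.0 with two samples per sender
        # reconstructs times_s = [1.0, 2.0]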
for s, currentsender in enumerate(senders):
if currentsender not in v:
v[currentsender] = array.array('f')
t[currentsender] = times_s
v[currentsender].append(float(potentials[s]))
return t, v
| gpl-2.0 |
DGrady/pandas | doc/source/conf.py | 5 | 19809 | # -*- coding: utf-8 -*-
#
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
import inspect
import importlib
from pandas.compat import u, PY3
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
# https://github.com/sphinx-doc/sphinx/pull/2325/files
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000.
sys.setrecursionlimit(5000)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.extend([
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__),
'..', '../..',
'sphinxext')
])
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.todo',
'numpydoc', # used to parse numpy-style docstrings for autodoc
'ipython_sphinxext.ipython_directive',
'ipython_sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_console_highlighting', # lowercase didn't work
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode',
'nbsphinx',
]
exclude_patterns = ['**.ipynb_checkpoints']
with open("index.rst") as f:
index_rst_lines = f.readlines()
# only include the slow autosummary feature if we're building the API section
# of the docs
# JP: added from sphinxdocs
autosummary_generate = False
if any([re.match(r"\s*api\s*", l) for l in index_rst_lines]):
autosummary_generate = True
files_to_delete = []
for f in os.listdir(os.path.dirname(__file__)):
if (not f.endswith(('.ipynb', '.rst')) or
f.startswith('.') or os.path.basename(f) == 'index.rst'):
continue
_file_basename = os.path.splitext(f)[0]
    _regex_to_match = r"\s*{}\s*$".format(_file_basename)
if not any([re.match(_regex_to_match, line) for line in index_rst_lines]):
files_to_delete.append(f)
if files_to_delete:
print("I'm about to DELETE the following:\n%s\n" % list(sorted(files_to_delete)))
sys.stdout.write("WARNING: I'd like to delete those to speed up processing (yes/no)? ")
if PY3:
answer = input()
else:
answer = raw_input()
if answer.lower().strip() in ('y','yes'):
for f in files_to_delete:
f = os.path.join(os.path.join(os.path.dirname(__file__),f))
f= os.path.abspath(f)
try:
print("Deleting %s" % f)
os.unlink(f)
except:
print("Error deleting %s" % f)
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('pandas')
copyright = u('2008-2014, the pandas development team')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas
# version = '%s r%s' % (pandas.__version__, svn_version())
version = '%s' % (pandas.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature_with_gtoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# Add redirect for previously existing API pages
# each item is like `(from_old, to_new)`
# To redirect a class and all its methods, see below
# https://github.com/pandas-dev/pandas/issues/16186
moved_api_pages = [
('pandas.core.common.isnull', 'pandas.isna'),
('pandas.core.common.notnull', 'pandas.notna'),
('pandas.core.reshape.get_dummies', 'pandas.get_dummies'),
('pandas.tools.merge.concat', 'pandas.concat'),
('pandas.tools.merge.merge', 'pandas.merge'),
('pandas.tools.pivot.pivot_table', 'pandas.pivot_table'),
('pandas.tseries.tools.to_datetime', 'pandas.to_datetime'),
('pandas.io.clipboard.read_clipboard', 'pandas.read_clipboard'),
('pandas.io.excel.ExcelFile.parse', 'pandas.ExcelFile.parse'),
('pandas.io.excel.read_excel', 'pandas.read_excel'),
('pandas.io.gbq.read_gbq', 'pandas.read_gbq'),
('pandas.io.html.read_html', 'pandas.read_html'),
('pandas.io.json.read_json', 'pandas.read_json'),
('pandas.io.parsers.read_csv', 'pandas.read_csv'),
('pandas.io.parsers.read_fwf', 'pandas.read_fwf'),
('pandas.io.parsers.read_table', 'pandas.read_table'),
('pandas.io.pickle.read_pickle', 'pandas.read_pickle'),
('pandas.io.pytables.HDFStore.append', 'pandas.HDFStore.append'),
('pandas.io.pytables.HDFStore.get', 'pandas.HDFStore.get'),
('pandas.io.pytables.HDFStore.put', 'pandas.HDFStore.put'),
('pandas.io.pytables.HDFStore.select', 'pandas.HDFStore.select'),
('pandas.io.pytables.read_hdf', 'pandas.read_hdf'),
('pandas.io.sql.read_sql', 'pandas.read_sql'),
('pandas.io.sql.read_frame', 'pandas.read_frame'),
('pandas.io.sql.write_frame', 'pandas.write_frame'),
('pandas.io.stata.read_stata', 'pandas.read_stata'),
]
# Again, tuples of (from_old, to_new)
moved_classes = [
('pandas.tseries.resample.Resampler', 'pandas.core.resample.Resampler'),
('pandas.formats.style.Styler', 'pandas.io.formats.style.Styler'),
]
for old, new in moved_classes:
# the class itself...
moved_api_pages.append((old, new))
mod, classname = new.rsplit('.', 1)
klass = getattr(importlib.import_module(mod), classname)
methods = [x for x in dir(klass)
if not x.startswith('_') or x in ('__iter__', '__array__')]
for method in methods:
# ... and each of its public methods
moved_api_pages.append(
("{old}.{method}".format(old=old, method=method),
"{new}.{method}".format(new=new, method=method))
)
html_additional_pages = {
'generated/' + page[0]: 'api_redirect.html'
for page in moved_api_pages
}
html_context = {
'redirects': {old: new for old, new in moved_api_pages}
}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'
# -- Options for nbsphinx ------------------------------------------------
nbsphinx_allow_errors = True
# -- Options for LaTeX output --------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pandas.tex',
u('pandas: powerful Python data analysis toolkit'),
u('Wes McKinney\n\& PyData Development Team'), 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'statsmodels': ('http://www.statsmodels.org/devel/', None),
'matplotlib': ('http://matplotlib.org/', None),
'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
# extlinks alias
extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
'GH'),
'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
'wiki ')}
ipython_exec_lines = [
'import numpy as np',
'import pandas as pd',
# This ensures correct rendering on system with console encoding != utf8
# (windows). It forces pandas to encode its output reprs using utf8
    # wherever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"'
]
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
import sphinx
from sphinx.util import rpartition
from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter
from sphinx.ext.autosummary import Autosummary
class AccessorDocumenter(MethodDocumenter):
"""
Specialized Documenter subclass for accessors.
"""
objtype = 'accessor'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
def format_signature(self):
# this method gives an error/warning for the accessors, therefore
# overriding it (accessor has no arguments)
return ''
class AccessorLevelDocumenter(Documenter):
"""
Specialized Documenter subclass for objects on accessor level (methods,
attributes).
"""
# This is the simple straightforward version
# modname is None, base the last elements (eg 'hour')
# and path the part before (eg 'Series.dt')
# def resolve_name(self, modname, parents, path, base):
# modname = 'pandas'
# mod_cls = path.rstrip('.')
# mod_cls = mod_cls.split('.')
#
# return modname, mod_cls + [base]
def resolve_name(self, modname, parents, path, base):
if modname is None:
if path:
mod_cls = path.rstrip('.')
else:
mod_cls = None
# if documenting a class-level object without path,
# there must be a current class, either from a parent
# auto directive ...
mod_cls = self.env.temp_data.get('autodoc:class')
# ... or from a class directive
if mod_cls is None:
mod_cls = self.env.temp_data.get('py:class')
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
# HACK: this is added in comparison to ClassLevelDocumenter
# mod_cls still exists of class.accessor, so an extra
# rpartition is needed
modname, accessor = rpartition(mod_cls, '.')
modname, cls = rpartition(modname, '.')
parents = [cls, accessor]
# if the module name is still missing, get it like above
if not modname:
modname = self.env.temp_data.get('autodoc:module')
if not modname:
if sphinx.__version__ > '1.3':
modname = self.env.ref_context.get('py:module')
else:
modname = self.env.temp_data.get('py:module')
# ... else, it stays None, which means invalid
return modname, parents + [base]
class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter):
objtype = 'accessorattribute'
directivetype = 'attribute'
# lower than AttributeDocumenter so this is not chosen for normal attributes
priority = 0.6
class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter):
objtype = 'accessormethod'
directivetype = 'method'
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter):
"""
This documenter lets us removes .__call__ from the method signature for
callable accessors like Series.plot
"""
objtype = 'accessorcallable'
directivetype = 'method'
# lower than MethodDocumenter; otherwise the doc build prints warnings
priority = 0.5
    def format_name(self):
        # str.rstrip strips a set of characters, not a suffix; trim '.__call__' explicitly
        name = MethodDocumenter.format_name(self)
        return name[:-len('.__call__')] if name.endswith('.__call__') else name
class PandasAutosummary(Autosummary):
"""
This alternative autosummary class lets us override the table summary for
Series.plot and DataFrame.plot in the API docs.
"""
def _replace_pandas_items(self, display_name, sig, summary, real_name):
# this a hack: ideally we should extract the signature from the
# .__call__ method instead of hard coding this
if display_name == 'DataFrame.plot':
sig = '([x, y, kind, ax, ....])'
summary = 'DataFrame plotting accessor and method'
elif display_name == 'Series.plot':
sig = '([kind, ax, figsize, ....])'
summary = 'Series plotting accessor and method'
return (display_name, sig, summary, real_name)
def get_items(self, names):
items = Autosummary.get_items(self, names)
items = [self._replace_pandas_items(*item) for item in items]
return items
# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if '+' in pandas.__version__:
return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % (
fn, linespec)
else:
return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % (
pandas.__version__, fn, linespec)
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
def setup(app):
app.connect("autodoc-process-docstring", remove_flags_docstring)
app.add_autodocumenter(AccessorDocumenter)
app.add_autodocumenter(AccessorAttributeDocumenter)
app.add_autodocumenter(AccessorMethodDocumenter)
app.add_autodocumenter(AccessorCallableDocumenter)
app.add_directive('autosummary', PandasAutosummary)
| bsd-3-clause |
SkRobo/Eurobot-2017 | old year/RESET-master/CommunicationWithRobot/control4_debug.py | 2 | 7940 | from serial.tools import list_ports
import numpy as np
import serialWrapper
import packetBuilder
import packetParser
import sys
import time
import socket
import math
import random
import traceback
import multiprocessing
import lidar3_debug
import lidarGui
from ctypes import Structure, c_double
import matplotlib.pyplot as plt
# STM32 USB microcontroller ID
#VID = 1155
#PID = 22336
#SNR = '336234893534'
VID = 1155
PID = 22336
SNR = '3677346C3034'
####################
# CONTROL #
####################
def initPTC():
"""Initialize PID, Trajectory, Kinematics"""
# Build packet for sending to robot
packet = packetBuilder.BuildPacket(commands.switchOnPid)
# send packet to port. sendRequest method will wait answer from robot. In case
# if you don't need answer possible to use 'sendData' method (just send data,
# without waiting for answer)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
    print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'PID controller On'
else:
raise Exception('switchOnPid failed')
packet = packetBuilder.BuildPacket(commands.switchOnTrajectoryRegulator)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
    print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'Trajectory regulator ON'
else:
raise Exception('switchOnTrajectoryRegulator failed')
packet = packetBuilder.BuildPacket(commands.switchOnKinematicCalculation)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
    print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'Kinematics ON'
else:
raise Exception('switchOnKinematicCalculation failed')
def portNumber():
"""Find all ports, and returns one with defined STM values"""
for port in list_ports.comports():
print port
print port.serial_number, port.pid, port.vid
if (port.serial_number == SNR) and (port.pid == PID) and (port.vid == VID):
return port.name
def globMov():
print '\nInput coordinates and speed type'
# Get user input for movement
x = float(raw_input('X: '))
y = float(raw_input('Y: '))
fi = float(raw_input('angle: '))
speed = int(raw_input('speed type (0 = normal, 1 = stop, 2 = stand): '))
coordinates = [x, y, fi, speed]
#print 'Movement command: ', coordinates
packet = packetBuilder.BuildPacket(commands.addPointToStack, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('add PointToStack failed')
def relMov(x = False, y = False, fi = False, speed = False):
"""Move robot relative to its current coord"""
print '\nInput robot displacement and speed type'
    if x is False:
        x = float(raw_input('X: '))
    if y is False:
        y = float(raw_input('Y: '))
    if fi is False:
        fi = float(raw_input('angle: '))
    if speed is False:
        speed = int(raw_input('speed type (0 = normal, 1 = stop, 2 = stand): '))
dsplcmnt = [x, y, fi, speed]
#startT = time.time()
oldCoord = getCoord()
newCoord = [oldCoord[0] + dsplcmnt[0], oldCoord[1] + dsplcmnt[1], oldCoord[2]
+ dsplcmnt[2], speed]
#endT = time.time()
#print 'Rel Mov ', (endT - startT)
#print 'Displacement: ', dsplcmnt
#print 'Old Coord: ', oldCoord
#print 'New Coord: ', newCoord
packet = packetBuilder.BuildPacket(commands.addPointToStack, newCoord)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
def setStart(x,y,fi):
coordinates = [x, y, fi]
packet = packetBuilder.BuildPacket(commands.setCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def setCoord():
print '\nSet current robot coordinates'
x = float(raw_input('X: '))
y = float(raw_input('Y: '))
fi = float(raw_input('angle: '))
coordinates = [x, y, fi]
packet = packetBuilder.BuildPacket(commands.setCorectCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def setCCoord(x,y,fi):
sharedcor.value = 1
coordinates = [x, y, fi]
while sharedcor.value == 0:
continue
packet = packetBuilder.BuildPacket(commands.setCorectCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def getCoord():
"""Return robot current coordinates"""
packet = packetBuilder.BuildPacket(commands.getCurentCoordinates)
#startT = time.time()
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
#endT = time.time()
#print 'GetCoord time: ', (endT - startT)
#print 'Current robot coordinates: ', recievedPacket.reply
return recievedPacket.reply
#packet = packetBuilder.BuildPacket(commands.switchOnKinematicCalculation)
#recievedPacket = computerPort.sendRequest(packet.bytearray)
def correction():
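    # overwrite the robot's internal pose with the latest lidar estimate;
    # the shared lidar pose appears to be in mm / rad, hence the /1000 to metres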
#robot = getCoord()
#print robot
lidar = shared[:]
print 'lidar correction: ', lidar
#diff = [lidar[0]/1000-robot[0],lidar[1]/1000-robot[1],lidar[2]-robot[2]]
setCCoord(lidar[0]/1000, lidar[1]/1000, lidar[2])
def getLidar():
return shared[:]
def detectStop():
old = getCoord()
time.sleep(0.5)
new = getCoord()
    suma = sum([abs(new[0] - old[0]), abs(new[1] - old[1]), abs(new[2] - old[2])])
if suma < 0.001:
return True
return False
class Pose(Structure):
_fields_ = [('x', c_double), ('y', c_double), ('fi', c_double)]
def weights():
""" setup an XY plot canvas """
global plidar, wlidar
pipx = [i.x for i in plidar]
pipy = [i.y for i in plidar]
pipfi =[math.degrees(i.fi) for i in plidar]
#we = [(wlidar[i],wlidar[i],wlidar[i]) for i in xrange(100)]
#print we
#print pip
plt.ion()
n, bins, patches = plt.hist((pipx,pipy,pipfi), bins = 50, weights = (wlidar,wlidar,wlidar),
rwidth = 0.9, color = ('b','r','g'))
plt.grid(True)
plt.draw()
raw_input("<Hit Enter To Close>")
plt.close()
################
## START ##
################
portName = portNumber()
if portName:
    port = '/dev/' + portName
    print 'STM32 found on port %s' % port
else:
    print 'No STM32 found. Aborting'
    sys.exit()
# COM port initialization
computerPort = serialWrapper.SerialWrapper(port)
# we will choose commands which we want to send from this list
commands = packetBuilder.CommandsList()
# Initialize PID, Trajectory and Kinematics
initPTC()
iteration = 0
lock = multiprocessing.Lock()
shared = multiprocessing.Array('d', [0.0, 0.0, 0.0])
sharedcor = multiprocessing.Value('i', 0)
wlidar = multiprocessing.Array('d', 200)
plidar = multiprocessing.Array(Pose, [(0.0, 0.0, 0.0) for i in xrange(200)])
l = multiprocessing.Process(target=lidar3_debug.localisation, args =(lock,shared,computerPort,commands,plidar,wlidar,sharedcor))
l.start()
g = multiprocessing.Process(target=lidarGui.begin, args =(shared,))
g.start()
setStart(0.152,0.72,0.0)
#figure, lines = init_xy_plot()
comm_list = {1: globMov, 2: relMov, 3: setCoord, 4: getCoord, 5: getLidar,
6: correction, 7: weights}
while True:
try:
iteration += 1
print '\nList of available commands: \n1 Global Movement\n2 Relative Movement'\
'\n3 Set Coordinates\n4 Get Coordinates\n5 Get Lidar\n6 Correction'\
'\n7 Histogram'
command = int(raw_input('Command number: '))
print comm_list[command]()
print 'Command ended'
#print shared[:]
except:
print 'Traceback line in main: '
traceback.print_exc()
sys.exit()
#Communication test
#getCoord()
#print 'Iteration: ', iteration
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'new'
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'new'
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'stop' | mit |
rs2/pandas | pandas/tests/extension/arrow/test_bool.py | 1 | 2726 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.extension import base
pytest.importorskip("pyarrow", minversion="0.13.0")
from .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip
@pytest.fixture
def dtype():
return ArrowBoolDtype()
@pytest.fixture
def data():
values = np.random.randint(0, 2, size=100, dtype=bool)
values[1] = ~values[0]
return ArrowBoolArray.from_scalars(values)
@pytest.fixture
def data_missing():
return ArrowBoolArray.from_scalars([None, True])
def test_basic_equals(data):
# https://github.com/pandas-dev/pandas/issues/34660
assert pd.Series(data).equals(pd.Series(data))
class BaseArrowTests:
pass
class TestDtype(BaseArrowTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
pytest.skip("GH-22666")
class TestInterface(BaseArrowTests, base.BaseInterfaceTests):
def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
def test_from_dtype(self, data):
pytest.skip("GH-22666")
# seems like some bug in isna on empty BoolArray returning floats.
@pytest.mark.xfail(reason="bad is-na for empty data")
def test_from_sequence_from_cls(self, data):
super().test_from_sequence_from_cls(data)
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_no_data_with_index(dtype, na_value)
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
@pytest.mark.xfail(reason="raises AssertionError")
def test_construct_empty_dataframe(self, dtype):
super().test_construct_empty_dataframe(dtype)
class TestReduce(base.BaseNoReduceTests):
def test_reduce_series_boolean(self):
pass
class TestReduceBoolean(base.BaseBooleanReduceTests):
pass
def test_is_bool_dtype(data):
assert pd.api.types.is_bool_dtype(data)
assert pd.core.common.is_bool_indexer(data)
s = pd.Series(range(len(data)))
result = s[data]
expected = s[np.asarray(data)]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
abimannans/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
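            # the LAR active set grows by one feature per step, so after step i
            # the residual should correlate maximally (within eps) with i + 1 features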
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with rank < n_features) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we test the positive option for all the estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# check that the positive option is correctly passed on to all estimator
# classes, all tested here in this same function
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with the additional positive
# option. However, for the middle part (the comparison of coefficient values
# over a range of alphas) some adaptations were needed. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients typically agree up to the smallest
# alpha reached by the Lars-Lasso algorithm and start to diverge
# thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
sankar-mukherjee/CoFee | laurent/coFeeClassif.py | 1 | 17162 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 20:02:02 2015
@author: prevot
"""
import sys
#sys.path.append('/Users/prevot/Ubuntu One/Code')
import nltk
import csv
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn import metrics
from sklearn import preprocessing
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from numpy import array
from random import shuffle
from pandas import DataFrame
from pandas import Series
WORKING_DIR = 'C:/Users/mukherjee/Desktop/CoFee/analysis/laurent/'
CORPORA_LIST = ['CID','MTR']#,'MTX','DVD']
SPEAKER_LIST = ['AB','CM','AG','YM','EB','SR','NH','LL','AC','MB','BX','MG','LJ','AP','ML','IM','BLX','BLY','ROX','ROY','VEX','VEY','MAX','MAY']
ROLE_LIST = ['F','G','-']
SESSION_LIST = ['1','2','3','4','5','6','7','8']
def readData(filename):
res = []
with open(filename, 'r') as filename:
reader = csv.DictReader(filename, delimiter=',', quotechar='"')
for row in reader:
res.append(row)
return res
def filterData(data,filterField,filterValues,positive=True):
'''
Example filterData(data,'base0.65',['None'],False)
Example filterData(data,'corpus',['MTX','MTR'],True)
'''
res = []
for line in data:
if positive:
if line[filterField] in filterValues:
res.append(line)
else:
if line[filterField] not in filterValues:
res.append(line)
return res
def cutTextfeat(data,feature,threshold):
listValues = [line[feature] for line in data]
distrib = nltk.FreqDist(listValues)
frequentValues = [item for item in distrib if distrib[item]>=threshold]
print "size lexicon "+feature+" :"+str(len(frequentValues))
for line in data:
if line[feature] not in frequentValues:
line[feature] = 'hapax'
return data
def buildFilteredLexica(data,featureList,threshold):
res = {}
for feat in featureList:
listValues= [line[feat] for line in data]
distrib = nltk.FreqDist(listValues)
frequentValues = [item for item in distrib if distrib[item] >= threshold]
res[feat] = frequentValues
return res
def cleanData(data,lexica):
res = []
for line in data:
lineres = {}
for item in line.keys():
if item in ACO_FEAT+POS_FEAT+INTER_ACO_FEAT:
if line[item] == 'NaN':
lineres[item] = 0 #TODO
elif line[item] == 'Inf':
lineres[item] = -1 #TODO
else:
lineres[item] = float(line[item])
elif item in FUNCTIONS:
lineres[item] = line[item]
elif item in ['sa','osa','osb','pa','pb','opa','opb']:
if float(line[item]) > 4.0:
lineres[item] =4.0
elif float(line[item]) < 0.0:
lineres[item] =0.0
else:
lineres[item] = float(line[item])
elif item in NUM_LEX_FEAT + NUM_CTX_FEAT + NUM_INTER_FEAT + CTX_MAN_FEAT:
lineres[item] = float(line[item])
elif item in BIN_FEAT:
lineres[item] = int(line[item])
elif item in TXT_FEAT:
lb = preprocessing.LabelBinarizer()
lb.fit(lexica[item])
binSimple = lb.transform([(line[item])])
for i in range(len(binSimple[0])):
lineres[item+'_'+str(i)] = binSimple[0][i]
elif item in META_FEAT:
lb = preprocessing.LabelBinarizer()
lb.fit(lexica[item])
binSimple = lb.transform([(line[item])])
for i in range(len(binSimple[0])):
lineres[item+'_'+str(i)] = binSimple[0][i]
lineres['ndo'] = float(line['do'])/float(line['dur'])
res.append(lineres)
return res
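# Illustrative sketch: LabelBinarizer expands a categorical value into one
# indicator column per class (a single 0/1 column in the two-class case), which
# is why each text/meta feature above becomes several '<feat>_<i>' columns, e.g.
# lb = preprocessing.LabelBinarizer().fit(['-', 'F', 'G'])
# lb.transform(['G']) # -> array([[0, 0, 1]]): one column per class, in sorted order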
def prepdata(rawdata):
for feature in TOKENS_FEAT:
tempdata = cutTextfeat(rawdata,feature,30)
for feature in BIGRAMS_FEAT:
processedData = cutTextfeat(tempdata,feature,15)
lexiUni = buildFilteredLexica(processedData,TOKENS_FEAT,30)
lexiBi = buildFilteredLexica(processedData,BIGRAMS_FEAT,15)
lexicaCut = dict(lexiUni.items() + lexiBi.items())
lexicaCut['corpus']= CORPORA_LIST
lexicaCut['spk']= SPEAKER_LIST
lexicaCut['sess']= SESSION_LIST
lexicaCut['rol']= ROLE_LIST
cleaneddata = cleanData(processedData,lexicaCut)
return cleaneddata
def runExpeClassifiers(data,features,target,fileout):
allres = {}
shuffle(data)
# Prep features
featData = array([[sample[feat] for feat in features] for sample in data])
# featData_scaled = preprocessing.scale(featData)
min_max_scaler = preprocessing.MinMaxScaler()
featData_MinMax = min_max_scaler.fit_transform(featData)
#Prep target
labelBase = [sample[target] for sample in data]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(featData_MinMax, labelBase, test_size=0.1, random_state=0)
classifiers = [
# ('ada-10', AdaBoostClassifier(n_estimators=10)),
# ('ada-50', AdaBoostClassifier(n_estimators=50)),
# ('ada-100', AdaBoostClassifier(n_estimators=100)),
# ('svm-1', svm.SVC(cache_size=1000,C=1.0)),
# ('svm-05', svm.SVC(cache_size=1000,C=0.5)),
# ('forest-10', RandomForestClassifier(n_estimators=10)),
# ('forest-20', RandomForestClassifier(n_estimators=20)),
('forest-50', RandomForestClassifier(n_estimators=50)),
('forest-50-min5', RandomForestClassifier(n_estimators=50,min_samples_leaf=5)),
('forest-50-min10', RandomForestClassifier(n_estimators=50,min_samples_leaf=10)),
# ('forest-5010', RandomForestClassifier(n_estimators=50,max_features=10)),
# ('forest-5020', RandomForestClassifier(n_estimators=50,max_features=20)),
]
for name,clf in classifiers:
print name
clf = clf.fit(X_train, y_train)
res = clf.predict(X_test)
scores = cross_validation.cross_val_score(clf,featData_MinMax,labelBase,cv=10)
print scores.mean()
print scores.std()
print(metrics.classification_report(y_test, res))
allres[name] = scores
if name[0:6]=='forest':
print"Feature Importance"
showFeatureImportance(clf.feature_importances_,clf.estimators_,features,WORKING_DIR+'importance.png')
cols = [x for (x,y) in classifiers]
df = pd.DataFrame(allres,columns=cols)
means = df.mean()
errors = df.std()
fig, ax = plt.subplots()
ax = means.plot(yerr=errors, ax=ax, kind='bar')
ax.set_ylabel('Accuracy')
ax.set_ylim(0.4,1)
fig = ax.get_figure()
fig.savefig(fileout)
return allres
def showFeatureImportance(importances,estimators,features,fileout):
# std = np.std([tr.feature_importances_ for tr in estimators],
# axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
forsumming = {}
for f in range(len(features)):
forsumming[features[indices[f]]] = importances[indices[f]]
lex = aco = pos = aut = man = met = bid = 0 # oth =
for feat in forsumming.keys():
if feat in REAL_LEX_FEAT:
lex = lex + forsumming[feat]
elif feat in REAL_ACO_FEAT:
aco = aco + forsumming[feat]
elif feat in REAL_POS_FEAT:
pos = pos + forsumming[feat]
elif feat in REAL_CTX_AUT_FEAT:
aut = aut + forsumming[feat]
elif feat in REAL_CTX_MAN_FEAT:
man = man + forsumming[feat]
elif feat in REAL_MET_FEAT:
met = met + forsumming[feat]
# elif feat in REAL_OTH_FEAT:
# oth = oth + forsumming[feat]
else:
bid = bid + forsumming[feat]
df = pd.Series({'lex':lex,'aco':aco,'pos':pos,'ctx-aut':aut,'ctx-man':man,'met':met,'oth':bid})#inter':oth,
# Plot the feature importances of the forest
fig, ax = plt.subplots()
ax = df.plot(ax=ax, kind='bar')
ax.set_ylabel('Accuracy')
fig = ax.get_figure()
fig.savefig(fileout)
return 0
def runOneExperiment(data,classifier,features,target):
# Prep data
shuffle(data)
featData = array([[sample[feat] for feat in features] for sample in data])
min_max_scaler = preprocessing.MinMaxScaler()
featData_MinMax = min_max_scaler.fit_transform(featData)
#Prep target
labelBase = [sample[target] for sample in data]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(featData_MinMax, labelBase, test_size=0.1, random_state=0)
clf = classifier
clf = clf.fit(X_train, y_train)
res = clf.predict(X_test)
scores = cross_validation.cross_val_score(clf,featData_MinMax,labelBase,cv=10)
print scores.mean()
print scores.std()
print(metrics.classification_report(y_test, res))
return scores
def runExpeFeatures(rawdata,classifier,target):
'''
Featsets is a list of pair name,featset
'''
# filtereddata = filterData(rawdata,'baseFun0.65','None',False)
# filtereddata = rawdata
# filtereddata = filterData(rawdata,'EvalFun0.49','None',False)
filtereddata = filterData(rawdata,'EvalFun0.65','None',False)
corpora = [('cid',prepdata(filterData(filtereddata,'corpus','CID',True))),
('mtr',prepdata(filterData(filtereddata,'corpus','MTR',True))),
('all',prepdata(filtereddata))
]
globalres = {}
for namec,corpus in corpora:
print '*******'
print namec
print '*******'
allfeatures = corpus[0].keys()
finalfeatures = set(allfeatures)
REAL_LEX_FEAT = [item for item in finalfeatures if item[0:5]=='trans'] + NUM_LEX_FEAT + BIN_LEX_FEAT
REAL_CTX_AUT_FEAT = [item for item in finalfeatures if item[0:7]=='othLast'] + [item for item in finalfeatures if item[0:8]=='prevLast'] + [item for item in finalfeatures if item[0:9]=='prevFirst'] + [item for item in finalfeatures if item[0:9]=='prevfirst'] + [item for item in finalfeatures if item[0:8]=='othFirst'] + [item for item in finalfeatures if item[0:8]=='othfirst']+ NUM_CTX_FEAT + NUM_INTER_FEAT
REAL_MET_FEAT = [item for item in finalfeatures if item[0:4]=='spk_'] + [item for item in finalfeatures if item[0:6]=='corpus'] + [item for item in finalfeatures if item[0:4]=='sess'] + [item for item in finalfeatures if item[0:3]=='rol']
featsets = [#('bas',GEST_FEAT),
# ('lpa+inter',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_AUT_FEAT+REAL_INTER_FEAT),
('lex',REAL_LEX_FEAT),
('pos',REAL_POS_FEAT),
('aco',REAL_ACO_FEAT),
('lex+pos',REAL_LEX_FEAT+REAL_POS_FEAT),
('lex+aco',REAL_LEX_FEAT+REAL_ACO_FEAT),
('lex+met',REAL_LEX_FEAT+REAL_MET_FEAT),
('aco+pos',REAL_ACO_FEAT+REAL_POS_FEAT),
('lpa',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT),
('lpa+aut',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_AUT_FEAT),
('lpa+aut+met',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_AUT_FEAT+REAL_MET_FEAT),
('lpa+man',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_MAN_FEAT),
('lpa+man+met',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_MAN_FEAT+REAL_MET_FEAT),
# ('all',REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_MAN_FEAT+REAL_MET_FEAT+REAL_INTER_FEAT)
]
allres = {}
for name,featset in featsets:
print name
allres[name] = runOneExperiment(corpus,classifier,featset,target)
globalres[namec] = allres
corpcol = ['cid','mtr', 'all']
featcol = ['lex', 'pos', 'aco', 'lex+pos','lex+aco','lex+met','aco+pos','lpa', 'lpa+aut','lpa+aut+met','lpa+man','lpa+man+met']
means = DataFrame(columns=corpcol, index=featcol)
errors = DataFrame(columns=corpcol, index=featcol)
for corpus in globalres.keys():
print corpus
for featset in globalres[corpus].keys():
print featset
means[corpus][featset] = Series(globalres[corpus][featset]).mean()
errors[corpus][featset] = Series(globalres[corpus][featset]).std()
fig, ax = plt.subplots()
ax.set_ylim(0.5,0.9)
means.plot(yerr=errors, ax=ax, kind='bar')
fig.savefig(WORKING_DIR+'corpus-eval-featset.png',dpi=100)
return globalres
##############################
##############################
##############################
##############################
DATA_FILE = WORKING_DIR+'allmerged.csv'
rawdata = readData(DATA_FILE)
# Remove None Values (if desired)
filtereddata = filterData(rawdata,'baseFun0.65','None',False)
TOKENS_FEAT = ['prevLastTok','prevFirstTok','othLastTok','othFirstTok','trans']
BIGRAMS_FEAT = ['prevLastBi','prevfirstBi','othLastBi','othfirstBi']#! prevfirstBi != prevFirstBi, othfirstBi
FUNCTIONS = ['baseFun0.49','baseFun0.65', 'baseFun0.74', 'baseFun1','EvalFun0.49','EvalFun0.65', 'EvalFun0.74','EvalFun1']
###################################
# LEX FEATURES
###################################
NUM_LEX_FEAT = ['nbmh','nbouais','size']
BIN_LEX_FEAT = ['ouais','mh', 'laugh','ah','bon','oui','mais','ok','dac','voila','non','et']
TXT_LEX_FEAT = ['trans']
LEX_FEAT = NUM_LEX_FEAT + BIN_LEX_FEAT + TXT_LEX_FEAT
###################################
# ACO FEATURES
###################################
PITCH_FEAT = ['slope','f0max','f0min','f0stdev','NanRatio','span','steepness','height']
INTENSITY_FEAT = ['intQ1','intQ3','intQ2']#,'intQ1raw','intQ2raw','intQ3raw'
PHON_FEAT = ['phonSplit','form1','form2','form3']
ACO_FEAT = PITCH_FEAT + INTENSITY_FEAT + PHON_FEAT + ['duration'] + ['aperiodAV']
###################################
# INTER FEATURES
###################################
INTER_ACO_FEAT = ['steepnessInterl','spanInterl','f0maxInterl','intQ2Interl','aperiodAVInterl','f0stdevInterl','heightInterl','f0minInterl','NanRatioInterl',
'intQ1Interl', 'intQ2Interl','intQ3Interl','slopeInterl','durationInterl']
###################################
# POS FEATURES
###################################
POS_FEAT = ['sa','pa','pb','opa','opb','osa','osb','do','ndo','posDial']
###################################
# CTX-AUTO FEATURES
###################################
NUM_CTX_FEAT = ['prevNbTu','prevNbJe','prevSize','prevNbFeedback','prevRed']
TXT_CTX_FEAT = ['prevLastTok','prevFirstTok','prevLastBi','prevfirstBi'] #! prevfirstBi != prevFirstBi
NUM_INTER_FEAT = ['othNbTu','othNbJe','othNbFeedback','othSize','othRed']
TXT_INTER_FEAT = ['othLastTok','othLastBi','othFirstTok','othfirstBi']#! prevfirstBi != prevFirstBi
###################################
# CTX-MAN FEATURES
###################################
CTX_MAN_FEAT = ['inint','quest','feedback','ordre','essai','assert','incomp']
###################################
# META FEATURES
###################################
META_FEAT = ['spk','sess','rol','corpus']
###################################
# OTHER FEATURES
###################################
SIM_ACOU_FEAT = ['SimScore']
GEST_FEAT = ['gest']
###################################
# TYPE of FEATURES
###################################
TXT_FEAT = TXT_INTER_FEAT + TXT_CTX_FEAT + TXT_LEX_FEAT
BIN_FEAT = BIN_LEX_FEAT
data = prepdata(filtereddata)
allfeatures = data[0].keys()
finalfeatures = set(allfeatures)
# REAL FEATURES (Binarized,...)
REAL_LEX_FEAT = [item for item in finalfeatures if item[0:5]=='trans'] + NUM_LEX_FEAT + BIN_LEX_FEAT
REAL_ACO_FEAT = ACO_FEAT
REAL_POS_FEAT = POS_FEAT
REAL_CTX_MAN_FEAT = CTX_MAN_FEAT
REAL_CTX_AUT_FEAT = [item for item in finalfeatures if item[0:7]=='othLast'] + [item for item in finalfeatures if item[0:8]=='prevLast'] + [item for item in finalfeatures if item[0:9]=='prevFirst'] + [item for item in finalfeatures if item[0:9]=='prevfirst'] + [item for item in finalfeatures if item[0:8]=='othFirst'] + [item for item in finalfeatures if item[0:8]=='othfirst']+ NUM_CTX_FEAT + NUM_INTER_FEAT
REAL_MET_FEAT = [item for item in finalfeatures if item[0:4]=='spk_'] + [item for item in finalfeatures if item[0:6]=='corpus'] + [item for item in finalfeatures if item[0:4]=='sess'] + [item for item in finalfeatures if item[0:3]=='rol']
REAL_INTER_FEAT = INTER_ACO_FEAT
#RUN classifier testing experiment
runExpeClassifiers(data,REAL_LEX_FEAT+REAL_POS_FEAT+REAL_ACO_FEAT+REAL_CTX_AUT_FEAT+REAL_CTX_AUT_FEAT+REAL_MET_FEAT,'baseFun0.65',WORKING_DIR+'classifier.png')
classifier = RandomForestClassifier(n_estimators=50) # assumed choice: 'classifier' is otherwise undefined at module level
runOneExperiment(data,classifier,REAL_POS_FEAT,'baseFun0.65')
| apache-2.0 |
pylayers/pylayers | pylayers/em/openems/animfield.py | 2 | 1199 | import vtk
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams['animation.ffmpeg_path']='/usr/bin/ffmpeg'
from vtk.util.numpy_support import vtk_to_numpy
reader = vtk.vtkXMLRectilinearGridReader()
fig,ax = plt.subplots()
NrTs = 1500
ims = []
for k in np.arange(0,NrTs,1):
# create the filename from sequence number
# zero-pad the sequence number to 10 digits so every frame up to NrTs is read
filename = "tmp/Et_%010d.vtr" % k
reader.SetFileName(filename)
reader.Update()
Efield = reader.GetOutput().GetPointData().GetArray(0)
u=vtk_to_numpy(Efield)
im = ax.imshow(u,interpolation='nearest')
a = im.get_axes()
# Draw colorbar only once
if k==0:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right",size="5%",pad=0.05)
clb = fig.colorbar(im,cax)
clb.set_label("Efield V/m")
ax.axis('tight')
ims.append([im])
ani = animation.ArtistAnimation(fig,ims,interval=50,blit=True,repeat_delay=1000)
#ani.save('Efield.mp4')
| mit |
bsipocz/scikit-image | doc/examples/plot_ihc_color_separation.py | 18 | 1925 | """
==============================================
Immunohistochemical staining colors separation
==============================================
In this example we separate the immunohistochemical (IHC) staining from the
hematoxylin counterstaining. The separation is achieved with the method
described in [1]_, known as "color deconvolution".
The IHC staining expression of the FHL2 protein is here revealed with
Diaminobenzidine (DAB) which gives a brown color.
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2hed
ihc_rgb = data.immunohistochemistry()
ihc_hed = rgb2hed(ihc_rgb)
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.imshow(ihc_rgb)
ax0.set_title("Original image")
ax1.imshow(ihc_hed[:, :, 0], cmap=plt.cm.gray)
ax1.set_title("Hematoxylin")
ax2.imshow(ihc_hed[:, :, 1], cmap=plt.cm.gray)
ax2.set_title("Eosin")
ax3.imshow(ihc_hed[:, :, 2], cmap=plt.cm.gray)
ax3.set_title("DAB")
for ax in axes.ravel():
ax.axis('off')
fig.subplots_adjust(hspace=0.3)
"""
.. image:: PLOT2RST.current_figure
Now we can easily manipulate the hematoxylin and DAB "channels":
"""
import numpy as np
from skimage.exposure import rescale_intensity
# Rescale hematoxylin and DAB signals and give them a fluorescence look
h = rescale_intensity(ihc_hed[:, :, 0], out_range=(0, 1))
d = rescale_intensity(ihc_hed[:, :, 2], out_range=(0, 1))
zdh = np.dstack((np.zeros_like(h), d, h))
fig, ax = plt.subplots()
ax.imshow(zdh)
ax.set_title("Stain separated image (rescaled)")
ax.axis('off')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
yonglehou/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
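# Note for clarity: scipy.sparse.linalg.lsqr minimizes
# ||X w - y||^2 + damp^2 * ||w||^2, so passing damp = sqrt(alpha) above yields
# exactly the ridge penalty alpha * ||w||^2.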
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
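# Note for clarity: callers recover the primal weights from this dual solution
# as w = X^T dual_coef; this is how the 'cholesky' branch of ridge_regression
# below handles the n_features > n_samples case.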
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
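# Note for clarity: per alpha, the solver above computes
# w = V diag(s / (s^2 + alpha)) U^T y,
# i.e. each singular direction of X is shrunk by a factor s_i^2 / (s_i^2 + alpha)
# relative to the ordinary least-squares solution.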
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
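# Illustrative check (a sketch assuming dense inputs; names are local to this
# comment): scaling the rows of X and y by sqrt(sample_weight) turns the
# weighted least-squares objective sum_i w_i * (y_i - x_i . coef)^2 into an
# ordinary one, so the unweighted solvers above can be reused unchanged.
# rng = np.random.RandomState(0)
# X0, y0, w = rng.randn(5, 2), rng.randn(5), rng.rand(5)
# X1, y1 = _rescale_data(X0, y0, w)
# np.allclose(X1, np.sqrt(w)[:, np.newaxis] * X0) # -> True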
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
The last three solvers (cholesky, sparse_cg and lsqr) support both dense
and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
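# Minimal usage sketch of ridge_regression (illustrative only; data are random):
# rng = np.random.RandomState(0)
# X_demo, y_demo = rng.randn(6, 3), rng.randn(6)
# coef = ridge_regression(X_demo, y_demo, alpha=1.0, solver='cholesky')
# coef.shape # -> (3,), since a 1d y yields a flattened coefficient vector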
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
The last three solvers (cholesky, sparse_cg and lsqr) support both dense
and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg, while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
a direct regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'eigen' if X is a sparse matrix or n_features > n_samples,
otherwise use 'svd'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
huzq/scikit-learn | sklearn/tree/tests/test_tree.py | 4 | 72223 | """
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from itertools import product
import struct
import pytest
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import create_memmap_backed_data
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF, TREE_UNDEFINED
from sklearn.tree._classes import CRITERIA_CLF
from sklearn.tree._classes import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the diabetes dataset
# and randomly permute it
diabetes = datasets.load_diabetes()
perm = rng.permutation(diabetes.target.size)
diabetes.data = diabetes.data[perm]
diabetes.target = diabetes.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
# NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = _sparse_random_matrix(20, 10, density=0.25,
random_state=0).toarray()
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"diabetes": {"X": diabetes.data, "y": diabetes.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert s.node_count == d.node_count, (
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.full(len(X), 0.5))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert score > 0.9, (
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert score > 0.5, (
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
@pytest.mark.parametrize("name, Tree", REG_TREES.items())
@pytest.mark.parametrize("criterion", REG_CRITERIONS)
def test_diabetes_overfit(name, Tree, criterion):
# check consistency of overfitted trees on the diabetes dataset
# since the trees will overfit, we expect an MSE of 0
reg = Tree(criterion=criterion, random_state=0)
reg.fit(diabetes.data, diabetes.target)
score = mean_squared_error(diabetes.target, reg.predict(diabetes.data))
assert score == pytest.approx(0), (
f"Failed with {name}, criterion = {criterion} and score = {score}"
)
@skip_if_32bit
@pytest.mark.parametrize("name, Tree", REG_TREES.items())
@pytest.mark.parametrize(
"criterion, max_depth",
[("mse", 15), ("mae", 20), ("friedman_mse", 15)]
)
def test_diabetes_underfit(name, Tree, criterion, max_depth):
# check consistency of trees when the depth and the number of features are
# limited
reg = Tree(
criterion=criterion, max_depth=max_depth,
max_features=6, random_state=0
)
reg.fit(diabetes.data, diabetes.target)
score = mean_squared_error(diabetes.target, reg.predict(diabetes.data))
assert score < 60 and score > 0, (
f"Failed with {name}, criterion = {criterion} and score = {score}"
)
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=5000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert importances.shape[0] == 10, "Failed with {0}".format(name)
assert n_important == 3, "Failed with {0}".format(name)
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
with pytest.raises(ValueError):
getattr(clf, 'feature_importances_')
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
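# Added explanation (not part of the original test file): for 0/1 labels with
# class-1 fraction p, the gini impurity is 2 * p * (1 - p) while the node
# variance used by the "mse" criterion is p * (1 - p). The two criteria differ
# only by a constant factor, so they rank candidate splits identically, which
# is why the trees above coincide. A minimal sketch of that identity:
def _gini_vs_variance_sketch():
    rng = np.random.RandomState(0)
    for _ in range(5):
        labels = rng.randint(0, 2, size=50)
        p = labels.mean()
        gini = 1.0 - p ** 2 - (1.0 - p) ** 2   # equals 2 * p * (1 - p)
        variance = labels.var()                # equals p * (1 - p) for 0/1 labels
        assert np.isclose(gini, 2.0 * variance)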
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(diabetes.data, diabetes.target)
assert reg.max_features_ == diabetes.data.shape[1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert clf.max_features_ == 2
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert (est.max_features_ ==
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert (est.max_features_ ==
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert est.max_features_ == 1
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert est.max_features_ == 3
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert est.max_features_ == 1
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert (est.max_features_ ==
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert est.max_features_ == iris.data.shape[1]
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert est.max_features_ == iris.data.shape[1]
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_features=-1)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_features=0.0)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_features=1.5)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_features="foobar")
with pytest.raises(ValueError):
est.fit(X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.predict_proba(X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
with pytest.raises(ValueError):
est.predict_proba(X2)
for name, TreeEstimator in ALL_TREES.items():
with pytest.raises(ValueError):
TreeEstimator(min_samples_leaf=-1).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_leaf=.6).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_leaf=0.).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_leaf=3.).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_weight_fraction_leaf=-1).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_weight_fraction_leaf=0.51).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_split=-1).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_split=0.0).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_split=1.1).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_samples_split=2.5).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(max_depth=-1).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(max_features=42).fit(X, y)
# min_impurity_split warning
with ignore_warnings(category=FutureWarning):
with pytest.raises(ValueError):
TreeEstimator(min_impurity_split=-1.0).fit(X, y)
with pytest.raises(ValueError):
TreeEstimator(min_impurity_decrease=-1.0).fit(X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
with pytest.raises(ValueError):
est.fit(X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.predict(T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
with pytest.raises(ValueError):
est.predict(t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
with pytest.raises(ValueError):
est.predict(X)
with pytest.raises(ValueError):
est.apply(X)
clf = TreeEstimator()
clf.fit(X, y)
with pytest.raises(ValueError):
clf.predict(Xt)
with pytest.raises(ValueError):
clf.apply(Xt)
# apply before fitting
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.apply(T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE)
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(node_samples) > 9, "Failed with {0}".format(name)
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(node_samples) > 9, "Failed with {0}".format(name)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE)
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert (
np.min(leaf_weights) >=
total_weight * est.min_weight_fraction_leaf), (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert (
np.min(leaf_weights) >=
total_weight * est.min_weight_fraction_leaf), (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
@pytest.mark.parametrize("name", ALL_TREES)
def test_min_weight_fraction_leaf_on_dense_input(name):
check_min_weight_fraction_leaf(name, "iris")
@pytest.mark.parametrize("name", SPARSE_TREES)
def test_min_weight_fraction_leaf_on_sparse_input(name):
check_min_weight_fraction_leaf(name, "multilabel", True)
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert (
np.min(leaf_weights) >=
max((total_weight *
est.min_weight_fraction_leaf), 5)), (
"Failed with {0} min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(
name, est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert (
np.min(leaf_weights) >=
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf))), (
"Failed with {0} min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
@pytest.mark.parametrize("name", ALL_TREES)
def test_min_weight_fraction_leaf_with_min_samples_leaf_on_dense_input(name):
check_min_weight_fraction_leaf_with_min_samples_leaf(name, "iris")
@pytest.mark.parametrize("name", SPARSE_TREES)
def test_min_weight_fraction_leaf_with_min_samples_leaf_on_sparse_input(name):
check_min_weight_fraction_leaf_with_min_samples_leaf(
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE)
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that, when min_impurity_split is not set, leaf nodes are
        # grown down to zero impurity
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert est.min_impurity_split is None, (
"Failed, min_impurity_split = {0} != None".format(
est.min_impurity_split))
try:
assert_warns(FutureWarning, est.fit, X, y)
except AssertionError:
pass
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert est.tree_.impurity[node] == 0., (
"Failed with {0} min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using
# min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
assert_warns_message(FutureWarning,
"Use the min_impurity_decrease",
est.fit, X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert est.tree_.impurity[node] >= 0, (
"Failed with {0}, min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert est.tree_.impurity[node] <= min_impurity_split, (
"Failed with {0}, min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_min_impurity_decrease():
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=10000, random_state=42)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# Check default value of min_impurity_decrease, 1e-7
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.05, random_state=0)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.0001, random_state=0)
# Check with a much lower value of 0.1
est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.1, random_state=0)
for est, expected_decrease in ((est1, 1e-7), (est2, 0.05),
(est3, 0.0001), (est4, 0.1)):
assert est.min_impurity_decrease <= expected_decrease, (
"Failed, min_impurity_decrease = {0} > {1}".format(
est.min_impurity_decrease,
expected_decrease))
est.fit(X, y)
for node in range(est.tree_.node_count):
                # If the current node is not a leaf, check whether the split
                # was justified w.r.t. the min_impurity_decrease
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0])
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp)
assert actual_decrease >= expected_decrease, (
"Failed with {0} expected min_impurity_decrease={1}"
.format(actual_decrease,
expected_decrease))
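# Added note (not part of the original test file): the quantity verified in
# the loop above is the weighted impurity decrease
#     N_t / N * (impurity - (N_t_L * left_impurity + N_t_R * right_impurity) / N_t)
# where N is the total number of samples, N_t the weighted samples reaching
# the node, and N_t_L / N_t_R those reaching its children. The helper below is
# an illustrative restatement of that formula; its name is not scikit-learn API.
def _weighted_impurity_decrease(n_total, n_node, imp_node,
                                n_left, imp_left, n_right, imp_right):
    weighted_child_impurity = (n_left * imp_left + n_right * imp_right) / n_node
    return (n_node / n_total) * (imp_node - weighted_child_impurity)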
def test_pickle():
    for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = diabetes.data, diabetes.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert type(est2) == est.__class__
score2 = est2.score(X, y)
assert score == score2, (
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert (getattr(est2.tree_, attribute) ==
fitted_attribute[attribute]), (
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert y_hat.shape == (4, 2)
proba = clf.predict_proba(T)
assert len(proba) == 2
assert proba[0].shape == (4, 2)
assert proba[1].shape == (4, 4)
log_proba = clf.predict_log_proba(T)
assert len(log_proba) == 2
assert log_proba[0].shape == (4, 2)
assert log_proba[1].shape == (4, 4)
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert y_hat.shape == (4, 2)
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert clf.n_classes_ == 2
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert len(clf.n_classes_) == 2
assert len(clf.classes_) == 2
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert clf.tree_.threshold[0] == 149.5
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert clf.tree_.threshold[0] == 49.5 # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
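# Added illustration (not part of the original test file): the equivalence
# exercised above treats an integer sample weight k like repeating that row
# k times. A minimal sketch on a toy dataset:
def _sample_weight_vs_duplication_sketch():
    X_toy = np.array([[0.], [1.], [2.], [3.]])
    y_toy = np.array([0, 0, 1, 1])
    repeats = np.array([1, 3, 2, 1])
    tree_repeated = DecisionTreeClassifier(random_state=0).fit(
        np.repeat(X_toy, repeats, axis=0), np.repeat(y_toy, repeats))
    tree_weighted = DecisionTreeClassifier(random_state=0).fit(
        X_toy, y_toy, sample_weight=repeats)
    # both trees place the single split at the same threshold
    assert_array_almost_equal(tree_repeated.tree_.threshold,
                              tree_weighted.tree_.threshold)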
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
with pytest.raises(ValueError):
clf.fit(X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
expected_err = r"Singleton.* cannot be considered a valid collection"
with pytest.raises(TypeError, match=expected_err):
clf.fit(X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
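# Added illustration (not part of the original test file): class_weight is
# expanded into per-sample weights via compute_sample_weight, and an explicit
# sample_weight multiplies on top of it, which is what the checks above rely
# on. The helper name below is ours, not scikit-learn API.
def _class_weight_as_sample_weight_sketch():
    y_toy = np.array([0, 0, 1, 2, 2, 2])
    class_weight = {0: 1., 1: 100., 2: 1.}
    per_sample = compute_sample_weight(class_weight, y_toy)
    # every sample receives the weight of its class ...
    assert_array_almost_equal(per_sample, [1., 1., 100., 1., 1., 1.])
    # ... and an explicit sample_weight combines with it multiplicatively
    explicit = np.array([1., 2., 1., 1., 1., 0.5])
    assert_array_almost_equal(explicit * per_sample,
                              [1., 2., 100., 1., 1., 0.5])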
@pytest.mark.parametrize("name", CLF_TREES)
def test_class_weights(name):
check_class_weights(name)
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
with pytest.raises(ValueError):
clf.fit(X, y)
with pytest.raises(ValueError):
clf.fit(X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
with pytest.raises(ValueError):
clf.fit(X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
with pytest.raises(ValueError):
clf.fit(X, _y)
@pytest.mark.parametrize("name", CLF_TREES)
def test_class_weight_errors(name):
check_class_weight_errors(name)
def test_max_leaf_nodes():
    # Test greedy trees with max_leaf_nodes = k + 1 leaves.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
assert est.get_n_leaves() == k + 1
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
with pytest.raises(ValueError):
est.fit(X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
with pytest.raises(ValueError):
est.fit(X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
assert est.get_depth() == 1
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]],
[0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert -3 <= value.flat[0] < 3, \
'Array points to arbitrary memory'
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert est.tree_.max_depth == 0
def test_behaviour_constant_feature_after_splits():
X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]],
np.zeros((4, 11)))))
y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]
for name, TreeEstimator in ALL_TREES.items():
# do not check extra random trees
if "ExtraTree" not in name:
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert est.tree_.max_depth == 2
assert est.tree_.node_count == 5
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert est.tree_.max_depth == 1
assert_array_equal(est.predict_proba(X), np.full((4, 2), 0.5))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert est.tree_.max_depth == 1
assert_array_equal(est.predict(X), np.full((4, ), 0.5))
def test_big_input():
    # Test that the error raised for too-large inputs mentions float32.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert "float32" in str(e)
def test_realloc():
from sklearn.tree._utils import _realloc_test
with pytest.raises(MemoryError):
_realloc_test()
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
with pytest.raises(Exception):
clf.fit(X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
with pytest.raises(MemoryError):
clf.fit(X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "diabetes"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
@pytest.mark.parametrize("tree_type", SPARSE_TREES)
@pytest.mark.parametrize(
"dataset",
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")
)
def test_sparse_input(tree_type, dataset):
max_depth = 3 if dataset == "digits" else None
check_sparse_input(tree_type, dataset, max_depth)
@pytest.mark.parametrize("tree_type",
sorted(set(SPARSE_TREES).intersection(REG_TREES)))
@pytest.mark.parametrize("dataset", ["diabetes", "reg_small"])
def test_sparse_input_reg_trees(tree_type, dataset):
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
check_sparse_input(tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
@pytest.mark.parametrize("tree_type", SPARSE_TREES)
@pytest.mark.parametrize("dataset",
["sparse-pos", "sparse-neg", "sparse-mix", "zeros"])
@pytest.mark.parametrize("check",
[check_sparse_parameters, check_sparse_criterion])
def test_sparse(tree_type, dataset, check):
check(tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a CSR and a CSC matrix from the same arrays
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert (X_sparse.data == 0.).sum() > 0
assert (X_sparse_test.data == 0.).sum() > 0
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
@pytest.mark.parametrize("tree_type", SPARSE_TREES)
def test_explicit_sparse_zeros(tree_type):
check_explicit_sparse_zeros(tree_type)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
with pytest.raises(ValueError):
TreeEstimator(random_state=0).fit(X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
with pytest.raises(ValueError):
est.predict([X])
@pytest.mark.parametrize("name", ALL_TREES)
def test_1d_input(name):
with ignore_warnings():
check_raise_error_on_1d_input(name)
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert est.tree_.max_depth == 1
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert est.tree_.max_depth == 0
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
@pytest.mark.parametrize("name", ALL_TREES)
def test_min_weight_leaf_split_level(name):
check_min_weight_leaf_split_level(name)
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE, copy=False)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE, copy=False))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
@pytest.mark.parametrize("name", ALL_TREES)
def test_public_apply_all_trees(name):
check_public_apply(name)
@pytest.mark.parametrize("name", SPARSE_TREES)
def test_public_apply_sparse_trees(name):
check_public_apply_sparse(name)
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert node_indicator.shape == (n_samples, est.tree_.node_count)
    # Assert that leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert est.tree_.max_depth <= max_depth
@pytest.mark.parametrize("name", ALL_TREES)
def test_decision_path(name):
check_decision_path(name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
with pytest.raises(TypeError):
TreeEstimator(random_state=0).fit(X, y)
@pytest.mark.parametrize("name", ALL_TREES)
def test_no_sparse_y_support(name):
# Currently we don't support sparse y
check_no_sparse_y_support(name)
def test_mae():
"""Check MAE criterion produces correct results on small toy dataset:
------------------
| X | y | weight |
------------------
| 3 | 3 | 0.1 |
| 5 | 3 | 0.3 |
| 8 | 4 | 1.0 |
| 3 | 6 | 0.6 |
| 5 | 7 | 0.3 |
------------------
|sum wt:| 2.3 |
------------------
    Because we are dealing with sample weights, we cannot find the median by
    simply choosing/averaging the centre value(s). Instead we take the median
    to be the y value at which 50% of the cumulative weight is reached (with
    the data sorted by y). For this test data the cumulative weight first
    reaches 50% at y = 4. Therefore:
Median = 4
For all the samples, we can get the total error by summing:
Absolute(Median - y) * weight
I.e., total error = (Absolute(4 - 3) * 0.1)
+ (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 6) * 0.6)
+ (Absolute(4 - 7) * 0.3)
= 2.5
Impurity = Total error / total weight
= 2.5 / 2.3
= 1.08695652173913
------------------
From this root node, the next best split is between X values of 3 and 5.
Thus, we have left and right child nodes:
LEFT RIGHT
------------------ ------------------
| X | y | weight | | X | y | weight |
------------------ ------------------
| 3 | 3 | 0.1 | | 5 | 3 | 0.3 |
| 3 | 6 | 0.6 | | 8 | 4 | 1.0 |
------------------ | 5 | 7 | 0.3 |
|sum wt:| 0.7 | ------------------
------------------ |sum wt:| 1.6 |
------------------
Impurity is found in the same way:
Left node Median = 6
Total error = (Absolute(6 - 3) * 0.1)
+ (Absolute(6 - 6) * 0.6)
= 0.3
Left Impurity = Total error / total weight
= 0.3 / 0.7
= 0.428571428571429
-------------------
Likewise for Right node:
Right node Median = 4
Total error = (Absolute(4 - 3) * 0.3)
+ (Absolute(4 - 4) * 1.0)
+ (Absolute(4 - 7) * 0.3)
= 1.2
Right Impurity = Total error / total weight
= 1.2 / 1.6
= 0.75
------
"""
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
# Test MAE where sample weights are non-uniform (as illustrated above):
dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3],
sample_weight=[0.6, 0.3, 0.1, 1.0, 0.3])
assert_allclose(dt_mae.tree_.impurity, [2.5 / 2.3, 0.3 / 0.7, 1.2 / 1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
# Test MAE where all sample weights are uniform:
dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3],
sample_weight=np.ones(5))
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
# Test MAE where a `sample_weight` is not explicitly provided.
# This is equivalent to providing uniform sample weights, though
# the internal logic is different:
dt_mae.fit(X=[[3], [5], [3], [8], [5]], y=[6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0 / 3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
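# Added sketch (not part of the original test file): the hand-computed values
# in the docstring above can be reproduced directly. The node prediction is
# the weighted median (the smallest y at which the cumulative weight reaches
# half of the total weight) and the impurity is the weighted mean absolute
# deviation from it. The helper names below are illustrative only.
def _weighted_mae_sketch():
    def weighted_median(y_vals, weights):
        order = np.argsort(y_vals)
        y_sorted = np.asarray(y_vals, dtype=float)[order]
        w_sorted = np.asarray(weights, dtype=float)[order]
        cum_weight = np.cumsum(w_sorted)
        return y_sorted[np.searchsorted(cum_weight, 0.5 * cum_weight[-1])]
    def mae_impurity(y_vals, weights):
        median = weighted_median(y_vals, weights)
        weights = np.asarray(weights, dtype=float)
        return np.sum(weights * np.abs(np.asarray(y_vals) - median)) / weights.sum()
    # root node, left child and right child from the docstring above
    assert np.isclose(mae_impurity([3, 3, 4, 6, 7], [0.1, 0.3, 1.0, 0.6, 0.3]),
                      2.5 / 2.3)
    assert np.isclose(mae_impurity([3, 6], [0.1, 0.6]), 0.3 / 0.7)
    assert np.isclose(mae_impurity([3, 4, 7], [0.3, 1.0, 0.3]), 1.2 / 1.6)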
def test_criterion_copy():
# Let's check whether copy of our criterion has the same type
# and properties as original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert typename == typename_
assert n_outputs == n_outputs_
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert typename == typename_
assert n_outputs == n_outputs_
assert n_samples == n_samples_
def test_empty_leaf_infinite_threshold():
# try to make empty leaf by using near infinite value.
data = np.random.RandomState(0).randn(100, 11) * 2e38
data = np.nan_to_num(data.astype('float32'))
X_full = data[:, :-1]
X_sparse = csc_matrix(X_full)
y = data[:, -1]
for X in [X_full, X_sparse]:
tree = DecisionTreeRegressor(random_state=0).fit(X, y)
terminal_regions = tree.apply(X)
left_leaf = set(np.where(tree.tree_.children_left == TREE_LEAF)[0])
empty_leaf = left_leaf.difference(terminal_regions)
infinite_threshold = np.where(~np.isfinite(tree.tree_.threshold))[0]
assert len(infinite_threshold) == 0
assert len(empty_leaf) == 0
@pytest.mark.parametrize("criterion", CLF_CRITERIONS)
@pytest.mark.parametrize(
"dataset", sorted(set(DATASETS.keys()) - {"reg_small", "diabetes"}))
@pytest.mark.parametrize(
"tree_cls", [DecisionTreeClassifier, ExtraTreeClassifier])
def test_prune_tree_classifier_are_subtrees(criterion, dataset, tree_cls):
dataset = DATASETS[dataset]
X, y = dataset["X"], dataset["y"]
est = tree_cls(max_leaf_nodes=20, random_state=0)
info = est.cost_complexity_pruning_path(X, y)
pruning_path = info.ccp_alphas
impurities = info.impurities
assert np.all(np.diff(pruning_path) >= 0)
assert np.all(np.diff(impurities) >= 0)
assert_pruning_creates_subtree(tree_cls, X, y, pruning_path)
@pytest.mark.parametrize("criterion", REG_CRITERIONS)
@pytest.mark.parametrize("dataset", DATASETS.keys())
@pytest.mark.parametrize(
"tree_cls", [DecisionTreeRegressor, ExtraTreeRegressor])
def test_prune_tree_regression_are_subtrees(criterion, dataset, tree_cls):
dataset = DATASETS[dataset]
X, y = dataset["X"], dataset["y"]
est = tree_cls(max_leaf_nodes=20, random_state=0)
info = est.cost_complexity_pruning_path(X, y)
pruning_path = info.ccp_alphas
impurities = info.impurities
assert np.all(np.diff(pruning_path) >= 0)
assert np.all(np.diff(impurities) >= 0)
assert_pruning_creates_subtree(tree_cls, X, y, pruning_path)
def test_prune_single_node_tree():
# single node tree
clf1 = DecisionTreeClassifier(random_state=0)
clf1.fit([[0], [1]], [0, 0])
# pruned single node tree
clf2 = DecisionTreeClassifier(random_state=0, ccp_alpha=10)
clf2.fit([[0], [1]], [0, 0])
assert_is_subtree(clf1.tree_, clf2.tree_)
def assert_pruning_creates_subtree(estimator_cls, X, y, pruning_path):
# generate trees with increasing alphas
estimators = []
for ccp_alpha in pruning_path:
est = estimator_cls(
max_leaf_nodes=20, ccp_alpha=ccp_alpha, random_state=0).fit(X, y)
estimators.append(est)
# A pruned tree must be a subtree of the previous tree (which had a
# smaller ccp_alpha)
for prev_est, next_est in zip(estimators, estimators[1:]):
assert_is_subtree(prev_est.tree_, next_est.tree_)
def assert_is_subtree(tree, subtree):
assert tree.node_count >= subtree.node_count
assert tree.max_depth >= subtree.max_depth
tree_c_left = tree.children_left
tree_c_right = tree.children_right
subtree_c_left = subtree.children_left
subtree_c_right = subtree.children_right
stack = [(0, 0)]
while stack:
tree_node_idx, subtree_node_idx = stack.pop()
assert_array_almost_equal(tree.value[tree_node_idx],
subtree.value[subtree_node_idx])
assert_almost_equal(tree.impurity[tree_node_idx],
subtree.impurity[subtree_node_idx])
assert_almost_equal(tree.n_node_samples[tree_node_idx],
subtree.n_node_samples[subtree_node_idx])
assert_almost_equal(tree.weighted_n_node_samples[tree_node_idx],
subtree.weighted_n_node_samples[subtree_node_idx])
if (subtree_c_left[subtree_node_idx] ==
subtree_c_right[subtree_node_idx]):
# is a leaf
assert_almost_equal(TREE_UNDEFINED,
subtree.threshold[subtree_node_idx])
else:
# not a leaf
assert_almost_equal(tree.threshold[tree_node_idx],
subtree.threshold[subtree_node_idx])
stack.append((tree_c_left[tree_node_idx],
subtree_c_left[subtree_node_idx]))
stack.append((tree_c_right[tree_node_idx],
subtree_c_right[subtree_node_idx]))
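# Added usage sketch (not part of the original test file): cost-complexity
# pruning removes subtrees as ccp_alpha grows, so along the alphas returned by
# cost_complexity_pruning_path the fitted trees can only shrink, which is what
# the subtree assertions above encode. The helper name below is illustrative.
def _ccp_alpha_monotonicity_sketch():
    path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(
        iris.data, iris.target)
    node_counts = []
    for ccp_alpha in path.ccp_alphas:
        pruned = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
        pruned.fit(iris.data, iris.target)
        node_counts.append(pruned.tree_.node_count)
    # node counts are non-increasing as ccp_alpha increases
    assert np.all(np.diff(node_counts) <= 0)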
def test_prune_tree_raises_negative_ccp_alpha():
clf = DecisionTreeClassifier()
msg = "ccp_alpha must be greater than or equal to 0"
with pytest.raises(ValueError, match=msg):
clf.set_params(ccp_alpha=-1.0)
clf.fit(X, y)
clf.set_params(ccp_alpha=0.0)
clf.fit(X, y)
with pytest.raises(ValueError, match=msg):
clf.set_params(ccp_alpha=-1.0)
clf._prune_tree()
def check_apply_path_readonly(name):
X_readonly = create_memmap_backed_data(X_small.astype(tree._tree.DTYPE,
copy=False))
y_readonly = create_memmap_backed_data(np.array(y_small,
dtype=tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_readonly, y_readonly)
assert_array_equal(est.predict(X_readonly),
est.predict(X_small))
assert_array_equal(est.decision_path(X_readonly).todense(),
est.decision_path(X_small).todense())
@pytest.mark.parametrize("name", ALL_TREES)
def test_apply_path_readonly_all_trees(name):
check_apply_path_readonly(name)
# TODO: Remove in v0.26
@pytest.mark.parametrize("TreeEstimator", [DecisionTreeClassifier,
DecisionTreeRegressor])
def test_X_idx_sorted_deprecated(TreeEstimator):
X_idx_sorted = np.argsort(X, axis=0)
tree = TreeEstimator()
with pytest.warns(FutureWarning,
match="The parameter 'X_idx_sorted' is deprecated"):
tree.fit(X, y, X_idx_sorted=X_idx_sorted)
| bsd-3-clause |
jundongl/scikit-feature | skfeature/example/test_reliefF.py | 3 | 1587 | import scipy.io
from sklearn import cross_validation
from sklearn import svm
from sklearn.metrics import accuracy_score
from skfeature.function.similarity_based import reliefF
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the score of each feature on the training set
score = reliefF.reliefF(X[train], y[train])
# rank features in descending order according to score
idx = reliefF.feature_ranking(score)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
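# Hedged sketch (not part of the original example): the same pipeline written
# against the newer sklearn.model_selection API, which replaced the deprecated
# cross_validation module imported above. The data path '../data/COIL20.mat'
# and the skfeature reliefF functions are assumed to be available as above.
def main_model_selection():
    from sklearn.model_selection import KFold
    mat = scipy.io.loadmat('../data/COIL20.mat')
    X, y = mat['X'].astype(float), mat['Y'][:, 0]
    clf = svm.LinearSVC()
    num_fea = 100
    acc = 0.0
    for train, test in KFold(n_splits=10, shuffle=True).split(X):
        # score and rank features on the training fold only
        score = reliefF.reliefF(X[train], y[train])
        idx = reliefF.feature_ranking(score)
        selected = X[:, idx[0:num_fea]]
        clf.fit(selected[train], y[train])
        acc += accuracy_score(y[test], clf.predict(selected[test]))
    print('Accuracy:', acc / 10)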
if __name__ == '__main__':
main() | gpl-2.0 |
shenzebang/scikit-learn | sklearn/neighbors/base.py | 22 | 31143 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
    Parameters
    ----------
    dist : ndarray
        The input distances
    weights : {'uniform', 'distance' or a callable}
        The kind of weighting used
    Returns
    -------
    weights_arr : array of the same shape as ``dist``
        if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
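# Illustrative sketch (not library code): how the 'auto' algorithm choice made
# in NeighborsBase._fit above resolves in practice. NearestNeighbors, the
# public estimator built on these mixins, is imported here only for the demo.
def _example_auto_algorithm_choice():
    from sklearn.neighbors import NearestNeighbors
    rng = np.random.RandomState(0)
    X_dense = rng.rand(50, 3)
    dense_fit = NearestNeighbors(n_neighbors=3, algorithm='auto').fit(X_dense)
    sparse_fit = NearestNeighbors(n_neighbors=3,
                                  algorithm='auto').fit(csr_matrix(X_dense))
    # Euclidean metric with few neighbors on dense data selects a KD-tree,
    # while sparse input always falls back to brute force.
    return dense_fit._fit_method, sparse_fit._fit_method  # ('kd_tree', 'brute')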
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: When the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
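# Small sketch (not part of the module): when kneighbors is called without X,
# the query set is the training set itself and each sample is removed from its
# own neighbor list, as handled at the end of kneighbors above.
def _example_kneighbors_excludes_self():
    from sklearn.neighbors import NearestNeighbors
    X_train = np.array([[0.0], [1.0], [3.0]])
    nn = NearestNeighbors(n_neighbors=1).fit(X_train)
    # Querying with X returns each point itself (distance zero) ...
    with_self = nn.kneighbors(X_train, return_distance=False)   # [[0], [1], [2]]
    # ... querying with X=None skips it and returns the nearest other point.
    without_self = nn.kneighbors(return_distance=False)         # [[1], [0], [1]]
    return with_self, without_self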
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
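# Sketch (not part of the module): the 'distance' mode of
# radius_neighbors_graph stores the actual distances on the edges instead of
# the 0/1 connectivity values shown in the docstring example above.
def _example_radius_graph_distance_mode():
    from sklearn.neighbors import NearestNeighbors
    X_train = [[0.0], [3.0], [1.0]]
    nn = NearestNeighbors(radius=1.5).fit(X_train)
    A = nn.radius_neighbors_graph(X_train, mode='distance')
    # A.toarray() ->
    # [[0., 0., 1.],
    #  [0., 0., 0.],
    #  [1., 0., 0.]]   (self-edges are stored with distance 0)
    return A.toarray()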
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
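# Sketch (not part of the module): SupervisedIntegerMixin.fit encodes the
# target into integer indices per output and keeps the original labels in
# classes_, which is what KNeighborsClassifier exposes after fitting.
def _example_integer_target_encoding():
    from sklearn.neighbors import KNeighborsClassifier
    X_train = [[0.0], [1.0], [2.0], [3.0]]
    y_train = ['a', 'a', 'b', 'b']
    clf = KNeighborsClassifier(n_neighbors=1).fit(X_train, y_train)
    # clf.classes_ -> array(['a', 'b'], ...) and the internal clf._y is
    # the integer-encoded target [0, 0, 1, 1]
    return clf.classes_, clf._y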
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
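# Sketch (not in the original tests): the matrix identity behind the test
# above, (X'X + a*I)^-1 X'y == X'(XX' + a*I)^-1 y, checked directly with
# scipy.linalg on a small random problem.
def _example_primal_dual_identity():
    rng = np.random.RandomState(0)
    X, y = rng.randn(6, 4), rng.randn(6)
    a = 1e-2
    primal = linalg.solve(X.T.dot(X) + a * np.eye(4), X.T.dot(y))
    dual = X.T.dot(linalg.solve(X.dot(X.T) + a * np.eye(6), y))
    assert_array_almost_equal(primal, dual)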
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
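# Sketch (not in the original tests): the rescaling argument used above on its
# own. For the squared loss, weighting sample i by w_i is equivalent to
# multiplying its row of X and its target by sqrt(w_i).
def _example_sample_weight_rescaling():
    rng = np.random.RandomState(0)
    X, y = rng.randn(6, 5), rng.randn(6)
    w = 1 + rng.rand(6)
    coef_weighted = ridge_regression(X, y, alpha=1.0, sample_weight=w,
                                     solver="cholesky")
    coef_rescaled = ridge_regression(X * np.sqrt(w)[:, np.newaxis],
                                     y * np.sqrt(w), alpha=1.0,
                                     solver="cholesky")
    assert_array_almost_equal(coef_weighted, coef_rescaled)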
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
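# Sketch (not in the original tests): the weights that class_weight='balanced'
# assigns are n_samples / (n_classes * bincount(y)), which is why it is a
# no-op on a perfectly balanced dataset such as iris above.
def _example_balanced_class_weight():
    from sklearn.utils.class_weight import compute_class_weight
    y = np.array([1, 1, 1, -1, -1])
    # classes [-1, 1] -> [5 / (2 * 2), 5 / (2 * 3)] = [1.25, 0.8333...]
    return compute_class_weight('balanced', np.unique(y), y)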
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
saketkc/statsmodels | statsmodels/tsa/statespace/tests/test_representation.py | 3 | 41354 | """
Tests for python wrapper of state space representation and filtering
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import pandas as pd
import os
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.kalman_filter import KalmanFilter, FilterResults, PredictionResults
from statsmodels.tsa.statespace import tools, sarimax
from .results import results_kalman_filter
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
clark1989_path = 'results' + os.sep + 'results_clark1989_R.csv'
clark1989_results = pd.read_csv(current_path + os.sep + clark1989_path)
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Construct the statespace representation
k_states = 4
self.model = KalmanFilter(k_endog=1, k_states=k_states, **kwargs)
self.model.bind(data['lgdp'].values)
self.model.design[:, :, 0] = [1, 1, 0, 0]
self.model.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters']
)
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
# Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[self.true['start']:].sum(),
self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
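# Minimal sketch (illustrative only, not part of the test suite): the same
# KalmanFilter API used by the Clark fixtures above, applied to a plain local
# level model y_t = mu_t + e_t, mu_t = mu_{t-1} + w_t. All parameter values
# here are arbitrary assumptions chosen for the demonstration.
def _example_local_level_filter():
    endog = np.cumsum(np.random.RandomState(0).randn(50))
    mod = KalmanFilter(k_endog=1, k_states=1)
    mod.bind(endog)
    mod.design[0, 0, 0] = 1.0      # Z: observation loading
    mod.transition[0, 0, 0] = 1.0  # T: random-walk state
    mod.selection[0, 0, 0] = 1.0   # R
    mod.obs_cov[0, 0, 0] = 1.0     # H: measurement variance
    mod.state_cov[0, 0, 0] = 0.5   # Q: state innovation variance
    mod.initialize_known(np.zeros(1), np.eye(1) * 1e6)
    res = mod.filter()
    return res.llf_obs.sum()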
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.run_filter()
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.run_filter()
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.r_[self.model.endog[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
    Full memory conservation test for the loglikelihood and filtered
    states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.model.loglikelihood_burn = self.true['start']
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
k_states = 6
self.model = KalmanFilter(k_endog=2, k_states=k_states, **kwargs)
self.model.bind(np.ascontiguousarray(data.values))
# Statespace representation
self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
self.model.transition[
([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])
] = [1, 1, 0, 0, 1, 1, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'],
)
self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.obs_cov[1, 1, 0] = sigma_ec**2
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
        # Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
# self.results.llf_obs[self.true['start']:].sum(),
self.results.llf_obs[0:].sum(),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.run_filter()
def test_kalman_gain(self):
assert_allclose(self.results.kalman_gain.sum(axis=1).sum(axis=0),
clark1989_results['V1'], atol=1e-4)
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
    Forecasting test for the loglikelihood and filtered states with
    two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.c_[
self.model.endog,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
    Full memory conservation test for the loglikelihood and filtered
    states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
# self.model.loglikelihood_burn = self.true['start']
self.model.loglikelihood_burn = 0
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][-1],
self.true_states.iloc[end-1, 3], 4
)
class TestClark1989PartialMissing(Clark1989):
def __init__(self):
super(TestClark1989PartialMissing, self).__init__()
endog = self.model.endog
endog[1,-51:] = np.NaN
self.model.bind(endog)
self.run_filter()
def test_loglike(self):
assert_allclose(self.results.llf_obs[0:].sum(), 1232.113456)
def test_filtered_state(self):
# Could do this, but no need really.
pass
def test_predicted_state(self):
assert_allclose(
self.results.predicted_state.T[1:], clark1989_results.iloc[:,1:],
atol=1e-8
)
# Miscellaneous coverage-related tests
def test_slice_notation():
# Test setting and getting state space representation matrices using the
# slice notation.
endog = np.arange(10)*1.0
mod = KalmanFilter(k_endog=1, k_states=2)
mod.bind(endog)
# Test invalid __setitem__
def set_designs():
mod['designs'] = 1
def set_designs2():
mod['designs',0,0] = 1
def set_designs3():
mod[0] = 1
assert_raises(IndexError, set_designs)
assert_raises(IndexError, set_designs2)
assert_raises(IndexError, set_designs3)
# Test invalid __getitem__
assert_raises(IndexError, lambda: mod['designs'])
assert_raises(IndexError, lambda: mod['designs',0,0,0])
assert_raises(IndexError, lambda: mod[0])
# Test valid __setitem__, __getitem__
assert_equal(mod.design[0,0,0], 0)
mod['design',0,0,0] = 1
assert_equal(mod['design'].sum(), 1)
assert_equal(mod.design[0,0,0], 1)
assert_equal(mod['design',0,0,0], 1)
# Test valid __setitem__, __getitem__ with unspecified time index
mod['design'] = np.zeros(mod['design'].shape)
assert_equal(mod.design[0,0], 0)
mod['design',0,0] = 1
assert_equal(mod.design[0,0], 1)
assert_equal(mod['design',0,0], 1)
def test_representation():
# Test Representation construction
# Test an invalid number of states
def zero_kstates():
mod = Representation(1, 0)
assert_raises(ValueError, zero_kstates)
# Test an invalid endogenous array
def empty_endog():
endog = np.zeros((0,0))
mod = Representation(endog, k_states=2)
assert_raises(ValueError, empty_endog)
# Test a Fortran-ordered endogenous array (which will be assumed to be in
# wide format: k_endog x nobs)
nobs = 10
k_endog = 2
endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog,nobs)*1.)
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test a C-ordered endogenous array (which will be assumed to be in
# tall format: nobs x k_endog)
nobs = 10
k_endog = 2
endog = np.arange(nobs*k_endog).reshape(nobs,k_endog)*1.
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test getting the statespace representation
assert_equal(mod._statespace, None)
mod._initialize_representation()
assert_equal(mod._statespace is not None, True)
def test_bind():
# Test binding endogenous data to Kalman filter
mod = Representation(1, k_states=2)
# Test invalid endogenous array (it must be ndarray)
assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))
# Test valid (nobs x 1) endogenous array
mod.bind(np.arange(10)*1.)
assert_equal(mod.nobs, 10)
# Test valid (k_endog x 0) endogenous array
mod.bind(np.zeros(0,dtype=np.float64))
# Test invalid (3-dim) endogenous array
assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))
# Test valid F-contiguous
mod.bind(np.asfortranarray(np.arange(10).reshape(1,10)))
assert_equal(mod.nobs, 10)
# Test valid C-contiguous
mod.bind(np.arange(10).reshape(10,1))
assert_equal(mod.nobs, 10)
# Test invalid F-contiguous
assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10,1))))
# Test invalid C-contiguous
assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1,10)))
def test_initialization():
# Test Kalman filter initialization
mod = Representation(1, k_states=2)
# Test invalid state initialization
assert_raises(RuntimeError, lambda: mod._initialize_state())
# Test valid initialization
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(2) * 3.
mod.initialize_known(initial_state, initial_state_cov)
assert_equal(mod._initial_state.sum(), 3)
assert_equal(mod._initial_state_cov.diagonal().sum(), 6)
# Test invalid initial_state
initial_state = np.zeros(10,)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
initial_state = np.zeros((10,10))
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
# Test invalid initial_state_cov
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(3)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
def test_no_endog():
# Test for RuntimeError when no endog is provided by the time filtering
# is initialized.
mod = KalmanFilter(k_endog=1, k_states=1)
# directly call the _initialize_filter function
assert_raises(RuntimeError, mod._initialize_filter)
# indirectly call it through filtering
mod.initialize_approximate_diffuse()
assert_raises(RuntimeError, mod.filter)
def test_cython():
# Test the cython _kalman_filter creation, re-creation, calling, etc.
# Check that datatypes are correct:
for prefix, dtype in tools.prefix_dtype_map.items():
endog = np.array(1., ndmin=2, dtype=dtype)
mod = KalmanFilter(k_endog=1, k_states=1, dtype=dtype)
# Bind data and initialize the ?KalmanFilter object
mod.bind(endog)
mod._initialize_filter()
# Check that the dtype and prefix are correct
assert_equal(mod.prefix, prefix)
assert_equal(mod.dtype, dtype)
# Test that a dKalmanFilter instance was created
assert_equal(prefix in mod._kalman_filters, True)
kf = mod._kalman_filters[prefix]
assert_equal(isinstance(kf, tools.prefix_kalman_filter_map[prefix]), True)
# Test that the default returned _kalman_filter is the above instance
assert_equal(mod._kalman_filter, kf)
# Check that upcasting datatypes / ?KalmanFilter works (e.g. d -> z)
mod = KalmanFilter(k_endog=1, k_states=1)
# Default dtype is float
assert_equal(mod.prefix, 'd')
assert_equal(mod.dtype, np.float64)
# Prior to initialization, no ?KalmanFilter exists
assert_equal(mod._kalman_filter, None)
# Bind data and initialize the ?KalmanFilter object
endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.float64))
mod.bind(endog)
mod._initialize_filter()
kf = mod._kalman_filters['d']
# Rebind data, still float, check that we haven't changed
mod.bind(endog)
mod._initialize_filter()
assert_equal(mod._kalman_filter, kf)
# Force creating new ?Statespace and ?KalmanFilter, by changing the
# time-varying character of an array
mod.design = np.zeros((1,1,2))
mod._initialize_filter()
assert_equal(mod._kalman_filter == kf, False)
kf = mod._kalman_filters['d']
# Rebind data, now complex, check that the ?KalmanFilter instance has
# changed
endog = np.ascontiguousarray(np.array([1., 2.], dtype=np.complex128))
mod.bind(endog)
assert_equal(mod._kalman_filter == kf, False)
def test_filter():
# Tests of invalid calls to the filter function
endog = np.ones((10,1))
mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
mod['design', :] = 1
mod['selection', :] = 1
mod['state_cov', :] = 1
# Test default filter results
res = mod.filter()
assert_equal(isinstance(res, FilterResults), True)
# Test specified invalid results class
assert_raises(ValueError, mod.filter, results=object)
# Test specified valid results class
res = mod.filter(results=FilterResults)
assert_equal(isinstance(res, FilterResults), True)
def test_loglike():
# Tests of invalid calls to the loglike function
endog = np.ones((10,1))
mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
mod['design', :] = 1
mod['selection', :] = 1
mod['state_cov', :] = 1
# Test that self.memory_no_likelihood = True raises an error
mod.memory_no_likelihood = True
assert_raises(RuntimeError, mod.loglike)
assert_raises(RuntimeError, mod.loglikeobs)
def test_predict():
# Tests of invalid calls to the predict function
warnings.simplefilter("always")
endog = np.ones((10,1))
mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
mod['design', :] = 1
mod['obs_intercept'] = np.zeros((1,10))
mod['selection', :] = 1
mod['state_cov', :] = 1
# Check that we need both forecasts and predicted output for prediction
mod.memory_no_forecast = True
res = mod.filter()
assert_raises(ValueError, res.predict)
mod.memory_no_forecast = False
mod.memory_no_predicted = True
res = mod.filter()
assert_raises(ValueError, res.predict)
mod.memory_no_predicted = False
# Now get a clean filter object
res = mod.filter()
# Check that start < 0 is an error
assert_raises(ValueError, res.predict, start=-1)
# Check that end < start is an error
assert_raises(ValueError, res.predict, start=2, end=1)
# Check that dynamic < 0 is an error
assert_raises(ValueError, res.predict, dynamic=-1)
    # Check that dynamic > end is a warning
with warnings.catch_warnings(record=True) as w:
res.predict(end=1, dynamic=2)
message = ('Dynamic prediction specified to begin after the end of'
' prediction, and so has no effect.')
assert_equal(str(w[0].message), message)
    # Check that dynamic > nobs is a warning
with warnings.catch_warnings(record=True) as w:
res.predict(end=11, dynamic=11, obs_intercept=np.zeros((1,1)))
message = ('Dynamic prediction specified to begin during'
' out-of-sample forecasting period, and so has no'
' effect.')
assert_equal(str(w[0].message), message)
    # Check for a warning when providing an unused statespace matrix
with warnings.catch_warnings(record=True) as w:
res.predict(end=res.nobs+1, design=True, obs_intercept=np.zeros((1,1)))
message = ('Model has time-invariant design matrix, so the design'
' argument to `predict` has been ignored.')
assert_equal(str(w[0].message), message)
# Check that an error is raised when a new time-varying matrix is not
# provided
assert_raises(ValueError, res.predict, end=res.nobs+1)
# Check that an error is raised when a non-two-dimensional obs_intercept
# is given
assert_raises(ValueError, res.predict, end=res.nobs+1,
obs_intercept=np.zeros(1))
# Check that an error is raised when an obs_intercept with incorrect length
# is given
assert_raises(ValueError, res.predict, end=res.nobs+1,
obs_intercept=np.zeros(2))
# Check that start=None gives start=0 and end=None gives end=nobs
assert_equal(res.predict().forecasts.shape, (1,res.nobs))
# Check that dynamic=True begins dynamic prediction immediately
# TODO just a smoke test
res.predict(dynamic=True)
# Check that on success, PredictionResults object is returned
prediction_results = res.predict(start=3, end=5)
assert_equal(isinstance(prediction_results, PredictionResults), True)
# Check for correctly subset representation arrays
# (k_endog, npredictions) = (1, 2)
assert_equal(prediction_results.endog.shape, (1, 2))
# (k_endog, npredictions) = (1, 2)
assert_equal(prediction_results.obs_intercept.shape, (1, 2))
# (k_endog, k_states) = (1, 1)
assert_equal(prediction_results.design.shape, (1, 1))
# (k_endog, k_endog) = (1, 1)
assert_equal(prediction_results.obs_cov.shape, (1, 1))
# (k_state,) = (1,)
assert_equal(prediction_results.state_intercept.shape, (1,))
# (k_state, npredictions) = (1, 2)
assert_equal(prediction_results.obs_intercept.shape, (1, 2))
# (k_state, k_state) = (1, 1)
assert_equal(prediction_results.transition.shape, (1, 1))
# (k_state, k_posdef) = (1, 1)
assert_equal(prediction_results.selection.shape, (1, 1))
# (k_posdef, k_posdef) = (1, 1)
assert_equal(prediction_results.state_cov.shape, (1, 1))
# Check for correctly subset filter output arrays
# (k_endog, npredictions) = (1, 2)
assert_equal(prediction_results.forecasts.shape, (1, 2))
assert_equal(prediction_results.forecasts_error.shape, (1, 2))
# (k_states, npredictions) = (1, 2)
assert_equal(prediction_results.filtered_state.shape, (1, 2))
assert_equal(prediction_results.predicted_state.shape, (1, 2))
# (k_endog, k_endog, npredictions) = (1, 1, 2)
assert_equal(prediction_results.forecasts_error_cov.shape, (1, 1, 2))
# (k_states, k_states, npredictions) = (1, 1, 2)
assert_equal(prediction_results.filtered_state_cov.shape, (1, 1, 2))
assert_equal(prediction_results.predicted_state_cov.shape, (1, 1, 2))
# Check for invalid attribute
assert_raises(AttributeError, getattr, prediction_results, 'test')
# Check that an error is raised when a non-two-dimensional obs_cov
# is given
# ...and...
# Check that an error is raised when an obs_cov with incorrect length
# is given
mod = KalmanFilter(endog, k_states=1, initialization='approximate_diffuse')
mod['design', :] = 1
mod['obs_cov'] = np.zeros((1,1,10))
mod['selection', :] = 1
mod['state_cov', :] = 1
res = mod.filter()
assert_raises(ValueError, res.predict, end=res.nobs+1,
obs_cov=np.zeros((1,1)))
assert_raises(ValueError, res.predict, end=res.nobs+1,
obs_cov=np.zeros((1,1,2)))
def test_standardized_forecasts_error():
    # Simple test that standardized forecast errors are calculated correctly.
# Just uses a different calculation method on a univariate series.
# Get the dataset
true = results_kalman_filter.uc_uni
data = pd.DataFrame(
true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Fit an ARIMA(1,1,0) to log GDP
mod = sarimax.SARIMAX(data['lgdp'], order=(1,1,0))
res = mod.fit(disp=-1)
standardized_forecasts_error = (
res.filter_results.forecasts_error[0] /
np.sqrt(res.filter_results.forecasts_error_cov[0,0])
)
assert_allclose(
res.filter_results.standardized_forecasts_error[0],
standardized_forecasts_error,
)
def test_simulate():
# Test for simulation of new time-series
from scipy.signal import lfilter
# Common parameters
nsimulations = 10
sigma2 = 2
measurement_shocks = np.zeros(nsimulations)
state_shocks = np.random.normal(scale=sigma2**0.5, size=nsimulations)
# Random walk model, so simulated series is just the cumulative sum of
# the shocks
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
actual = mod.simulate(
nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)[0].squeeze()
desired = np.r_[0, np.cumsum(state_shocks)[:-1]]
assert_allclose(actual, desired)
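    # Note on the timing convention checked above: the simulation starts from a
    # zero state and uses alpha_{t+1} = alpha_t + eta_t, so the state shock
    # drawn at time t only shows up in the observation at t+1; hence the
    # leading zero and the shifted cumulative sum in `desired`.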
# Local level model, so simulated series is just the cumulative sum of
# the shocks plus the measurement shock
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
actual = mod.simulate(
nsimulations, measurement_shocks=np.ones(nsimulations),
state_shocks=state_shocks)[0].squeeze()
desired = np.r_[1, np.cumsum(state_shocks)[:-1] + 1]
assert_allclose(actual, desired)
# Local level-like model with observation and state intercepts, so
# simulated series is just the cumulative sum of the shocks minus the state
# intercept, plus the observation intercept and the measurement shock
mod = KalmanFilter(k_endog=1, k_states=1)
mod['obs_intercept', 0, 0] = 5.
mod['design', 0, 0] = 1.
mod['state_intercept', 0, 0] = -2.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
actual = mod.simulate(
nsimulations, measurement_shocks=np.ones(nsimulations),
state_shocks=state_shocks)[0].squeeze()
desired = np.r_[1 + 5, np.cumsum(state_shocks - 2)[:-1] + 1 + 5]
assert_allclose(actual, desired)
# Model with time-varying observation intercept
mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
mod['obs_intercept'] = (np.arange(10)*1.).reshape(1, 10)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
actual = mod.simulate(
nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)[0].squeeze()
desired = np.r_[0, np.cumsum(state_shocks)[:-1] + np.arange(1,10)]
assert_allclose(actual, desired)
    # Model with time-varying observation intercept; check that an error is
    # raised if more simulations are requested than there are observations.
mod = KalmanFilter(k_endog=1, k_states=1, nobs=10)
mod['obs_intercept'] = (np.arange(10)*1.).reshape(1, 10)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
assert_raises(ValueError, mod.simulate, nsimulations+1, measurement_shocks,
state_shocks)
# ARMA(1,1): phi = [0.1], theta = [0.5], sigma^2 = 2
phi = np.r_[0.1]
theta = np.r_[0.5]
mod = sarimax.SARIMAX([0], order=(1,0,1))
mod.update(np.r_[phi, theta, sigma2])
actual = mod.ssm.simulate(
nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)[0].squeeze()
desired = lfilter([1, theta], [1, -phi], np.r_[0, state_shocks[:-1]])
assert_allclose(actual, desired)
# SARIMAX(1,0,1)x(1,0,1,4), this time using the results object call
mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1,0,1),
seasonal_order=(1,0,1,4))
res = mod.filter([0.1, 0.5, 0.2, -0.3, 1])
actual = res.simulate(
nsimulations, measurement_shocks=measurement_shocks,
state_shocks=state_shocks)[0].squeeze()
desired = lfilter(
res.polynomial_reduced_ma, res.polynomial_reduced_ar,
np.r_[0, state_shocks[:-1]])
assert_allclose(actual, desired)
def test_impulse_responses():
# Test for impulse response functions
# Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
# for all periods
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
mod['state_cov', 0, 0] = 2.
actual = mod.impulse_responses(steps=10)
desired = np.ones((11, 1))
assert_allclose(actual, desired)
# Random walk: 2-unit impulse response (i.e. non-orthogonalized irf) is 2
# for all periods
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
mod['state_cov', 0, 0] = 2.
actual = mod.impulse_responses(steps=10, impulse=[2])
desired = np.ones((11, 1)) * 2
assert_allclose(actual, desired)
# Random walk: 1-standard-deviation response (i.e. orthogonalized irf) is
# sigma for all periods (here sigma^2 = 2)
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
mod['state_cov', 0, 0] = 2.
actual = mod.impulse_responses(steps=10, orthogonalized=True)
desired = np.ones((11, 1)) * 2**0.5
assert_allclose(actual, desired)
# Random walk: 1-standard-deviation cumulative response (i.e. cumulative
# orthogonalized irf)
mod = KalmanFilter(k_endog=1, k_states=1)
mod['design', 0, 0] = 1.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
mod['state_cov', 0, 0] = 2.
    actual = mod.impulse_responses(steps=10, orthogonalized=True,
                                   cumulative=True)
    desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]
    assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=[1], orthogonalized=True,
cumulative=True)
desired = np.cumsum(np.ones((11, 1)) * 2**0.5)[:, np.newaxis]
assert_allclose(actual, desired)
# Random walk: 1-unit impulse response (i.e. non-orthogonalized irf) is 1
# for all periods, even when intercepts are present
mod = KalmanFilter(k_endog=1, k_states=1)
mod['state_intercept', 0] = 100.
mod['design', 0, 0] = 1.
mod['obs_intercept', 0] = -1000.
mod['transition', 0, 0] = 1.
mod['selection', 0, 0] = 1.
mod['state_cov', 0, 0] = 2.
actual = mod.impulse_responses(steps=10)
desired = np.ones((11, 1))
assert_allclose(actual, desired)
# Univariate model (random walk): test that an error is thrown when
# a multivariate or empty "impulse" is sent
mod = KalmanFilter(k_endog=1, k_states=1)
assert_raises(ValueError, mod.impulse_responses, impulse=1)
assert_raises(ValueError, mod.impulse_responses, impulse=[1,1])
assert_raises(ValueError, mod.impulse_responses, impulse=[])
# Univariate model with two uncorrelated shocks
mod = KalmanFilter(k_endog=1, k_states=2)
mod['design', 0, 0:2] = 1.
mod['transition', :, :] = np.eye(2)
mod['selection', :, :] = np.eye(2)
mod['state_cov', :, :] = np.eye(2)
desired = np.ones((11, 1))
actual = mod.impulse_responses(steps=10, impulse=0)
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=[1,0])
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=1)
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=[0,1])
assert_allclose(actual, desired)
# In this case (with sigma=sigma^2=1), orthogonalized is the same as not
actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=[1,0], orthogonalized=True)
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=[0,1], orthogonalized=True)
assert_allclose(actual, desired)
# Univariate model with two correlated shocks
mod = KalmanFilter(k_endog=1, k_states=2)
mod['design', 0, 0:2] = 1.
mod['transition', :, :] = np.eye(2)
mod['selection', :, :] = np.eye(2)
mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])
desired = np.ones((11, 1))
# Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's
actual = mod.impulse_responses(steps=10, impulse=0)
assert_allclose(actual, desired)
actual = mod.impulse_responses(steps=10, impulse=1)
assert_allclose(actual, desired)
# Orthogonalized (i.e. 1-std-dev) impulses now generate different responses
actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
assert_allclose(actual, desired + desired * 0.5)
actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
assert_allclose(actual, desired)
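    # Why 1.5 above: orthogonalization uses the Cholesky factor of state_cov,
    # here L = [[1, 0], [0.5, 1]] since L L' = [[1, 0.5], [0.5, 1.25]]. A
    # one-standard-deviation impulse to the first shock therefore also moves
    # the second state by 0.5, and the design row [1, 1] sums the two states,
    # giving a response of 1 + 0.5 each period; the second column of L is
    # [0, 1], so the second impulse still gives a response of 1.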
# Multivariate model with two correlated shocks
mod = KalmanFilter(k_endog=2, k_states=2)
mod['design', :, :] = np.eye(2)
mod['transition', :, :] = np.eye(2)
mod['selection', :, :] = np.eye(2)
mod['state_cov', :, :] = np.array([[1, 0.5], [0.5, 1.25]])
ones = np.ones((11, 1))
zeros = np.zeros((11, 1))
# Non-orthogonalized (i.e. 1-unit) impulses still just generate 1's, but
# only for the appropriate series
actual = mod.impulse_responses(steps=10, impulse=0)
assert_allclose(actual, np.c_[ones, zeros])
actual = mod.impulse_responses(steps=10, impulse=1)
assert_allclose(actual, np.c_[zeros, ones])
# Orthogonalized (i.e. 1-std-dev) impulses now generate different
# responses, and only for the appropriate series
actual = mod.impulse_responses(steps=10, impulse=0, orthogonalized=True)
assert_allclose(actual, np.c_[ones, ones * 0.5])
actual = mod.impulse_responses(steps=10, impulse=1, orthogonalized=True)
assert_allclose(actual, np.c_[zeros, ones])
# AR(1) model generates a geometrically declining series
mod = sarimax.SARIMAX([0.1, 0.5, -0.2], order=(1,0,0))
phi = 0.5
mod.update([phi, 1])
desired = np.cumprod(np.r_[1, [phi]*10])[:, np.newaxis]
# Test going through the model directly
actual = mod.ssm.impulse_responses(steps=10)
assert_allclose(actual, desired)
# Test going through the results object
res = mod.filter([phi, 1.])
actual = res.impulse_responses(steps=10)
assert_allclose(actual, desired)
| bsd-3-clause |
fhennecker/semiteleporter | src/gui/tabs.py | 1 | 8387 | import os
import glob
import logging
import threading
import Tkinter, ttk, tkFileDialog
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from mesher.voxel import Point
class Tab(Tkinter.Frame):
def __init__(self, rootTab, title=""):
""" Create a new 'abstract' Tab object
rootTab = the Notebook reference
title = the name of the tab
"""
self.rootTab = rootTab
Tkinter.Frame.__init__(self, self.rootTab, width=800, height=600)
self.rootTab.add(self, text=title)
class SetupTab(Tab):
def __init__(self, rootTab, config):
""" Create a new SetupTab object
rootTab = the Notebook reference
config = the reference to the Config object from the Scanner
"""
Tab.__init__(self, rootTab, "Setup")
self.config = config
self.entries = dict()
for i in range(6):
self.rowconfigure(i, weight=1)
for i in range(3):
self.columnconfigure(i, weight=1)
self.createSectionFrame("File").grid(row=0, column=0, columnspan=3)
self.createSectionFrame("Arduino").grid(row=1, column=0, columnspan=3)
self.createSectionFrame("Camera").grid(row=2, column=0, columnspan=3)
self.createSectionFrame("LaserLeft").grid(row=3, column=0, columnspan=3)
self.createSectionFrame("LaserRight").grid(row=4, column=0, columnspan=3)
self.createSectionFrame("TurnTable").grid(row=5, column=0, columnspan=3)
Tkinter.Button(self, text="Open", command=self.openConfigFile).grid(row=6, column=0, sticky='e')
Tkinter.Button(self, text="Load", command=self.loadConfigFile).grid(row=6, column=1)
Tkinter.Button(self, text="Save", command=self.saveConfigFile).grid(row=6, column=2, sticky='w')
def createSectionFrame(self, section):
frame = Tkinter.LabelFrame(self, text=section, font=("bold"))
self.entries[section] = dict()
line = 0
for option in self.config[section]:
value = self.config.getToStr(section, option)
if(type(value) == list):
varList = []
Tkinter.Label(frame, text=option+" :").grid(row=line, column=0)
line += 1
for item,letter,col in zip(value, range(123-len(value),123), range(0,2*len(value),2)):
Tkinter.Label(frame, text=(chr(letter).upper()+" :")).grid(row=line, column=col)
varList.append(Tkinter.StringVar(frame, item))
Tkinter.Entry(frame, background="white", textvariable=varList[-1]).grid(row=line, column=col+1)
self.entries[section][option] = varList
else:
Tkinter.Label(frame, text=option).grid(row=line, column=0)
self.entries[section][option] = Tkinter.StringVar(frame, value)
if(option == "port"):
path = self.entries[section][option].get()[:-1]+"*"
ttk.Combobox(frame, textvariable=self.entries[section][option], values=glob.glob(path), state='readonly', background="white").grid(row=line, column=1, padx=5, pady=5)
else:
Tkinter.Entry(frame, background="white", textvariable=self.entries[section][option]).grid(row=line, column=1)
line += 1
return frame
def refresh(self):
for section in self.entries:
for option in self.entries[section]:
res = self.entries[section][option]
if(type(res) == list):
for entry,idx in zip(res,range(len(res))):
entry.set(self.config[section][option][idx])
else:
res.set(self.config[section][option])
def openConfigFile(self):
ext = None
filename = None
while(ext !=".cfg" and filename != ''):
filename = tkFileDialog.askopenfilename(defaultextension=".cfg")
ext = os.path.splitext(filename)[-1]
if(filename != ''):
self.config.load(filename)
self.refresh()
def loadConfigFile(self):
logging.info("Loading Gui config to Config object")
for section in self.entries:
for option in self.entries[section]:
res = self.entries[section][option]
if(type(res) == list):
res = [res[0].get(),res[1].get(),res[2].get()]
self.config[section][option] = np.array(res, dtype=np.float32)
else:
try:
self.config[section][option] = float(res.get())
except:
self.config[section][option] = res.get()
def saveConfigFile(self):
self.loadConfigFile()
self.config.save()
class ViewerTab(Tab):
def __init__(self, rootTab, scanner):
""" Create a new ViewerTab object
rootTab = the Notebook reference
scanner = the Scanner reference
"""
Tab.__init__(self, rootTab, "Viewer")
self.scanner = scanner
self.graph = None
self.axis = None
self.createGraph()
self.createOptions()
def createGraph(self, init=True):
if(init):
self.figure = plt.figure()
self.graph = FigureCanvasTkAgg(self.figure, master=self)
self.graph.get_tk_widget().grid(row=0, column=0)
self.axis = self.figure.add_subplot(111, projection='3d')
else:
self.axis.clear()
self.axis.set_xlabel('X axis')
self.axis.set_xlim3d(-250,250)
self.axis.set_ylabel('Y axis')
self.axis.set_ylim3d(-250,250)
self.axis.set_zlabel('Z axis')
self.axis.set_zlim3d(0,500)
self.graph.show()
def createOptions(self):
frame = Tkinter.LabelFrame(self, text="Options", font=("bold"))
frame.grid(row=0, column=1)
Tkinter.Button(frame, text="Start", command=self.start).grid(row=0, column=0)
Tkinter.Button(frame, text="Export", command=self.export).grid(row=1, column=0)
Tkinter.Button(frame, text="Mesh", command=self.mesh).grid(row=2, column=0)
Tkinter.Button(frame, text="Mesh with Delaunay3D", command=self.meshDelaunay).grid(row=3, column=0)
Tkinter.Button(frame, text="Mesh with BPA", command=self.meshBPA).grid(row=4, column=0)
Tkinter.Button(frame, text="Quit", command=self.winfo_toplevel().destroy).grid(row=5, column=0)
def _objSaveDialog(self, extension=".obj"):
filename, ext = None, None
while ext != extension and filename != '':
filename = tkFileDialog.asksaveasfilename(defaultextension=extension)
ext = os.path.splitext(filename)[-1]
return filename
def export(self):
filename = self._objSaveDialog()
if filename != "":
self.scanner.exportToObjFile(filename)
def start(self):
self.scanner.startScan()
self.plot()
def plot(self, scene=None, lock=None):
if(lock == None):
self.createGraph(False)
logging.info("Start plotting")
lock = threading.Lock()
thread_left = threading.Thread(target=self.plot, args=(self.scanner.sceneLeft, lock))
thread_right = threading.Thread(target=self.plot, args=(self.scanner.sceneRight, lock))
thread_left.start()
thread_right.start()
else:
for slice in scene:
arrays = map(Point.toNPArray, slice[0])
colors = map(Point.toRGB, slice[0])
if(len(arrays) != 0):
x, y, z = zip(*arrays)
lock.acquire()
self.axis.scatter(x, y, z, c=colors)
self.graph.draw()
lock.release()
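    # Design note on plot(): the two laser scenes are drawn from separate
    # worker threads, but the Axes3D and the Tk canvas are shared, so the lock
    # serializes each scatter()/draw() pair; neither matplotlib nor Tkinter is
    # generally safe to call concurrently from multiple threads without such
    # coordination.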
def mesh(self):
filename = self._objSaveDialog()
if filename != "":
self.scanner.meshToObjFile(filename)
def meshDelaunay(self):
filename = self._objSaveDialog()
if filename != "":
self.scanner.meshDelaunay3D(filename)
def meshBPA(self):
filename = self._objSaveDialog(".ply")
if filename != "":
self.scanner.meshBPA(filename)
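# Minimal wiring sketch (the function name, window title and the `scanner`
# argument are placeholders; any object exposing the attributes used by
# SetupTab and ViewerTab above, in particular `config`, would work):
def _example_build_gui(scanner):
    """Attach a SetupTab and a ViewerTab to a fresh Tk root and return it."""
    root = Tkinter.Tk()
    root.title("Scanner")
    notebook = ttk.Notebook(root)
    notebook.pack(fill='both', expand=True)
    SetupTab(notebook, scanner.config)
    ViewerTab(notebook, scanner)
    # The caller would normally enter the event loop with root.mainloop().
    return root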
| mit |
cademarkegard/airflow | airflow/contrib/hooks/bigquery_hook.py | 3 | 35020 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
from builtins import range
from past.builtins import basestring
import logging
import time
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from apiclient.discovery import build, HttpError
from pandas.io.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=','):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single
            wildcard per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
"""
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'schema': {
'fields': schema_fields
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Wait for query to finish.
while not job['status']['state'] == 'DONE':
logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id)
time.sleep(5)
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.format(
job['status']['errorResult'], job
)
)
return job_id
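    # Rough sketch of a minimal configuration accepted by
    # run_with_configuration (dataset/table names are placeholders):
    #
    #     cursor.run_with_configuration({
    #         'query': {
    #             'query': 'SELECT COUNT(*) FROM [my_dataset.my_table]',
    #             'useLegacySql': True,
    #         }
    #     })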
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
logging.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
logging.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
logging.info('table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
logging.info('table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
                self.page_token = None
                self.job_id = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self._buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
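# Minimal usage sketch (assumes an Airflow connection named 'bigquery_default'
# with valid Google Cloud credentials; the helper name and the query are
# placeholders): the cursor follows the usual PEP 249 flow.
def _example_cursor_usage():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    cursor = hook.get_conn().cursor()
    cursor.execute('SELECT 17 AS answer')
    return cursor.fetchall()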
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
    for (name, value) in parameters.items():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
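# Rough sketch of the intended behaviour of the two helpers above (values are
# illustrative): None becomes NULL, strings are escaped and single-quoted, and
# everything else is passed through str(), e.g.
#   _bind_parameters("SELECT * FROM tbl WHERE name = %(name)s",
#                    {'name': "O'Brien"})
#   returns "SELECT * FROM tbl WHERE name = 'O\'Brien'"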
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
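# For example (illustrative values): _bq_cast('3', 'INTEGER') -> 3,
# _bq_cast('1.5', 'FLOAT') -> 1.5, _bq_cast('true', 'BOOLEAN') -> True, and
# _bq_cast(None, 'STRING') -> None; any other BigQuery type is returned as the
# original string.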
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
cmpt = table_input.split(':')
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
'{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
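# Rough sketch of the accepted formats (all names are placeholders):
#   _split_tablename('my_dataset.my_table', 'proj') -> ('proj', 'my_dataset', 'my_table')
#   _split_tablename('other:my_dataset.my_table', 'proj') -> ('other', 'my_dataset', 'my_table')
#   _split_tablename('other.my_dataset.my_table', 'proj') -> ('other', 'my_dataset', 'my_table')
# Anything else raises an Exception describing the expected format.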
| apache-2.0 |
tracierenea/gnuradio | gr-digital/examples/berawgn.py | 32 | 4886 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing you to check for lower BER values.
Lower values run faster; higher values use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the maximum value for BER to be one order of
magnitude below what you chose for N_BITS.
"""
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))
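# Quick sanity check for the theoretical curve above (illustrative only, not
# part of the original example): at Eb/N0 = 0 dB the BPSK/QPSK bit error rate
# is erfc(1)/2, roughly 7.9e-2, and it drops rapidly as Eb/N0 increases.
def berawgn_sanity_check():
    for ebn0 in (0, 5, 10):
        print "Eb/N0 = %d dB -> theoretical BER = %.3e" % (ebn0, berawgn(ebn0))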
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0/N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
data = map(int, numpy.random.randint(0, self.const.arity(), N_BITS/self.const.bits_per_symbol()))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a complex noise voltage (assuming unit symbol power) """
return 1.0 / math.sqrt(self.const.bits_per_symbol() * 10**(float(EbN0)/10))
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print "Eb/N0 = %d dB" % EbN0
fg = BERAWGNSimu(EbN0)
fg.run()
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = range(EbN0_min, EbN0_max+1)
ber_theory = [berawgn(x) for x in EbN0_range]
print "Simulating..."
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pylab.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pylab.show()
| gpl-3.0 |
wkfwkf/statsmodels | statsmodels/datasets/committee/data.py | 25 | 2583 | """First 100 days of the US House of Representatives 1995"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of bill assignments in the 104th House in 1995"""
DESCRLONG = """The example in Gill, seeks to explain the number of bill
assignments in the first 100 days of the US' 104th House of Representatives.
The response variable is the number of bill assignments in the first 100 days
over 20 Committees. The explanatory variables in the example are the number of
assignments in the first 100 days of the 103rd House, the number of members on
the committee, the number of subcommittees, the log of the number of staff
assigned to the committee, a dummy variable indicating whether
the committee is a high prestige committee, and an interaction term between
the number of subcommittees and the log of the staff size.
The data returned by load are not cleaned to represent the above example.
"""
NOTE = """::
Number of Observations - 20
Number of Variables - 6
Variable name definitions::
BILLS104 - Number of bill assignments in the first 100 days of the
104th House of Representatives.
SIZE - Number of members on the committee.
SUBS - Number of subcommittees.
STAFF - Number of staff members assigned to the committee.
PRESTIGE - PRESTIGE == 1 is a high prestige committee.
BILLS103 - Number of bill assignments in the first 100 days of the
103rd House of Representatives.
Committee names are included as a variable in the data file though not
returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""Load the committee data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/committee.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
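# Illustrative sketch, not part of the original module: one way to fit the
# Poisson GLM sketched in DESCRLONG from the raw data returned by load().
# Note that load() does not build the log(STAFF) transform or the
# SUBS * log(STAFF) interaction, so those terms would have to be added by hand.
def _example_poisson_fit():
    import statsmodels.api as sm
    data = load()
    exog = sm.add_constant(data.exog, prepend=False)
    return sm.GLM(data.endog, exog, family=sm.families.Poisson()).fit()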
| bsd-3-clause |
Erotemic/ibeis | ibeis/algo/smk/script_smk.py | 1 | 46896 | # -*- coding: utf-8 -*-
"""
Results so far without SV / fancyness
Using standard descriptors / vocabulary
proot=bow,nWords=1E6 -> .594
proot=asmk,nWords=1E6 -> .529
Note:
* Results from SMK Oxford Paper (mAP)
ASMK nAssign=1, SV=False: .78
ASMK nAssign=5, SV=False: .82
Philbin with tf-idf ranking SV=False
SIFT: .636, RootSIFT: .683 (+.05)
Philbin with tf-idf ranking SV=True
SIFT: .672, RootSIFT: .720 (+.05)
* My Results (WITH BAD QUERY BBOXES)
smk:nAssign=1,SV=True,: .58
smk:nAssign=1,SV=False,: .38
Yesterday I got
.22 when I fixed the bounding boxes
And now I'm getting
.08 and .32 (sv=[F,T]) after deleting and redoing everything (also removing junk images)
After fix of normalization I get
.38 and .44
Using oxford descriptors I get .51ish
Then changing to root-sift:
smk-bow gets 0.56294936807700813
Then tfidf-bow2 gets 0.56046968275748565
asmk gets 0.54146
Going down to 8K words smk-BOW gets .153
Going down to 8K words tfidf-BOW gets .128
Going down to 8K words smk-asmk gets 0.374
Ok the 65K vocab smk-asmk gets mAP=0.461...
Ok, after recomputing a new 65K vocab with centered and root-sifted
descriptors, using float32 precision (in most places), asmk
gets a new map score of:
mAP=.5275... :(
This is with permissive query kpts and oxford vocab.
Next step: ensure everything is float32.
Ensured float32
mAP=.5279, ... better but indicative of a real error
After that try again at Jegou's data.
Ensure there are no smk algo bugs. There must be one.
FINALLY!
Got Jegou's data working.
With Jegou's precomputed oxford feats, words, and assignments
And float32 version
asmk = .78415
bow = .545
asmk got 0.78415 with float32 version
bow got .545
bot2 got .551
vecs07, root_sift, approx assign, (either jegou or my words)
mAP=.673
Weird:
vecs07, root_sift, exact assign,
Maybe jegou words or maybe my words. Can't quite tell.
Might have messed with a config.
mAP=0.68487357885738664
October 8
Still using the same descriptors, but my own vocab with approx assign
mAP = 0.78032
my own vocab approx assign, no center
map = .793
The problem was minibatch params. Need higher batch size and init size.
Needed to modify sklearn to handle this requirement.
Using my own descriptors I got 0.7460. Seems good.
Now, back to the HS pipeline.
Getting a 0.638, so there is an inconsistency.
Should be getting .7460. Maybe I gotta root_sift it up?
Turned off root_sift in script
got .769, so there is a problem in system script
minibatch 29566/270340... rate=0.86 Hz, eta=0:00:00, total=9:44:35, wall=05:24 EST inertia: mean batch=53730.923812, ewa=53853.439903
now need to try turning off float32
Differences Between this and SMK:
* No RootSIFT
* No SIFT Centering
* No Independent Vocab
* Chip RESIZE
Differences between this and VLAD
* residual vectors are normalized
* larger default vocabulary size
Feat Info
==========
name | num_vecs | n_annots |
=================================
Oxford13 | 12,534,635 | |
Oxford07 | 16,334,970 | |
mine1 | 8,997,955 | |
mine2 | 13,516,721 | 5063 |
mine3 | 8,371,196 | 4728 |
mine4 | 8,482,137 | 4783 |
Cluster Algo Config
===================
name | algo | init | init_size | batch size |
==========================================================================|
minibatch1 | minibatch kmeans | kmeans++ | num_words * 4 | 100 |
minibatch2 | minibatch kmeans | kmeans++ | num_words * 4 | 1000 |
given13 | Lloyd? | kmeans++? | num_words * 8? | nan? |
Assign Algo Config
==================
name | algo | trees | checks |
======================================
approx | kdtree | 8 | 1024 |
exact | linear | nan | nan |
exact | linear | nan | nan |
SMK Results
===========
tagid | mAP | train_feats | test_feats | center | rootSIFT | assign | num_words | cluster methods | int | only_xy |
=================================================================================================================
| 0.38 | mine1 | mine1 | | | approx | 64000 | minibatch1 | | |
| 0.541 | oxford07 | oxford07 | | X | approx | 2 ** 16 | minibatch1 | | X |
| 0.673 | oxford13 | oxford13 | X | X | approx | 2 ** 16 | minibatch1 | | X |
| 0.684 | oxford13 | oxford13 | X | X | exact | 2 ** 16 | minibatch1 | | X |
----------------------------------------------------------------------------------------------------------------
mybest | 0.793 | oxford13 | oxford13 | | X | approx | 2 ** 16 | minibatch2 | | X |
| 0.780 | oxford13 | oxford13 | X | X | approx | 2 ** 16 | minibatch2 | | X |
| 0.788 | paras13 | oxford13 | X | X | approx | 2 ** 16 | given13 | | X |
allgiven | 0.784 | paras13 | oxford13 | X | X | given13 | 2 ** 16 | given13 | | X |
reported13 | 0.781 | paras13 | oxford13 | X | X | given13 | 2 ** 16 | given13 | | X |
-----------------------------------------------------------------------------------------------------------------
inhouse1 | 0.746 | mine2 | mine2 | | X | approx | 2 ** 16 | minibatch2 | | X |
inhouse2 | 0.769 | mine2 | mine2 | | | approx | 2 ** 16 | minibatch2 | | X |
inhouse3 | 0.769 | mine2 | mine2 | | | approx | 2 ** 16 | minibatch2 | X | X |
inhouse4 | 0.751 | mine2 | mine2 | | | approx | 2 ** 16 | minibatch2 | X | |
sysharn1 | 0.638 | mine3 | mine3 | | | approx | 64000 | minibatch2 | X | |
sysharn2 | 0.713 | mine3 | mine4 | | | approx | 64000 | minibatch2 | X | |
In the SMK paper they report 0.781 as shown in the table, but they also report a score of 0.820 when increasing
the number of features from 12.5M to 19.2M by lowering feature detection thresholds.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np
from ibeis.algo.smk import inverted_index
from ibeis.algo.smk import smk_funcs
from ibeis.algo.smk import smk_pipeline
from six.moves import zip, map
(print, rrr, profile) = ut.inject2(__name__)
class SMK(ut.NiceRepr):
def __nice__(smk):
return smk.method
def __init__(smk, wx_to_weight, method='asmk', **kwargs):
smk.wx_to_weight = wx_to_weight
smk.method = method
if method == 'asmk':
smk.match_score = smk.match_score_agg
elif method == 'smk':
smk.match_score = smk.match_score_sep
elif method == 'bow':
smk.match_score = smk.match_score_bow
if method in ['asmk', 'smk']:
smk.alpha = kwargs.pop('alpha', 0.0)
smk.thresh = kwargs.pop('thresh', 0.0)
if method == 'bow2':
smk.kernel = smk.kernel_bow_tfidf
else:
smk.kernel = smk.kernel_smk
assert len(kwargs) == 0, 'unexpected kwargs=%r' % (kwargs,)
def gamma(smk, X):
""" gamma(X) = (M(X, X)) ** (-1/2) """
score = smk.match_score(X, X)
sccw = np.reciprocal(np.sqrt(score))
return sccw
def kernel_bow_tfidf(smk, X, Y):
return X.bow.dot(Y.bow)
def kernel_smk(smk, X, Y):
score = smk.match_score(X, Y)
score = X.gamma * Y.gamma * score
return score
def word_isect(smk, X, Y):
isect_wxs = X.wx_set.intersection(Y.wx_set)
X_idx = ut.take(X.wx_to_idx, isect_wxs)
Y_idx = ut.take(Y.wx_to_idx, isect_wxs)
weights = ut.take(smk.wx_to_weight, isect_wxs)
return X_idx, Y_idx, weights
def match_score_agg(smk, X, Y):
X_idx, Y_idx, weights = smk.word_isect(X, Y)
PhisX, flagsX = X.Phis_flags(X_idx)
PhisY, flagsY = Y.Phis_flags(Y_idx)
scores = smk_funcs.match_scores_agg(
PhisX, PhisY, flagsX, flagsY, smk.alpha, smk.thresh)
scores = np.multiply(scores, weights, out=scores)
score = scores.sum()
return score
def match_score_sep(smk, X, Y):
X_idx, Y_idx, weights = smk.word_isect(X, Y)
phisX_list, flagsY_list = X.phis_flags_list(X_idx)
phisY_list, flagsX_list = Y.phis_flags_list(Y_idx)
scores_list = smk_funcs.match_scores_sep(
phisX_list, phisY_list, flagsX_list, flagsY_list, smk.alpha,
smk.thresh)
for scores, w in zip(scores_list, weights):
np.multiply(scores, w, out=scores)
score = np.sum([s.sum() for s in scores_list])
return score
def match_score_bow(smk, X, Y):
isect_words = X.wx_set.intersection(Y.wx_set)
weights = ut.take(smk.wx_to_weight, isect_words)
score = np.sum(weights)
return score
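# Illustrative helper, not called anywhere in this script: scoring one pair of
# annots with the asmk kernel, mirroring what run_asmk_script() does further
# down once X and Y carry their word sets, aggregated residuals and flags.
def smk_score_pair(wx_to_weight, X, Y, alpha=3.0, thresh=0.0):
    smk = SMK(wx_to_weight, method='asmk', alpha=alpha, thresh=thresh)
    X.gamma = smk.gamma(X)
    Y.gamma = smk.gamma(Y)
    return smk.kernel(X, Y)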
class SparseVector(ut.NiceRepr):
def __init__(self, _dict):
self._dict = _dict
def __nice__(self):
return '%d nonzero values' % (len(self._dict),)
def __getitem__(self, keys):
vals = ut.take(self._dict, keys)
return vals
def dot(self, other):
keys1 = set(self._dict.keys())
keys2 = set(other._dict.keys())
keys = keys1.intersection(keys2)
vals1 = np.array(self[keys])
vals2 = np.array(other[keys])
return np.multiply(vals1, vals2).sum()
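# Tiny illustrative check, not part of the original module: the sparse dot
# product only touches words present in both vectors.
def _sparse_dot_example():
    a = SparseVector({1: 0.5, 2: 0.5, 7: 1.0})
    b = SparseVector({2: 2.0, 7: 3.0})
    return a.dot(b)   # 0.5*2.0 + 1.0*3.0 = 4.0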
# class StackedLists(object):
# def __init__(self, list_, offsets):
# self.list_ = list_
# self.offsets = offsets
# def split(self):
# return [self._list_[l:r] for l, r in ut.itertwo(self.offsets)]
# stacked_vecs = StackedLists(all_vecs, offset_list)
# vecs_list = stacked_vecs.split()
def load_oxford_2007():
"""
Loads data from
http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf
>>> from ibeis.algo.smk.script_smk import * # NOQA
"""
from os.path import join, basename, splitext
import pandas as pd
import vtool_ibeis as vt
dbdir = ut.truepath('/raid/work/Oxford/')
data_fpath0 = join(dbdir, 'data_2007.pkl')
if ut.checkpath(data_fpath0):
data = ut.load_data(data_fpath0)
return data
else:
word_dpath = join(dbdir, 'word_oxc1_hesaff_sift_16M_1M')
_word_fpath_list = ut.ls(word_dpath)
imgid_to_word_fpath = {
splitext(basename(word_fpath))[0]: word_fpath
for word_fpath in _word_fpath_list
}
readme_fpath = join(dbdir, 'README2.txt')
imgid_order = ut.readfrom(readme_fpath).split('\n')[20:-1]
imgid_order = imgid_order
data_uri_order = [x.replace('oxc1_', '') for x in imgid_order]
imgid_to_df = {}
for imgid in ut.ProgIter(imgid_order, label='reading kpts'):
word_fpath = imgid_to_word_fpath[imgid]
row_gen = (map(float, line.strip('\n').split(' '))
for line in ut.read_lines_from(word_fpath)[2:])
rows = [(int(word_id), x, y, e11, e12, e22)
for (word_id, x, y, e11, e12, e22) in row_gen]
df = pd.DataFrame(rows, columns=['word_id', 'x', 'y', 'e11', 'e12', 'e22'])
imgid_to_df[imgid] = df
df_list = ut.take(imgid_to_df, imgid_order)
nfeat_list = [len(df_) for df_ in df_list]
offset_list = [0] + ut.cumsum(nfeat_list)
shape = (offset_list[-1], 128)
#shape = (16334970, 128)
sift_fpath = join(dbdir, 'OxfordSIFTDescriptors',
'feat_oxc1_hesaff_sift.bin')
try:
file_ = open(sift_fpath, 'rb')
with ut.Timer('Reading SIFT binary file'):
nbytes = np.prod(shape)
all_vecs = np.fromstring(file_.read(nbytes), dtype=np.uint8)
all_vecs = all_vecs.reshape(shape)
finally:
file_.close()
kpts_list = [df_.loc[:, ('x', 'y', 'e11', 'e12', 'e22')].values
for df_ in df_list]
wordid_list = [df_.loc[:, 'word_id'].values for df_ in df_list]
kpts_Z = np.vstack(kpts_list)
idx_to_wx = np.hstack(wordid_list)
# assert len(np.unique(idx_to_wx)) == 1E6
# Reqd standard query order
query_files = sorted(ut.glob(dbdir + '/oxford_groundtruth', '*_query.txt'))
query_uri_order = []
for qpath in query_files:
text = ut.readfrom(qpath, verbose=0)
query_uri = text.split(' ')[0].replace('oxc1_', '')
query_uri_order.append(query_uri)
print('converting to invV')
all_kpts = vt.convert_kptsZ_to_kpts(kpts_Z)
data = {
'offset_list': offset_list,
'all_kpts': all_kpts,
'all_vecs': all_vecs,
'idx_to_wx': idx_to_wx,
'data_uri_order': data_uri_order,
'query_uri_order': query_uri_order,
}
ut.save_data(data_fpath0, data)
return data
def load_oxford_2013():
"""
Found this data in README of SMK publication
https://hal.inria.fr/hal-00864684/document
http://people.rennes.inria.fr/Herve.Jegou/publications.html
with download script
CommandLine:
# Download oxford13 data
cd ~/work/Oxford
mkdir -p smk_data_iccv_2013
cd smk_data_iccv_2013
wget -nH --cut-dirs=4 -r -Pdata/ ftp://ftp.irisa.fr/local/texmex/corpus/iccv2013/
This dataset has 5063 images whereas 07 has 5062
This dataset seems to contain an extra junk image:
ashmolean_000214
# Remember that matlab is 1 indexed!
# DONT FORGET TO CONVERT TO 0 INDEXING!
"""
from yael.ynumpy import fvecs_read
from yael.yutils import load_ext
import scipy.io
import vtool_ibeis as vt
from os.path import join
dbdir = ut.truepath('/raid/work/Oxford/')
datadir = dbdir + '/smk_data_iccv_2013/data/'
# we are not retraining, so this is unused
# # Training data descriptors for Paris6k dataset
# train_sift_fname = join(datadir, 'paris_sift.uint8') # NOQA
# # File storing visual words of Paris6k descriptors used in our ICCV paper
# train_vw_fname = join(datadir, 'clust_preprocessed/oxford_train_vw.int32')
# Pre-learned quantizer used in ICCV paper (used if docluster=false)
codebook_fname = join(datadir, 'clust_preprocessed/oxford_codebook.fvecs')
# Files storing descriptors/geometry for Oxford5k dataset
test_sift_fname = join(datadir, 'oxford_sift.uint8')
test_geom_fname = join(datadir, 'oxford_geom_sift.float')
test_nf_fname = join(datadir, 'oxford_nsift.uint32')
# File storing visual words of Oxford5k descriptors used in our ICCV paper
test_vw_fname = join(datadir, 'clust_preprocessed/oxford_vw.int32')
# Ground-truth for Oxford dataset
gnd_fname = join(datadir, 'gnd_oxford.mat')
oxford_vecs = load_ext(test_sift_fname, ndims=128, verbose=True)
oxford_nfeats = load_ext(test_nf_fname, verbose=True)
oxford_words = fvecs_read(codebook_fname)
oxford_wids = load_ext(test_vw_fname, verbose=True) - 1
test_geom_invV_fname = test_geom_fname + '.invV.pkl'
try:
all_kpts = ut.load_data(test_geom_invV_fname)
print('loaded invV keypoints')
except IOError:
oxford_kptsZ = load_ext(test_geom_fname, ndims=5, verbose=True)
print('converting to invV keypoints')
all_kpts = vt.convert_kptsZ_to_kpts(oxford_kptsZ)
ut.save_data(test_geom_invV_fname, all_kpts)
gnd_ox = scipy.io.loadmat(gnd_fname)
imlist = [x[0][0] for x in gnd_ox['imlist']]
qx_to_dx = gnd_ox['qidx'] - 1
data_uri_order = imlist
query_uri_order = ut.take(data_uri_order, qx_to_dx)
offset_list = np.hstack(([0], oxford_nfeats.cumsum())).astype(np.int64)
# query_gnd = gnd_ox['gnd'][0][0]
# bboxes = query_gnd[0]
# qx_to_ok_gtidxs1 = [x[0] for x in query_gnd[1][0]]
# qx_to_junk_gtidxs2 = [x[0] for x in query_gnd[2][0]]
# # ut.depth_profile(qx_to_gtidxs1)
# # ut.depth_profile(qx_to_gtidxs2)
assert sum(oxford_nfeats) == len(oxford_vecs)
assert offset_list[-1] == len(oxford_vecs)
assert len(oxford_wids) == len(oxford_vecs)
assert oxford_wids.max() == len(oxford_words) - 1
data = {
'offset_list': offset_list,
'all_kpts': all_kpts,
'all_vecs': oxford_vecs,
'words': oxford_words,
'idx_to_wx': oxford_wids,
'data_uri_order': data_uri_order,
'query_uri_order': query_uri_order,
}
return data
def load_oxford_ibeis():
import ibeis
ibs = ibeis.opendb('Oxford')
dim_size = None
_dannots = ibs.annots(ibs.filter_annots_general(has_none='query'),
config=dict(dim_size=dim_size))
_qannots = ibs.annots(ibs.filter_annots_general(has_any='query'),
config=dict(dim_size=dim_size))
with ut.Timer('reading info'):
vecs_list = _dannots.vecs
kpts_list = _dannots.kpts
nfeats_list = np.array(_dannots.num_feats)
with ut.Timer('stacking info'):
all_vecs = np.vstack(vecs_list)
all_kpts = np.vstack(kpts_list)
offset_list = np.hstack(([0], nfeats_list.cumsum())).astype(np.int64)
# data_annots = reorder_annots(_dannots, data_uri_order)
data_uri_order = get_annots_imgid(_dannots)
query_uri_order = get_annots_imgid(_qannots)
data = {
'offset_list': offset_list,
'all_kpts': all_kpts,
'all_vecs': all_vecs,
'data_uri_order': data_uri_order,
'query_uri_order': query_uri_order,
}
return data
def get_annots_imgid(_annots):
from os.path import basename, splitext
_images = _annots._ibs.images(_annots.gids)
intern_uris = [splitext(basename(uri))[0]
for uri in _images.uris_original]
return intern_uris
def load_ordered_annots(data_uri_order, query_uri_order):
# Open the ibeis version of oxford
import ibeis
ibs = ibeis.opendb('Oxford')
def reorder_annots(_annots, uri_order):
intern_uris = get_annots_imgid(_annots)
lookup = ut.make_index_lookup(intern_uris)
_reordered = _annots.take(ut.take(lookup, uri_order))
return _reordered
# Load database annotations and reorder them to agree with internals
_dannots = ibs.annots(ibs.filter_annots_general(has_none='query'))
data_annots = reorder_annots(_dannots, data_uri_order)
# Load query annototations and reorder to standard order
_qannots = ibs.annots(ibs.filter_annots_general(has_any='query'))
query_annots = reorder_annots(_qannots, query_uri_order)
# Map each query annot to its corresponding data index
dgid_to_dx = ut.make_index_lookup(data_annots.gids)
qx_to_dx = ut.take(dgid_to_dx, query_annots.gids)
return ibs, query_annots, data_annots, qx_to_dx
def run_asmk_script():
with ut.embed_on_exception_context: # NOQA
"""
>>> from ibeis.algo.smk.script_smk import *
""" # NOQA
# ==============================================
# PREPROCESSING CONFIGURATION
# ==============================================
config = {
# 'data_year': 2013,
'data_year': None,
'dtype': 'float32',
# 'root_sift': True,
'root_sift': False,
# 'centering': True,
'centering': False,
'num_words': 2 ** 16,
#'num_words': 1E6
#'num_words': 8000,
'kmeans_impl': 'sklearn.mini',
'extern_words': False,
'extern_assign': False,
'assign_algo': 'kdtree',
'checks': 1024,
'int_rvec': True,
'only_xy': False,
}
# Define which params are relevant for which operations
relevance = {}
relevance['feats'] = ['dtype', 'root_sift', 'centering', 'data_year']
relevance['words'] = relevance['feats'] + ['num_words', 'extern_words', 'kmeans_impl']
relevance['assign'] = relevance['words'] + ['checks', 'extern_assign', 'assign_algo']
# relevance['ydata'] = relevance['assign'] + ['int_rvec']
# relevance['xdata'] = relevance['assign'] + ['only_xy', 'int_rvec']
nAssign = 1
class SMKCacher(ut.Cacher):
def __init__(self, fname, ext='.cPkl'):
relevant_params = relevance[fname]
relevant_cfg = ut.dict_subset(config, relevant_params)
cfgstr = ut.get_cfg_lbl(relevant_cfg)
dbdir = ut.truepath('/raid/work/Oxford/')
super(SMKCacher, self).__init__(fname, cfgstr, cache_dir=dbdir,
ext=ext)
# ==============================================
# LOAD DATASET, EXTRACT AND POSTPROCESS FEATURES
# ==============================================
if config['data_year'] == 2007:
data = load_oxford_2007()
elif config['data_year'] == 2013:
data = load_oxford_2013()
elif config['data_year'] is None:
data = load_oxford_ibeis()
offset_list = data['offset_list']
all_kpts = data['all_kpts']
raw_vecs = data['all_vecs']
query_uri_order = data['query_uri_order']
data_uri_order = data['data_uri_order']
# del data
# ================
# PRE-PROCESS
# ================
import vtool_ibeis as vt
# Alias names to avoid errors in interactive sessions
proc_vecs = raw_vecs
del raw_vecs
feats_cacher = SMKCacher('feats', ext='.npy')
all_vecs = feats_cacher.tryload()
if all_vecs is None:
if config['dtype'] == 'float32':
print('Converting vecs to float32')
proc_vecs = proc_vecs.astype(np.float32)
else:
proc_vecs = proc_vecs
raise NotImplementedError('other dtype')
if config['root_sift']:
with ut.Timer('Apply root sift'):
np.sqrt(proc_vecs, out=proc_vecs)
vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)
if config['centering']:
with ut.Timer('Apply centering'):
mean_vec = np.mean(proc_vecs, axis=0)
# Center and then re-normalize
np.subtract(proc_vecs, mean_vec[None, :], out=proc_vecs)
vt.normalize(proc_vecs, ord=2, axis=1, out=proc_vecs)
if config['dtype'] == 'int8':
smk_funcs
all_vecs = proc_vecs
feats_cacher.save(all_vecs)
del proc_vecs
# =====================================
# BUILD VISUAL VOCABULARY
# =====================================
if config['extern_words']:
words = data['words']
assert config['num_words'] is None or len(words) == config['num_words']
else:
word_cacher = SMKCacher('words')
words = word_cacher.tryload()
if words is None:
with ut.embed_on_exception_context:
if config['kmeans_impl'] == 'sklearn.mini':
import sklearn.cluster
rng = np.random.RandomState(13421421)
# init_size = int(config['num_words'] * 8)
init_size = int(config['num_words'] * 4)
# converged after 26043 iterations
clusterer = sklearn.cluster.MiniBatchKMeans(
config['num_words'], init_size=init_size,
batch_size=1000, compute_labels=False, max_iter=20,
random_state=rng, n_init=1, verbose=1)
clusterer.fit(all_vecs)
words = clusterer.cluster_centers_
elif config['kmeans_impl'] == 'yael':
from yael import ynumpy
centroids, qerr, dis, assign, nassign = ynumpy.kmeans(
all_vecs, config['num_words'], init='kmeans++',
verbose=True, output='all')
words = centroids
word_cacher.save(words)
# =====================================
# ASSIGN EACH VECTOR TO ITS NEAREST WORD
# =====================================
if config['extern_assign']:
assert config['extern_words'], 'need extern cluster to extern assign'
idx_to_wxs = vt.atleast_nd(data['idx_to_wx'], 2)
idx_to_maws = np.ones(idx_to_wxs.shape, dtype=np.float32)
idx_to_wxs = np.ma.array(idx_to_wxs)
idx_to_maws = np.ma.array(idx_to_maws)
else:
from ibeis.algo.smk import vocab_indexer
vocab = vocab_indexer.VisualVocab(words)
dassign_cacher = SMKCacher('assign')
assign_tup = dassign_cacher.tryload()
if assign_tup is None:
vocab.flann_params['algorithm'] = config['assign_algo']
vocab.build()
# Takes 12 minutes to assign jegous vecs to 2**16 vocab
with ut.Timer('assign vocab neighbors'):
_idx_to_wx, _idx_to_wdist = vocab.nn_index(all_vecs, nAssign,
checks=config['checks'])
if nAssign > 1:
idx_to_wxs, idx_to_maws = smk_funcs.weight_multi_assigns(
_idx_to_wx, _idx_to_wdist, massign_alpha=1.2, massign_sigma=80.0,
massign_equal_weights=True)
else:
idx_to_wxs = np.ma.masked_array(_idx_to_wx, fill_value=-1)
idx_to_maws = np.ma.ones(idx_to_wxs.shape, fill_value=-1,
dtype=np.float32)
idx_to_maws.mask = idx_to_wxs.mask
assign_tup = (idx_to_wxs, idx_to_maws)
dassign_cacher.save(assign_tup)
idx_to_wxs, idx_to_maws = assign_tup
# Breakup vectors, keypoints, and word assignments by annotation
wx_lists = [idx_to_wxs[l:r] for l, r in ut.itertwo(offset_list)]
maw_lists = [idx_to_maws[l:r] for l, r in ut.itertwo(offset_list)]
vecs_list = [all_vecs[l:r] for l, r in ut.itertwo(offset_list)]
kpts_list = [all_kpts[l:r] for l, r in ut.itertwo(offset_list)]
# =======================
# FIND QUERY SUBREGIONS
# =======================
ibs, query_annots, data_annots, qx_to_dx = load_ordered_annots(
data_uri_order, query_uri_order)
daids = data_annots.aids
qaids = query_annots.aids
query_super_kpts = ut.take(kpts_list, qx_to_dx)
query_super_vecs = ut.take(vecs_list, qx_to_dx)
query_super_wxs = ut.take(wx_lists, qx_to_dx)
query_super_maws = ut.take(maw_lists, qx_to_dx)
# Mark which keypoints are within the bbox of the query
query_flags_list = []
only_xy = config['only_xy']
for kpts_, bbox in zip(query_super_kpts, query_annots.bboxes):
flags = kpts_inside_bbox(kpts_, bbox, only_xy=only_xy)
query_flags_list.append(flags)
print('Queries are crops of existing database images.')
print('Looking at average percents')
percent_list = [flags_.sum() / flags_.shape[0]
for flags_ in query_flags_list]
percent_stats = ut.get_stats(percent_list)
print('percent_stats = %s' % (ut.repr4(percent_stats),))
import vtool_ibeis as vt
query_kpts = vt.zipcompress(query_super_kpts, query_flags_list, axis=0)
query_vecs = vt.zipcompress(query_super_vecs, query_flags_list, axis=0)
query_wxs = vt.zipcompress(query_super_wxs, query_flags_list, axis=0)
query_maws = vt.zipcompress(query_super_maws, query_flags_list, axis=0)
# =======================
# CONSTRUCT QUERY / DATABASE REPR
# =======================
# int_rvec = not config['dtype'].startswith('float')
int_rvec = config['int_rvec']
X_list = []
_prog = ut.ProgPartial(length=len(qaids), label='new X', bs=True,
adjust=True)
for aid, fx_to_wxs, fx_to_maws in _prog(zip(qaids, query_wxs,
query_maws)):
X = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
X_list.append(X)
# ydata_cacher = SMKCacher('ydata')
# Y_list = ydata_cacher.tryload()
# if Y_list is None:
Y_list = []
_prog = ut.ProgPartial(length=len(daids), label='new Y', bs=True,
adjust=True)
for aid, fx_to_wxs, fx_to_maws in _prog(zip(daids, wx_lists, maw_lists)):
Y = new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec)
Y_list.append(Y)
# ydata_cacher.save(Y_list)
#======================
# Add in some groundtruth
print('Add in some groundtruth')
for Y, nid in zip(Y_list, ibs.get_annot_nids(daids)):
Y.nid = nid
for X, nid in zip(X_list, ibs.get_annot_nids(qaids)):
X.nid = nid
for Y, qual in zip(Y_list, ibs.get_annot_quality_texts(daids)):
Y.qual = qual
#======================
# Add in other properties
for Y, vecs, kpts in zip(Y_list, vecs_list, kpts_list):
Y.vecs = vecs
Y.kpts = kpts
imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
for Y, imgid in zip(Y_list, data_uri_order):
gpath = ut.unixjoin(imgdir, imgid + '.jpg')
Y.gpath = gpath
for X, vecs, kpts in zip(X_list, query_vecs, query_kpts):
X.kpts = kpts
X.vecs = vecs
#======================
print('Building inverted list')
daids = [Y.aid for Y in Y_list]
# wx_list = sorted(ut.list_union(*[Y.wx_list for Y in Y_list]))
wx_list = sorted(set.union(*[Y.wx_set for Y in Y_list]))
assert daids == data_annots.aids
assert len(wx_list) <= config['num_words']
wx_to_aids = smk_funcs.invert_lists(
daids, [Y.wx_list for Y in Y_list], all_wxs=wx_list)
# Compute IDF weights
print('Compute IDF weights')
ndocs_total = len(daids)
# Use only the unique number of words
ndocs_per_word = np.array([len(set(wx_to_aids[wx])) for wx in wx_list])
print('ndocs_perword stats: ' + ut.repr4(ut.get_stats(ndocs_per_word)))
idf_per_word = smk_funcs.inv_doc_freq(ndocs_total, ndocs_per_word)
wx_to_weight = dict(zip(wx_list, idf_per_word))
print('idf stats: ' + ut.repr4(ut.get_stats(wx_to_weight.values())))
# Filter junk
Y_list_ = [Y for Y in Y_list if Y.qual != 'junk']
# =======================
# CHOOSE QUERY KERNEL
# =======================
params = {
'asmk': dict(alpha=3.0, thresh=0.0),
'bow': dict(),
'bow2': dict(),
}
# method = 'bow'
method = 'bow2'
method = 'asmk'
smk = SMK(wx_to_weight, method=method, **params[method])
# Specific info for the type of query
if method == 'asmk':
# Make residual vectors
if True:
# The stacked way is 50x faster
# TODO: extend for multi-assignment and record fxs
flat_query_vecs = np.vstack(query_vecs)
flat_query_wxs = np.vstack(query_wxs)
flat_query_offsets = np.array([0] + ut.cumsum(ut.lmap(len, query_wxs)))
flat_wxs_assign = flat_query_wxs
flat_offsets = flat_query_offsets
flat_vecs = flat_query_vecs
tup = smk_funcs.compute_stacked_agg_rvecs(
words, flat_wxs_assign, flat_vecs, flat_offsets)
all_agg_vecs, all_error_flags, agg_offset_list = tup
if int_rvec:
all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)
agg_rvecs_list = [all_agg_vecs[l:r] for l, r in ut.itertwo(agg_offset_list)]
agg_flags_list = [all_error_flags[l:r] for l, r in ut.itertwo(agg_offset_list)]
for X, agg_rvecs, agg_flags in zip(X_list, agg_rvecs_list, agg_flags_list):
X.agg_rvecs = agg_rvecs
X.agg_flags = agg_flags[:, None]
flat_wxs_assign = idx_to_wxs
flat_offsets = offset_list
flat_vecs = all_vecs
tup = smk_funcs.compute_stacked_agg_rvecs(
words, flat_wxs_assign, flat_vecs, flat_offsets)
all_agg_vecs, all_error_flags, agg_offset_list = tup
if int_rvec:
all_agg_vecs = smk_funcs.cast_residual_integer(all_agg_vecs)
agg_rvecs_list = [all_agg_vecs[l:r] for l, r in ut.itertwo(agg_offset_list)]
agg_flags_list = [all_error_flags[l:r] for l, r in ut.itertwo(agg_offset_list)]
for Y, agg_rvecs, agg_flags in zip(Y_list, agg_rvecs_list, agg_flags_list):
Y.agg_rvecs = agg_rvecs
Y.agg_flags = agg_flags[:, None]
else:
# This non-stacked way is about 500x slower
_prog = ut.ProgPartial(label='agg Y rvecs', bs=True, adjust=True)
for Y in _prog(Y_list_):
make_agg_vecs(Y, words, Y.vecs)
_prog = ut.ProgPartial(label='agg X rvecs', bs=True, adjust=True)
for X in _prog(X_list):
make_agg_vecs(X, words, X.vecs)
elif method == 'bow2':
# Hack for orig tf-idf bow vector
nwords = len(words)
for X in ut.ProgIter(X_list, label='make bow vector'):
ensure_tf(X)
bow_vector(X, wx_to_weight, nwords)
for Y in ut.ProgIter(Y_list_, label='make bow vector'):
ensure_tf(Y)
bow_vector(Y, wx_to_weight, nwords)
if method != 'bow2':
for X in ut.ProgIter(X_list, 'compute X gamma'):
X.gamma = smk.gamma(X)
for Y in ut.ProgIter(Y_list_, 'compute Y gamma'):
Y.gamma = smk.gamma(Y)
# Execute matches (could go faster by enumerating candidates)
scores_list = []
for X in ut.ProgIter(X_list, label='query %s' % (smk,)):
scores = [smk.kernel(X, Y) for Y in Y_list_]
scores = np.array(scores)
scores = np.nan_to_num(scores)
scores_list.append(scores)
import sklearn.metrics
avep_list = []
_iter = list(zip(scores_list, X_list))
_iter = ut.ProgIter(_iter, label='evaluate %s' % (smk,))
for scores, X in _iter:
truth = [X.nid == Y.nid for Y in Y_list_]
avep = sklearn.metrics.average_precision_score(truth, scores)
avep_list.append(avep)
avep_list = np.array(avep_list)
mAP = np.mean(avep_list)
print('mAP = %r' % (mAP,))
def new_external_annot(aid, fx_to_wxs, fx_to_maws, int_rvec):
wx_to_fxs, wx_to_maws = smk_funcs.invert_assigns(fx_to_wxs, fx_to_maws)
X = inverted_index.SingleAnnot()
X.aid = aid
# Build Aggregate Residual Vectors
X.wx_list = np.array(sorted(wx_to_fxs.keys()), dtype=np.int32)
X.wx_to_idx = ut.make_index_lookup(X.wx_list)
X.int_rvec = int_rvec
X.wx_set = set(X.wx_list)
# TODO: maybe use offset list structure instead of heavy nesting
X.fxs_list = ut.take(wx_to_fxs, X.wx_list)
X.maws_list = ut.take(wx_to_maws, X.wx_list)
return X
def make_agg_vecs(X, words, fx_to_vecs):
word_list = ut.take(words, X.wx_list)
dtype = np.int8 if X.int_rvec else np.float32
dim = fx_to_vecs.shape[1]
X.agg_rvecs = np.empty((len(X.wx_list), dim), dtype=dtype)
X.agg_flags = np.empty((len(X.wx_list), 1), dtype=np.bool)
for idx in range(len(X.wx_list)):
word = word_list[idx]
fxs = X.fxs_list[idx]
maws = X.maws_list[idx]
vecs = fx_to_vecs.take(fxs, axis=0)
_rvecs, _flags = smk_funcs.compute_rvec(vecs, word)
_agg_rvec, _agg_flag = smk_funcs.aggregate_rvecs(_rvecs, maws, _flags)
if X.int_rvec:
_agg_rvec = smk_funcs.cast_residual_integer(_agg_rvec)
X.agg_rvecs[idx] = _agg_rvec
X.agg_flags[idx] = _agg_flag
return X
def ensure_tf(X):
termfreq = ut.dict_hist(X.wx_list)
# do what video google does
termfreq = ut.map_dict_vals(lambda x: x / len(X.wx_list), termfreq)
X.termfreq = termfreq
def bow_vector(X, wx_to_weight, nwords):
import vtool_ibeis as vt
wxs = sorted(list(X.wx_set))
tf = np.array(ut.take(X.termfreq, wxs))
idf = np.array(ut.take(wx_to_weight, wxs))
bow_ = tf * idf
bow_ = vt.normalize(bow_)
bow = SparseVector(dict(zip(wxs, bow_)))
X.bow = bow
def make_temporary_annot(aid, vocab, wx_to_weight, ibs, config):
nAssign = config.get('nAssign', 1)
alpha = config.get('smk_alpha', 3.0)
thresh = config.get('smk_thresh', 3.0)
# Compute assignments
fx_to_vecs = ibs.get_annot_vecs(aid, config2_=config)
fx_to_wxs, fx_to_maws = smk_funcs.assign_to_words(vocab, fx_to_vecs, nAssign)
wx_to_fxs, wx_to_maws = smk_funcs.invert_assigns(fx_to_wxs, fx_to_maws)
# Build Aggregate Residual Vectors
wx_list = sorted(wx_to_fxs.keys())
word_list = ut.take(vocab.wx_to_word, wx_list)
fxs_list = ut.take(wx_to_fxs, wx_list)
maws_list = ut.take(wx_to_maws, wx_list)
agg_rvecs = np.empty((len(wx_list), fx_to_vecs.shape[1]), dtype=np.float)
agg_flags = np.empty((len(wx_list), 1), dtype=np.bool)
for idx in range(len(wx_list)):
word = word_list[idx]
fxs = fxs_list[idx]
maws = maws_list[idx]
vecs = fx_to_vecs.take(fxs, axis=0)
_rvecs, _flags = smk_funcs.compute_rvec(vecs, word)
_agg_rvec, _agg_flag = smk_funcs.aggregate_rvecs(_rvecs, maws, _flags)
agg_rvecs[idx] = _agg_rvec
agg_flags[idx] = _agg_flag
X = inverted_index.SingleAnnot()
X.aid = aid
X.wx_list = wx_list
X.fxs_list = fxs_list
X.maws_list = maws_list
X.agg_rvecs = agg_rvecs
X.agg_flags = agg_flags
X.wx_to_idx = ut.make_index_lookup(X.wx_list)
X.int_rvec = False
X.wx_set = set(X.wx_list)
weight_list = np.array(ut.take(wx_to_weight, wx_list))
X.gamma = smk_funcs.gamma_agg(X.agg_rvecs, X.agg_flags, weight_list,
alpha, thresh)
return X
def verify_score():
"""
Recompute all SMK things for two annotations and compare scores.
>>> from ibeis.algo.smk.script_smk import * # NOQA
cm.print_inspect_str(qreq_)
cm.show_single_annotmatch(qreq_, daid1)
cm.show_single_annotmatch(qreq_, daid2)
"""
qreq_, cm = load_internal_data()
qreq_.ensure_data()
ibs = qreq_.ibs
qaid = cm.qaid
daid1 = cm.get_top_truth_aids(ibs, ibs.const.EVIDENCE_DECISION.POSITIVE)[0]
daid2 = cm.get_top_truth_aids(ibs, ibs.const.EVIDENCE_DECISION.POSITIVE, invert=True)[0]
vocab = ibs.depc['vocab'].get_row_data([qreq_.dinva.vocab_rowid], 'words')[0]
wx_to_weight = qreq_.dinva.wx_to_weight
aid = qaid # NOQA
config = qreq_.qparams
alpha = config.get('smk_alpha', 3.0)
thresh = config.get('smk_thresh', 3.0)
X = make_temporary_annot(qaid, vocab, wx_to_weight, ibs, config)
assert np.isclose(smk_pipeline.match_kernel_agg(X, X, wx_to_weight, alpha, thresh)[0], 1.0)
Y1 = make_temporary_annot(daid1, vocab, wx_to_weight, ibs, config)
item = smk_pipeline.match_kernel_agg(X, Y1, wx_to_weight, alpha, thresh)
score = item[0]
assert np.isclose(score, cm.get_annot_scores([daid1])[0])
assert np.isclose(smk_pipeline.match_kernel_agg(Y1, Y1, wx_to_weight, alpha, thresh)[0], 1.0)
Y2 = make_temporary_annot(daid2, vocab, wx_to_weight, ibs, config)
item = smk_pipeline.match_kernel_agg(X, Y2, wx_to_weight, alpha, thresh)
score = item[0]
assert np.isclose(score, cm.get_annot_scores([daid2])[0])
assert np.isclose(smk_pipeline.match_kernel_agg(Y2, Y2, wx_to_weight, alpha, thresh)[0], 1.0)
#Y2 = make_temporary_annot(daid2, vocab, wx_to_weight, ibs, config)
def kpts_inside_bbox(kpts, bbox, only_xy=False):
# Use keypoint extent to filter out what is in query
import vtool_ibeis as vt
xys = kpts[:, 0:2]
if only_xy:
flags = vt.point_inside_bbox(xys.T, bbox)
else:
wh_list = vt.get_kpts_wh(kpts)
radii = wh_list / 2
pts1 = xys + radii * (-1, 1)
pts2 = xys + radii * (-1, -1)
pts3 = xys + radii * (1, -1)
pts4 = xys + radii * (1, 1)
flags = np.logical_and.reduce([
vt.point_inside_bbox(pts1.T, bbox),
vt.point_inside_bbox(pts2.T, bbox),
vt.point_inside_bbox(pts3.T, bbox),
vt.point_inside_bbox(pts4.T, bbox),
])
return flags
def sanity_checks(offset_list, Y_list, query_annots, ibs):
nfeat_list = np.diff(offset_list)
for Y, nfeat in ut.ProgIter(zip(Y_list, nfeat_list), 'checking'):
assert nfeat == sum(ut.lmap(len, Y.fxs_list))
if False:
# Visualize queries
# Look at the standard query images here
# http://www.robots.ox.ac.uk:5000/~vgg/publications/2007/Philbin07/philbin07.pdf
from ibeis.viz import viz_chip
import plottool_ibeis as pt
pt.qt4ensure()
fnum = 1
pnum_ = pt.make_pnum_nextgen(len(query_annots.aids) // 5, 5)
for aid in ut.ProgIter(query_annots.aids):
pnum = pnum_()
viz_chip.show_chip(ibs, aid, in_image=True, annote=False,
notitle=True, draw_lbls=False,
fnum=fnum, pnum=pnum)
def oxford_conic_test():
# Test that these are what the readme says
A, B, C = [0.016682, 0.001693, 0.014927]
A, B, C = [0.010141, -1.1e-05, 0.02863]
Z = np.array([[A, B], [B, C]])
import vtool_ibeis as vt
invV = vt.decompose_Z_to_invV_2x2(Z) # NOQA
invV = vt.decompose_Z_to_invV_mats2x2(np.array([Z])) # NOQA
# seems ok
#invV = np.linalg.inv(V)
def load_internal_data():
"""
ibeis TestResult --db Oxford \
-p smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None \
-a oxford \
--dev-mode
ibeis TestResult --db GZ_Master1 \
-p smk:nWords=[64000],nAssign=[1],SV=[False],fg_on=False \
-a ctrl:qmingt=2 \
--dev-mode
"""
# from ibeis.algo.smk.smk_pipeline import * # NOQA
import ibeis
qreq_ = ibeis.testdata_qreq_(
defaultdb='Oxford', a='oxford',
p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None')
cm_list = qreq_.execute()
ave_precisions = [cm.get_annot_ave_precision() for cm in cm_list]
mAP = np.mean(ave_precisions)
print('mAP = %.3f' % (mAP,))
cm = cm_list[-1]
return qreq_, cm
def compare_data(Y_list_):
import ibeis
qreq_ = ibeis.testdata_qreq_(
defaultdb='Oxford', a='oxford',
p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True,dim_size=None')
qreq_.ensure_data()
gamma1s = []
gamma2s = []
print(len(Y_list_))
print(len(qreq_.daids))
dinva = qreq_.dinva
bady = []
for Y in Y_list_:
aid = Y.aid
gamma1 = Y.gamma
if aid in dinva.aid_to_idx:
idx = dinva.aid_to_idx[aid]
gamma2 = dinva.gamma_list[idx]
gamma1s.append(gamma1)
gamma2s.append(gamma2)
else:
bady += [Y]
print(Y.nid)
# print(Y.qual)
# ibs = qreq_.ibs
# z = ibs.annots([a.aid for a in bady])
import plottool_ibeis as pt
ut.qtensure()
gamma1s = np.array(gamma1s)
gamma2s = np.array(gamma2s)
sortx = gamma1s.argsort()
pt.plot(gamma1s[sortx], label='script')
pt.plot(gamma2s[sortx], label='pipe')
pt.legend()
def show_data_image(data_uri_order, i, offset_list, all_kpts, all_vecs):
"""
i = 12
"""
import vtool_ibeis as vt
from os.path import join
imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
gpath = join(imgdir, data_uri_order[i] + '.jpg')
image = vt.imread(gpath)
import plottool_ibeis as pt
pt.qt4ensure()
# pt.imshow(image)
l = offset_list[i]
r = offset_list[i + 1]
kpts = all_kpts[l:r]
vecs = all_vecs[l:r]
pt.interact_keypoints.ishow_keypoints(image, kpts, vecs,
ori=False, ell_alpha=.4,
color='distinct')
def check_image_sizes(data_uri_order, all_kpts, offset_list):
"""
Check if any keypoints go out of bounds wrt their associated images
"""
import vtool_ibeis as vt
from os.path import join
imgdir = ut.truepath('/raid/work/Oxford/oxbuild_images')
gpath_list = [join(imgdir, imgid + '.jpg') for imgid in data_uri_order]
imgsize_list = [vt.open_image_size(gpath) for gpath in gpath_list]
kpts_list = [all_kpts[l:r] for l, r in ut.itertwo(offset_list)]
kpts_extent = [vt.get_kpts_image_extent(kpts, outer=False, only_xy=False)
for kpts in ut.ProgIter(kpts_list, 'kpts extent')]
for i, (size, extent) in enumerate(zip(imgsize_list, kpts_extent)):
w, h = size
_, maxx, _, maxy = extent
assert np.isnan(maxx) or maxx < w
assert np.isnan(maxy) or maxy < h
def hyrule_vocab_test():
from yael.yutils import load_ext
from os.path import join
import sklearn.cluster
dbdir = ut.truepath('/raid/work/Oxford/')
datadir = dbdir + '/smk_data_iccv_2013/data/'
# Files storing descriptors/geometry for Oxford5k dataset
test_sift_fname = join(datadir, 'oxford_sift.uint8')
# test_nf_fname = join(datadir, 'oxford_nsift.uint32')
all_vecs = load_ext(test_sift_fname, ndims=128, verbose=True).astype(np.float32)
print(ut.print_object_size(all_vecs))
# nfeats_list = load_ext(test_nf_fname, verbose=True)
with ut.embed_on_exception_context:
rng = np.random.RandomState(13421421)
# init_size = int(config['num_words'] * 8)
num_words = int(2 ** 16)
init_size = num_words * 4
# converged after 26043 iterations
minibatch_params = dict(
n_clusters=num_words,
init='k-means++',
# init='random',
init_size=init_size,
n_init=1,
max_iter=100,
batch_size=1000,
tol=0.0,
max_no_improvement=10,
reassignment_ratio=0.01,
)
clusterer = sklearn.cluster.MiniBatchKMeans(
compute_labels=False, random_state=rng, verbose=1,
**minibatch_params)
clusterer.fit(all_vecs)
words = clusterer.cluster_centers_
print(words.shape)
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.algo.smk.script_smk
"""
run_asmk_script()
| apache-2.0 |
soylentdeen/BlurryApple | Diagnostics/calc_xy.py | 1 | 3238 | import scipy
import numpy
import pyfits
import matplotlib.pyplot as pyplot
def findRegion(x, y, regions):
for r in range(len(regions)):
for pix in regions[r]:
if (abs(x-pix[0]) < 2) & (abs(y-pix[1]) < 2):
return r
return -1
def findContiguousRegions(nonzero):
regions = []
for x, y in zip(nonzero[0], nonzero[1]):
index = findRegion(x, y, regions)
if index >= 0:
regions[index].append([x, y])
else:
regions.append([[x,y]])
return regions
def computeCentroid(image, coords):
sdrooc = coords.T
xvals = numpy.unique(sdrooc[0])
yvals = numpy.unique(sdrooc[1])
x = 0.0
y = 0.0
x_c = 0.0
y_c = 0.0
intensity = 0.0
i_c = 0.0
for pix in coords:
pixval = image[pix[0]][pix[1]]
intensity += pixval
x += pixval*pix[0]
y += pixval*pix[1]
x_c += pix[0]
y_c += pix[1]
i_c += 1.0
#print x_c/i_c, y_c/i_c
#print x/intensity, x_c/i_c
#print y/intensity, y_c/i_c
return x/intensity, y/intensity, x_c/i_c, y_c/i_c
def avgGradients(image, nonzero):
gradx = []
grady = []
xc_pos = []
yc_pos = []
postageStamps = findContiguousRegions(nonzero)
for postageStamp in postageStamps:
x, y, x_c, y_c = computeCentroid(image, numpy.array(postageStamp))
gradx.append(x-x_c)
grady.append(y-y_c)
xc_pos.append(x_c)
yc_pos.append(y_c)
return gradx, grady, xc_pos, yc_pos
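# Illustrative check, not part of the original script: for a synthetic postage
# stamp with most of its flux in one corner, the intensity-weighted centroid
# from computeCentroid is pulled toward that corner relative to the plain
# geometric center; that offset is the gradient accumulated in avgGradients.
def _example_centroid_shift():
    img = numpy.zeros([4, 4])
    img[0][0] = 1.0
    img[3][3] = 9.0
    coords = numpy.array([[x, y] for x in range(4) for y in range(4)])
    x, y, x_c, y_c = computeCentroid(img, coords)
    return x - x_c, y - y_c   # both positive: shift toward the bright corner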
fig = pyplot.figure(0)
fig.clear()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
datadir = '/home/deen/Data/GRAVITY/'
image = 'RefSlopes/RefSlopes_5.fits'
bkgnd = 'Bkgnd/Bkgnd.fits'
back = pyfits.getdata(datadir+bkgnd)
backstack = numpy.zeros([72, 72])
f = numpy.resize(back[0][-1].copy(), [72,72])
nonzero = numpy.nonzero(f)
backstack += f
initial_median = numpy.median(f[nonzero])
for frame in back[1:]:
f = numpy.resize(frame[-1].copy(), [72, 72])
backstack += f*initial_median/numpy.median(f[nonzero])
backstack /= float(len(back))
data = pyfits.getdata(datadir+image)
imgstack = numpy.zeros([72, 72])
f = numpy.resize(data[0][-1].copy(), [72, 72])
imgstack += f
#nonzero = numpy.nonzero(f)
#initial_median = numpy.median(f[nonzero])
f = numpy.resize(data[0][-1].copy(), [72, 72])
nonzero = numpy.nonzero(f)
initial_median = numpy.median(f[nonzero])
for frame in data[1:]:
f = numpy.resize(frame[-1].copy(), [72, 72])
imgstack+= f*initial_median/numpy.median(f[nonzero])
imgstack /= float(len(data))
calibrated = imgstack
#calibrated = imgstack - backstack
gx, gy, x_c, y_c = avgGradients(calibrated, nonzero)
#calibrated[nonzero] += 50000.0
ax.imshow(calibrated.transpose())
ax.scatter(x_c, y_c, color = 'r', marker = '+')
ax.scatter(numpy.array(x_c)+numpy.array(gx), numpy.array(y_c)+numpy.array(gy), color = 'k', marker = '+')
for p in zip(x_c, y_c, gx, gy):
ax.plot([p[0], p[0]+p[2]], [p[1], p[1]+p[3]], color = 'k')
print("Mean X gradient: %f" % numpy.mean(gx))
print("Mean Y gradient: %f" % numpy.mean(gy))
ax.set_title("Gradients: x=%5.3f, y=%5.3f" %(numpy.mean(gx), numpy.mean(gy)))
fig.show()
#fig.savefig('centroids_stretch.png')
#fig.savefig('centroids.png')
| gpl-2.0 |
zingale/pyro2 | examples/multigrid/mg_vis.py | 2 | 2607 | #!/usr/bin/env python3
"""
an example of using the multigrid class to solve Laplace's equation. Here, we
solve::
u_xx + u_yy = -2[(1-6x**2)y**2(1-y**2) + (1-6y**2)x**2(1-x**2)]
u = 0 on the boundary
This is the example from page 64 of the book `A Multigrid Tutorial, 2nd Ed.`
The analytic solution is u(x,y) = (x**2 - x**4)(y**4 - y**2)
"""
from __future__ import print_function
import numpy as np
import multigrid.MG as MG
import matplotlib.pyplot as plt
# the analytic solution
def true(x, y):
return (x**2 - x**4)*(y**4 - y**2)
# the righthand side
def f(x, y):
return -2.0*((1.0-6.0*x**2)*y**2*(1.0-y**2) + (1.0-6.0*y**2)*x**2*(1.0-x**2))
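# Optional sanity check, not part of the original example and assuming sympy
# is available: verify symbolically that the analytic solution satisfies
# u_xx + u_yy = f and vanishes on the boundary of the unit square.
def verify_analytic_solution():
    import sympy
    x, y = sympy.symbols('x y')
    u = (x**2 - x**4)*(y**4 - y**2)
    rhs = -2*((1 - 6*x**2)*y**2*(1 - y**2) + (1 - 6*y**2)*x**2*(1 - x**2))
    residual = sympy.simplify(sympy.diff(u, x, 2) + sympy.diff(u, y, 2) - rhs)
    return residual == 0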
def doit(nx, ny):
# test the multigrid solver
# create the multigrid object
a = MG.CellCenterMG2d(nx, ny,
xl_BC_type="dirichlet", yl_BC_type="dirichlet",
xr_BC_type="dirichlet", yr_BC_type="dirichlet",
verbose=0,
nsmooth=5, nsmooth_bottom=10,
vis=1, true_function=true,
vis_title=r"$u_{xx} + u_{yy} = -2[(1-6x^2)y^2(1-y^2) + (1-6y^2)x^2(1-x^2)]$")
plt.ion()
plt.figure(num=1, figsize=(12.8, 7.2), dpi=100, facecolor='w')
# initialize the solution to 0
init = a.soln_grid.scratch_array()
a.init_solution(init)
# initialize the RHS using the function f
rhs = f(a.x2d, a.y2d)
a.init_RHS(rhs)
# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
# alternately, we can just use smoothing by uncommenting the following
# a.smooth(a.nlevels-1,50000)
# get the solution
v = a.get_solution()
# compute the error from the analytic solution
b = true(a.x2d, a.y2d)
e = v - b
print(" L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" %
(a.soln_grid.norm(e), a.relative_error, a.num_cycles))
# plot it
# plt.figure(num=1, figsize=(2.10,2.10), dpi=100, facecolor='w')
plt.figure(num=1, figsize=(5.0, 5.0), dpi=100, facecolor='w')
plt.imshow(np.transpose(v[a.ilo:a.ihi+1, a.jlo:a.jhi+1]),
interpolation="nearest", origin="lower",
extent=[a.xmin, a.xmax, a.ymin, a.ymax])
# plt.axis("off")
# plt.subplots_adjust(bottom=0.0, top=1.0, left=0.0, right=1.0)
plt.xlabel("x")
plt.ylabel("y")
plt.savefig("mg_test.png")
# store the output for later comparison
my_data = a.get_solution_object()
my_data.write("mg_test")
if __name__ == "__main__":
doit(64, 64)
| bsd-3-clause |
TheNeuralBit/arrow | python/pyarrow/tests/test_ipc.py | 2 | 4452 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import pytest
import numpy as np
from pandas.util.testing import assert_frame_equal
import pandas as pd
from pyarrow.compat import unittest
import pyarrow as pa
class MessagingTest(object):
def setUp(self):
self.sink = self._get_sink()
def _get_sink(self):
return io.BytesIO()
def _get_source(self):
return pa.BufferReader(self.sink.getvalue())
def write_batches(self):
nrows = 5
df = pd.DataFrame({
'one': np.random.randn(nrows),
'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
batch = pa.RecordBatch.from_pandas(df)
writer = self._get_writer(self.sink, batch.schema)
num_batches = 5
frames = []
batches = []
for i in range(num_batches):
unique_df = df.copy()
unique_df['one'] = np.random.randn(nrows)
batch = pa.RecordBatch.from_pandas(unique_df)
writer.write_batch(batch)
frames.append(unique_df)
batches.append(batch)
writer.close()
return batches
class TestFile(MessagingTest, unittest.TestCase):
# Also tests writing zero-copy NumPy array with additional padding
def _get_writer(self, sink, schema):
return pa.FileWriter(sink, schema)
def test_simple_roundtrip(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.FileReader(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
def test_read_all(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.FileReader(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
class TestStream(MessagingTest, unittest.TestCase):
def _get_writer(self, sink, schema):
return pa.StreamWriter(sink, schema)
def test_simple_roundtrip(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.StreamReader(file_contents)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.get_next_batch()
def test_read_all(self):
batches = self.write_batches()
file_contents = self._get_source()
reader = pa.StreamReader(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
class TestInMemoryFile(TestFile):
def _get_sink(self):
return pa.InMemoryOutputStream()
def _get_source(self):
return self.sink.get_result()
def test_ipc_zero_copy_numpy():
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.InMemoryOutputStream()
write_file(batch, sink)
buffer = sink.get_result()
reader = pa.BufferReader(buffer)
batches = read_file(reader)
data = batches[0].to_pandas()
rdf = pd.DataFrame(data)
assert_frame_equal(df, rdf)
def write_file(batch, sink):
writer = pa.FileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
def read_file(source):
reader = pa.FileReader(source)
return [reader.get_batch(i)
for i in range(reader.num_record_batches)]
| apache-2.0 |
okesontj/APMonitor | apm.py | 4 | 26852 | # Import
import csv
import math
import os
import random
import string
import time
import webbrowser
from contextlib import closing
import sys
# Get Python version
ver = sys.version_info[0]
#print('Version: '+str(ver))
if ver==2: # Python 2
import urllib
else: # Python 3+
import urllib.request, urllib.parse, urllib.error
#import socket
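# Illustrative workflow sketch, not part of the original module. The helpers
# used here are defined below (once for Python 2 and once for Python 3+);
# the server URL, application name, 'model.apm' and 'data.csv' are
# placeholder values.
def example_solve(server='http://byu.apmonitor.com', app='myapp'):
    load_model(server, app, 'model.apm')   # upload the APM model file
    load_data(server, app, 'data.csv')     # upload the CSV data file
    output = cmd(server, app, 'solve')     # request a solution
    print(output)
    return get_solution(server, app)       # dict of solution values/trajectories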
if ver==2: # Python 2
def cmd(server, app, aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = string.strip(server) + '/online/apm_line.php'
app = app.lower()
app.replace(" ", "")
params = urllib.urlencode({'p': app, 'a': aline})
f = urllib.urlopen(url_base, params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
char = f.read(1)
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
response = f.read()
except:
response = 'Failed to connect to server'
return response
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = string.strip(server) + '/ip.php'
f = urllib.urlopen(url_base)
ip = string.strip(f.read())
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + string.strip(mode) + '.t0'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
fh.write(solution.replace('\r',''))
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.urlopen(url)) as f:
reader = csv.reader(f, delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + filename
f = urllib.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
fh.write(file.replace('\r',''))
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,aline)
return response
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,x)
return response
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
f = open(filename, 'rb')
reader = csv.reader(f)
headers = reader.next()
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(string.strip(name))
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = string.strip(server) + '/online/get_tag.php'
app = app.lower()
        app = app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name})
f = urllib.urlopen(url_base,params)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = string.strip(server) + '/online/meas.php'
app = app.lower()
        app = app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name+'.MEAS','v':value})
f = urllib.urlopen(url_base,params)
# Send request to web-server
response = f.read()
return response
else: # Python 3+
def cmd(server,app,aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = server.strip() + '/online/apm_line.php'
app = app.lower()
            app = app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'a':aline})
en_params = params.encode()
f = urllib.request.urlopen(url_base,en_params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
en_char = f.read(1)
char = en_char.decode()
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
en_response = f.read()
response = en_response.decode()
except:
response = 'Failed to connect to server'
return response
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = server.strip() + '/ip.php'
f = urllib.request.urlopen(url_base)
fip = f.read()
ip = fip.decode().strip()
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + mode.strip() + '.t0'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
en_solution = solution.decode().replace('\r','')
fh.write(en_solution)
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.request.urlopen(url)) as f:
fr = f.read()
de_f = fr.decode()
reader = csv.reader(de_f.splitlines(), delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + filename
f = urllib.request.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
en_file = file.decode().replace('\r','')
fh.write(en_file)
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,aline)
return response
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
        app = app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
        app = app.replace(" ","")
response = cmd(server,app,x)
return response
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
            f = open(filename, 'r')
reader = csv.reader(f)
headers = next(reader)
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(name.strip())
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = server.strip() + '/online/get_tag.php'
app = app.lower()
        app = app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = server.strip() + '/online/meas.php'
app = app.lower()
        app = app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name+'.MEAS','v':value})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
response = f.read()
return response
def solve(app,imode):
'''
APM Solver for simulation, estimation, and optimization with both
static (steady-state) and dynamic models. The dynamic modes can solve
index 2+ DAEs without numerical differentiation.
y = solve(app,imode)
Function solve uploads the model file (apm) and optionally
a data file (csv) with the same name to the web-server and performs
a forward-time stepping integration of ODE or DAE equations
with the following arguments:
Input: app = model (apm) and data file (csv) name
imode = simulation mode {1..7}
steady-state dynamic sequential
simulate 1 4 7
estimate 2 5 8 (under dev)
optimize 3 6 9 (under dev)
Output: y.names = names of all variables
y.values = tables of values corresponding to y.names
y.nvar = number of variables
y.x = combined variables and values but variable
names may be modified to make them valid
characters (e.g. replace '[' with '')
'''
# server and application file names
server = 'http://byu.apmonitor.com'
app = app.lower()
    app = app.replace(" ","")
app_model = app + '.apm'
app_data = app + '.csv'
# randomize the application name
from random import randint
app = app + '_' + str(randint(1000,9999))
# clear previous application
cmd(server,app,'clear all')
try:
# load model file
load_model(server,app,app_model)
except:
        msg = 'Model file ' + app_model + ' does not exist'
print(msg)
return []
# check if data file exists (optional)
try:
# load data file
load_data(server,app,app_data)
except:
# data file is optional
        print('Optional data file ' + app_data + ' does not exist')
pass
# default options
# use or don't use web viewer
    use_web = False
    if use_web:
set_option(server,app,'nlc.web',2)
else:
set_option(server,app,'nlc.web',0)
# internal nodes in the collocation (between 2 and 6)
set_option(server,app,'nlc.nodes',3)
# sensitivity analysis (default: 0 - off)
set_option(server,app,'nlc.sensitivity',0)
# simulation mode (1=ss, 2=mpu, 3=rto)
# (4=sim, 5=est, 6=nlc, 7=sqs)
set_option(server,app,'nlc.imode',imode)
# attempt solution
solver_output = cmd(server,app,'solve')
# check for successful solution
status = get_attribute(server,app,'nlc.appstatus')
if status==1:
# open web viewer if selected
        if use_web:
web(server,app)
# retrieve solution and solution.csv
z = get_solution(server,app)
return z
else:
print(solver_output)
print('Error: Did not converge to a solution')
return []
def plotter(y, subplots=1, save=False, filename='solution', format='png'):
'''
The plotter will go through each of the variables in the output y and
create plots for them. The number of vertical subplots can be
specified and the plots can be saved in the same folder.
    This functionality is dependent on matplotlib, so this library must
be installed on the computer for the automatic plotter to work.
The input y should be the output from the apm solution. This can be
retrieved from the server using the following line of code:
y = get_solution(server, app)
'''
try:
import matplotlib.pyplot as plt
var_size = len(y)
colors = ['r-', 'g-', 'k-', 'b-']
color_pick = 0
if subplots > 9:
subplots = 9
j = 1
pltcount = 0
start = True
for i in range(var_size):
if list(y)[i] != 'time' and list(y)[i][:3] != 'slk':
if j == 1:
if start != True:
plt.xlabel('time')
start = False
if save:
if pltcount != 0:
plt.savefig(filename + str(pltcount) + '.' + format, format=format)
pltcount += 1
plt.figure()
else:
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(100*subplots+10+j)
plt.plot(y['time'], y[list(y)[i]], colors[color_pick], linewidth=2.0)
if color_pick == 3:
color_pick = 0
else:
color_pick += 1
plt.ylabel(list(y)[i])
if subplots == 1:
plt.title(list(y)[i])
if j == subplots or i+2 == var_size:
j = 1
else:
j += 1
plt.xlabel('time')
if save:
plt.savefig('plots/' + filename + str(pltcount) + '.' + format, format=format)
if pltcount <= 20:
plt.show()
except ImportError:
print('Dependent Packages not imported.')
print('Please install matplotlib package to use plotting features.')
except:
print('Graphs not created. Double check that the')
        print('simulation/optimization was successful')
# This code adds back compatibility with previous versions
apm = cmd
apm_load = load_model
csv_load = load_data
apm_ip = get_ip
apm_sol = get_solution
apm_get = get_file
apm_option = set_option
apm_web = web
apm_web_var = web_var
apm_web_root = web_root
apm_info = classify
apm_tag = get_attribute
apm_meas = load_meas
apm_solve = solve
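# Illustrative usage sketch (not part of the upstream APM toolbox): 'demo.apm'
# and 'demo.csv' are hypothetical file names, and solve() contacts the public
# byu.apmonitor.com server, so network access is assumed.
if __name__ == '__main__':
    # imode=7 requests a sequential dynamic simulation (see the solve() docstring)
    results = solve('demo', 7)
    if results:
        # results maps variable names to scalars or arrays of time-series values
        plotter(results, subplots=3, save=False)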
| bsd-2-clause |
ronalcc/zipline | zipline/finance/performance/tracker.py | 3 | 22303 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pickle
from six import iteritems
from datetime import datetime
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
import zipline.finance.risk as risk
from zipline.finance.trading import TradingEnvironment
from . period import PerformancePeriod
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params):
self.sim_params = sim_params
env = TradingEnvironment.instance()
self.period_start = self.sim_params.period_start
self.period_end = self.sim_params.period_end
self.last_close = self.sim_params.last_close
first_open = self.sim_params.first_open.tz_convert(env.exchange_tz)
self.day = pd.Timestamp(datetime(first_open.year, first_open.month,
first_open.day), tz='UTC')
self.market_open, self.market_close = env.get_open_and_close(self.day)
self.total_days = self.sim_params.days_in_period
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
all_trading_days = env.trading_days
mask = ((all_trading_days >= normalize_date(self.period_start)) &
(all_trading_days <= normalize_date(self.period_end)))
self.trading_days = all_trading_days[mask]
self.dividend_frame = pd.DataFrame()
self._dividend_count = 0
self.position_tracker = PositionTracker()
self.perf_periods = []
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.trading_days)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min'))
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(self.sim_params,
create_first_day_stats=True)
self.minute_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the
# entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
                # don't serialize positions for cumulative period
serialize_positions=False
)
self.minute_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.minute_performance)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the cumulative period will be calculated over the entire test.
self.period_start,
self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
            # don't serialize positions for cumulative period
serialize_positions=False,
)
self.cumulative_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.cumulative_performance)
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
self.capital_base,
# the daily period will be calculated for the market day
self.market_open,
self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
)
self.todays_performance.position_tracker = self.position_tracker
self.perf_periods.append(self.todays_performance)
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.day_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.day_count / self.total_days
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def update_dividends(self, new_dividends):
"""
Update our dividend frame with new dividends. @new_dividends should be
a DataFrame with columns containing at least the entries in
zipline.protocol.DIVIDEND_FIELDS.
"""
# Mark each new dividend with a unique integer id. This ensures that
# we can differentiate dividends whose date/sid fields are otherwise
# identical.
new_dividends['id'] = np.arange(
self._dividend_count,
self._dividend_count + len(new_dividends),
)
self._dividend_count += len(new_dividends)
self.dividend_frame = pd.concat(
[self.dividend_frame, new_dividends]
).sort(['pay_date', 'ex_date']).set_index('id', drop=False)
def initialize_dividends_from_other(self, other):
"""
Helper for copying dividends to a new PerformanceTracker while
preserving dividend count. Useful if a simulation needs to create a
new PerformanceTracker mid-stream and wants to preserve stored dividend
info.
Note that this does not copy unpaid dividends.
"""
self.dividend_frame = other.dividend_frame
self._dividend_count = other._dividend_count
def handle_sid_removed_from_universe(self, sid):
"""
This method handles any behaviors that must occur when a SID leaves the
universe of the TradingAlgorithm.
Parameters
        ----------
sid : int
The sid of the Asset being removed from the universe.
"""
# Drop any dividends for the sid from the dividends frame
self.dividend_frame = self.dividend_frame[
self.dividend_frame.sid != sid
]
def update_performance(self):
# calculate performance as of last trade
for perf_period in self.perf_periods:
perf_period.calculate_performance()
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def process_trade(self, event):
# update last sale, and pay out a cash adjustment
cash_adjustment = self.position_tracker.update_last_sale(event)
if cash_adjustment != 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(cash_adjustment)
def process_transaction(self, event):
self.txn_count += 1
self.position_tracker.execute_transaction(event)
for perf_period in self.perf_periods:
perf_period.handle_execution(event)
def process_dividend(self, dividend):
log.info("Ignoring DIVIDEND event.")
def process_split(self, event):
leftover_cash = self.position_tracker.handle_split(event)
if leftover_cash > 0:
for perf_period in self.perf_periods:
perf_period.handle_cash_payment(leftover_cash)
def process_order(self, event):
for perf_period in self.perf_periods:
perf_period.record_order(event)
def process_commission(self, event):
self.position_tracker.handle_commission(event)
for perf_period in self.perf_periods:
perf_period.handle_commission(event)
def process_benchmark(self, event):
if self.sim_params.data_frequency == 'minute' and \
self.sim_params.emission_rate == 'daily':
# Minute data benchmarks should have a timestamp of market
# close, so that calculations are triggered at the right time.
# However, risk module uses midnight as the 'day'
# marker for returns, so adjust back to midnight.
midnight = pd.tseries.tools.normalize_date(event.dt)
else:
midnight = event.dt
if midnight not in self.all_benchmark_returns.index:
raise AssertionError(
("Date %s not allocated in all_benchmark_returns. "
"Calendar seems to mismatch with benchmark. "
"Benchmark container is=%s" %
(midnight,
self.all_benchmark_returns.index)))
self.all_benchmark_returns[midnight] = event.returns
def process_close_position(self, event):
# CLOSE_POSITION events contain prices that must be handled as a final
# trade event
self.process_trade(event)
txn = self.position_tracker.create_close_position_transaction(event)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, completed_date):
"""
Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if len(self.dividend_frame) == 0:
# We don't currently know about any dividends for this simulation
# period, so bail.
return
# Get the next trading day and, if it is outside the bounds of the
# simulation, bail.
next_trading_day = TradingEnvironment.instance().\
next_trading_day(completed_date)
if (next_trading_day is None) or (next_trading_day >= self.last_close):
return
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
ex_date_mask = (self.dividend_frame['ex_date'] == next_trading_day)
dividends_earnable = self.dividend_frame[ex_date_mask]
# Dividends whose pay date is the next trading day. If we held any of
# these stocks on midnight before the ex_date, we need to pay these out
# now.
pay_date_mask = (self.dividend_frame['pay_date'] == next_trading_day)
dividends_payable = self.dividend_frame[pay_date_mask]
position_tracker = self.position_tracker
if len(dividends_earnable):
position_tracker.earn_dividends(dividends_earnable)
if not len(dividends_payable):
return
net_cash_payment = position_tracker.pay_dividends(dividends_payable)
for period in self.perf_periods:
# notify periods to update their stats
period.handle_dividends_paid(net_cash_payment)
def handle_minute_close(self, dt):
"""
Handles the close of the given minute. This includes handling
market-close functions if the given minute is the end of the market
day.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
(dict, dict/None)
A tuple of the minute perf packet and daily perf packet.
If the market day has not ended, the daily perf packet is None.
"""
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
self.minute_performance.rollover()
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account)
minute_packet = self.to_dict(emission_type='minute')
# if this is the close, update dividends for the next day.
# Return the performance tuple
if dt == self.market_close:
return (minute_packet, self._handle_market_close(todays_date))
else:
return (minute_packet, None)
def handle_market_close_daily(self):
"""
Function called after handle_data when running with daily emission
rate.
"""
self.update_performance()
completed_date = self.day
account = self.get_account(False)
# update risk metrics for cumulative performance
self.cumulative_risk_metrics.update(
completed_date,
self.todays_performance.returns,
self.all_benchmark_returns[completed_date],
account)
return self._handle_market_close(completed_date)
def _handle_market_close(self, completed_date):
# increment the day counter before we move markers forward.
self.day_count += 1.0
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# move the market day markers forward
env = TradingEnvironment.instance()
self.market_open, self.market_close = \
env.next_open_and_close(self.day)
self.day = env.next_trading_day(self.day)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# Check for any dividends
self.check_upcoming_dividends(completed_date)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.day_count), m=self.total_days))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
self.risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl)
risk_dict = self.risk_report.to_dict()
return risk_dict
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
state_dict['dividend_frame'] = pickle.dumps(self.dividend_frame)
state_dict['_dividend_count'] = self._dividend_count
# we already store perf periods as attributes
del state_dict['perf_periods']
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("PerformanceTracker saved state is too old.")
self.__dict__.update(state)
# Handle the dividend frame specially
self.dividend_frame = pickle.loads(state['dividend_frame'])
# properly setup the perf periods
self.perf_periods = []
p_types = ['cumulative', 'todays', 'minute']
for p_type in p_types:
name = p_type + '_performance'
period = getattr(self, name, None)
if period is None:
continue
period._position_tracker = self.position_tracker
self.perf_periods.append(period)
| apache-2.0 |
ngoix/OCRF | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
jchodera/mdtraj | tests/test_topology.py | 4 | 9078 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, Matthew Harrigan, Carlos Xavier Hernandez
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import pickle
import tempfile
import mdtraj as md
import numpy as np
import pytest
from mdtraj.testing import eq
try:
from simtk.openmm import app
import simtk.unit as u
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
needs_openmm = pytest.mark.skipif(not HAVE_OPENMM, reason='needs OpenMM')
@needs_openmm
def test_topology_openmm(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
# the openmm trajectory doesn't have the distinction
# between resSeq and index, so if they're out of whack
# in the openmm version, that cant be preserved
for top in [topology, topology_with_bond_order]:
for residue in top.residues:
residue.resSeq = residue.index
mm = top.to_openmm()
assert isinstance(mm, app.Topology)
topology2 = md.Topology.from_openmm(mm)
eq(top, topology2)
@needs_openmm
def test_topology_openmm_boxes(get_fn):
traj = md.load(get_fn('1vii_sustiva_water.pdb'))
mmtop = traj.topology.to_openmm(traj=traj)
box = mmtop.getUnitCellDimensions() / u.nanometer
def test_topology_pandas(get_fn):
topology = md.load(get_fn('native.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
# Make sure default argument of None works, see issue #774
topology3 = md.Topology.from_dataframe(atoms)
def test_topology_pandas_TIP4PEW(get_fn):
topology = md.load(get_fn('GG-tip4pew.pdb')).topology
atoms, bonds = topology.to_dataframe()
topology2 = md.Topology.from_dataframe(atoms, bonds)
eq(topology, topology2)
def test_topology_numbers(get_fn):
topology = md.load(get_fn('1bpi.pdb')).topology
assert len(list(topology.atoms)) == topology.n_atoms
assert len(list(topology.residues)) == topology.n_residues
assert all([topology.atom(i).index == i for i in range(topology.n_atoms)])
def test_topology_unique_elements_bpti(get_fn):
traj = md.load(get_fn('bpti.pdb'))
top, bonds = traj.top.to_dataframe()
atoms = np.unique(["C", "O", "N", "H", "S"])
eq(atoms, np.unique(top.element.values))
def test_chain(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
chain = top.chain(0)
assert chain.n_residues == len(list(chain.residues))
atoms = list(chain.atoms)
assert chain.n_atoms == len(atoms)
for i in range(chain.n_atoms):
assert atoms[i] == chain.atom(i)
def test_residue(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
residue = top.residue(0)
assert len(list(residue.atoms)) == residue.n_atoms
atoms = list(residue.atoms)
for i in range(residue.n_atoms):
assert residue.atom(i) == atoms[i]
def test_segment_id(get_fn):
top = md.load(get_fn('ala_ala_ala.pdb')).topology
assert next(top.residues).segment_id == "AAL", "Segment id is not being assigned correctly for ala_ala_ala.psf"
df = top.to_dataframe()[0]
assert len(df["segmentID"] == "AAL") == len(
df), "Segment id is not being assigned correctly to topology data frame ala_ala_ala.psf"
def test_nonconsecutive_resSeq(get_fn):
t = md.load(get_fn('nonconsecutive_resSeq.pdb'))
assert eq(np.array([r.resSeq for r in t.top.residues]), np.array([1, 3, 5]))
df1 = t.top.to_dataframe()
df2 = md.Topology.from_dataframe(*df1).to_dataframe()
assert eq(df1[0], df2[0])
# round-trip through a PDB load/save loop
fd, fname = tempfile.mkstemp(suffix='.pdb')
os.close(fd)
t.save(fname)
t2 = md.load(fname)
assert eq(df1[0], t2.top.to_dataframe()[0])
os.unlink(fname)
def test_pickle(get_fn):
# test pickling of topology (bug #391)
topology_without_bond_order = md.load(get_fn('bpti.pdb')).topology
topology_with_bond_order = md.load(get_fn('imatinib.mol2')).topology
for top in [topology_with_bond_order, topology_without_bond_order]:
loaded_top = pickle.loads(pickle.dumps(top))
assert loaded_top == top
def test_atoms_by_name(get_fn):
top = md.load(get_fn('bpti.pdb')).topology
atoms = list(top.atoms)
for atom1, atom2 in zip(top.atoms_by_name('CA'), top.chain(0).atoms_by_name('CA')):
assert atom1 == atom2
assert atom1 in atoms
assert atom1.name == 'CA'
assert len(list(top.atoms_by_name('CA'))) == sum(1 for _ in atoms if _.name == 'CA')
assert top.residue(15).atom('CA') == [a for a in top.residue(15).atoms if a.name == 'CA'][0]
with pytest.raises(KeyError):
top.residue(15).atom('sdfsdf')
def test_select_atom_indices(get_fn):
top = md.load(get_fn('native.pdb')).topology
assert eq(top.select_atom_indices('alpha'), np.array([8]))
assert eq(top.select_atom_indices('minimal'),
np.array([4, 5, 6, 8, 10, 14, 15, 16, 18]))
with pytest.raises(ValueError):
top.select_atom_indices('sdfsdf')
@needs_openmm
def test_top_dataframe_openmm_roundtrip(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
top, bonds = t.top.to_dataframe()
t.topology = md.Topology.from_dataframe(top, bonds)
omm_top = t.top.to_openmm()
def test_n_bonds(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
for atom in t.top.atoms:
if atom.element.symbol == 'H':
assert atom.n_bonds == 1
elif atom.element.symbol == 'C':
assert atom.n_bonds in [3, 4]
elif atom.element.symbol == 'O':
assert atom.n_bonds in [1, 2]
def test_load_unknown_topology(get_fn):
try:
md.load(get_fn('frame0.dcd'), top=get_fn('frame0.dcd'))
except IOError as e:
# we want to make sure there's a nice error message than includes
# a list of the supported topology formats.
assert all(s in str(e) for s in ('.pdb', '.psf', '.prmtop'))
else:
assert False # fail
def test_unique_pairs():
n = 10
a = np.arange(n)
b = np.arange(n, n + n)
eq(md.Topology._unique_pairs(a, a).sort(), md.Topology._unique_pairs_equal(a).sort())
eq(md.Topology._unique_pairs(a, b).sort(), md.Topology._unique_pairs_mutually_exclusive(a, b).sort())
def test_select_pairs(get_fn):
traj = md.load(get_fn('tip3p_300K_1ATM.pdb'))
select_pairs = traj.top.select_pairs
assert len(select_pairs(selection1='name O', selection2='name O')) == 258 * (258 - 1) // 2
assert len(select_pairs(selection1='name H1', selection2='name O')) == 258 * 258
selections = iter([
# Equal
("(name O) or (name =~ 'H.*')", "(name O) or (name =~ 'H.*')"),
('all', 'all'),
# Exclusive
('name O', 'name H1'),
('name H1', 'name O'),
# Overlap
(range(traj.n_atoms), 'name O'),
('all', 'name O')])
for select1, select2 in selections:
select3, select4 = next(selections)
assert eq(select_pairs(selection1=select1, selection2=select2).sort(),
select_pairs(selection1=select3, selection2=select4).sort())
def test_to_fasta(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
assert t.topology.to_fasta(0) == "ENFSGGCVAGYMRTPDGRCKPTFYQLIT"
def test_subset(get_fn):
t1 = md.load(get_fn('2EQQ.pdb')).top
t2 = t1.subset([1, 2, 3])
assert t2.n_residues == 1
def test_molecules(get_fn):
top = md.load(get_fn('4OH9.pdb')).topology
molecules = top.find_molecules()
assert sum(len(mol) for mol in molecules) == top.n_atoms
assert sum(1 for mol in molecules if len(mol) > 1) == 2 # All but two molecules are water
def test_copy_and_hash(get_fn):
t = md.load(get_fn('traj.h5'))
t1 = t.topology
t2 = t.topology.copy()
assert t1 == t2
assert hash(tuple(t1._chains)) == hash(tuple(t2._chains))
assert hash(tuple(t1._atoms)) == hash(tuple(t2._atoms))
assert hash(tuple(t1._bonds)) == hash(tuple(t2._bonds))
assert hash(tuple(t1._residues)) == hash(tuple(t2._residues))
assert hash(t1) == hash(t2)
| lgpl-2.1 |
giorgiop/scikit-learn | sklearn/neighbors/nearest_centroid.py | 37 | 7348 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
    The centroid for the samples corresponding to each class is the point
    from which the sum of the distances (according to the metric) of all
    samples that belong to that particular class is minimized.
    If the "manhattan" metric is provided, this centroid is the median and
    for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format to make it easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
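# Illustrative sketch (not part of the scikit-learn source): shrinking pulls each
# class centroid toward the overall data centroid and zeroes out features whose
# deviation falls below shrink_threshold, acting as implicit feature selection.
# The toy data and threshold below are arbitrary.
if __name__ == "__main__":
    X_demo = np.array([[-2., -1.], [-3., -2.], [1., 1.], [3., 2.]])
    y_demo = np.array([1, 1, 2, 2])
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(X_demo, y_demo)
    print(clf.centroids_)            # shrunken per-class centroids
    print(clf.predict([[0., 0.]]))   # label of the nearest (shrunken) centroid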
| bsd-3-clause |
LuizArmesto/gastos_abertos | setup.py | 1 | 1181 | # -*- coding: utf-8 -*-
from setuptools import setup
project = "gastosabertos"
setup(
name=project,
version='0.0.1',
url='https://github.com/okfn-brasil/gastos_abertos',
description='Visualization of public spending in Sao Paulo city for Gastos Abertos project',
author='Edgar Zanella Alvarenga',
author_email='[email protected]',
packages=["gastosabertos"],
include_package_data=True,
zip_safe=False,
install_requires=[
'Flask>=0.10.1',
'Flask-SQLAlchemy',
'Flask-WTF',
'Flask-Script',
'Flask-Babel',
'Flask-Testing',
'Flask-Restful',
'Flask-Paginate',
'Flask-CORS',
'Flask-Restplus',
'geoalchemy2',
'fabric',
'docopt',
'pandas',
'psycopg2',
'geopy',
'shapely',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries'
]
)
| agpl-3.0 |
jorisvandenbossche/geopandas | geopandas/tests/test_sindex.py | 1 | 4517 | import sys
from shapely.geometry import Polygon, Point
from geopandas import GeoSeries, GeoDataFrame, base, read_file
from geopandas.tests.util import unittest, download_nybb
@unittest.skipIf(sys.platform.startswith("win"), "fails on AppVeyor")
@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
class TestSeriesSindex(unittest.TestCase):
def test_empty_index(self):
self.assert_(GeoSeries().sindex is None)
def test_point(self):
s = GeoSeries([Point(0, 0)])
self.assertEqual(s.sindex.size, 1)
hits = s.sindex.intersection((-1, -1, 1, 1))
self.assertEqual(len(list(hits)), 1)
hits = s.sindex.intersection((-2, -2, -1, -1))
self.assertEqual(len(list(hits)), 0)
def test_empty_point(self):
s = GeoSeries([Point()])
        self.assert_(s.sindex is None)
def test_polygons(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
self.assertEqual(s.sindex.size, 3)
def test_polygons_append(self):
t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(0, 0), (1, 1), (0, 1)])
sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
s = GeoSeries([t1, t2, sq])
t = GeoSeries([t1, t2, sq], [3,4,5])
s = s.append(t)
self.assertEqual(len(s), 6)
self.assertEqual(s.sindex.size, 6)
def test_lazy_build(self):
s = GeoSeries([Point(0, 0)])
self.assert_(s._sindex is None)
self.assertEqual(s.sindex.size, 1)
self.assert_(s._sindex is not None)
@unittest.skipIf(sys.platform.startswith("win"), "fails on AppVeyor")
@unittest.skipIf(not base.HAS_SINDEX, 'Rtree absent, skipping')
class TestFrameSindex(unittest.TestCase):
def setUp(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
self.df = GeoDataFrame(data, geometry='location')
def test_sindex(self):
self.df.crs = {'init': 'epsg:4326'}
self.assertEqual(self.df.sindex.size, 5)
hits = list(self.df.sindex.intersection((2.5, 2.5, 4, 4), objects=True))
self.assertEqual(len(hits), 2)
self.assertEqual(hits[0].object, 3)
def test_lazy_build(self):
self.assert_(self.df._sindex is None)
self.assertEqual(self.df.sindex.size, 5)
self.assert_(self.df._sindex is not None)
def test_sindex_rebuild_on_set_geometry(self):
# First build the sindex
self.assert_(self.df.sindex is not None)
self.df.set_geometry(
[Point(x, y) for x, y in zip(range(5, 10), range(5, 10))],
inplace=True)
self.assert_(self.df._sindex_valid == False)
# Skip to accommodate Shapely geometries being unhashable
@unittest.skip
class TestJoinSindex(unittest.TestCase):
def setUp(self):
nybb_filename, nybb_zip_path = download_nybb()
self.boros = read_file(nybb_zip_path, vfs='zip://' + nybb_filename)
def test_merge_geo(self):
# First check that we gets hits from the boros frame.
tree = self.boros.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[self.boros.ix[hit.object]['BoroName'] for hit in hits],
['Bronx', 'Queens'])
# Check that we only get the Bronx from this view.
first = self.boros[self.boros['BoroCode'] < 3]
tree = first.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[first.ix[hit.object]['BoroName'] for hit in hits],
['Bronx'])
# Check that we only get Queens from this view.
second = self.boros[self.boros['BoroCode'] >= 3]
tree = second.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[second.ix[hit.object]['BoroName'] for hit in hits],
['Queens'])
# Get both the Bronx and Queens again.
merged = first.merge(second, how='outer')
self.assertEqual(len(merged), 5)
self.assertEqual(merged.sindex.size, 5)
tree = merged.sindex
hits = tree.intersection((1012821.80, 229228.26), objects=True)
self.assertEqual(
[merged.ix[hit.object]['BoroName'] for hit in hits],
['Bronx', 'Queens'])
| bsd-3-clause |
dimitri-yatsenko/datajoint-python | tests/test_relational_operand.py | 2 | 20759 | import random
import string
import pandas
import datetime
import numpy as np
from nose.tools import (assert_equal, assert_false, assert_true, raises, assert_set_equal,
assert_list_equal)
import datajoint as dj
from .schema_simple import (A, B, D, E, F, L, DataA, DataB, TTestUpdate, IJ, JI,
ReservedWord, OutfitLaunch)
from .schema import (Experiment, TTest3, Trial, Ephys, Child, Parent, SubjectA, SessionA,
SessionStatusA, SessionDateA)
def setup():
"""
module-level test setup
"""
A.insert(A.contents, skip_duplicates=True)
L.insert(L.contents, skip_duplicates=True)
B.populate()
D.populate()
E.populate()
Experiment.populate()
class TestRelational:
@staticmethod
def test_populate():
assert_false(B().progress(display=False)[0], 'B incompletely populated')
assert_false(D().progress(display=False)[0], 'D incompletely populated')
assert_false(E().progress(display=False)[0], 'E incompletely populated')
assert_true(len(B()) == 40, 'B populated incorrectly')
assert_true(len(B.C()) > 0, 'C populated incorrectly')
assert_true(len(D()) == 40, 'D populated incorrectly')
assert_true(len(E()) == len(B()) * len(D()) / len(A()), 'E populated incorrectly')
assert_true(len(E.F()) > 0, 'F populated incorrectly')
@staticmethod
def test_free_relation():
b = B()
free = dj.FreeTable(b.connection, b.full_table_name)
assert_true(repr(free).startswith('FreeTable') and b.full_table_name in repr(free))
r = 'n>5'
assert_equal((B() & r).make_sql(), (free & r).make_sql())
@staticmethod
def test_rename():
# test renaming
x = B().proj(i='id_a') & 'i in (1,2,3,4)'
lenx = len(x)
assert_equal(len(x), len(B() & 'id_a in (1,2,3,4)'),
'incorrect restriction of renamed attributes')
assert_equal(len(x & 'id_b in (1,2)'), len(B() & 'id_b in (1,2) and id_a in (1,2,3,4)'),
'incorrect restriction of renamed restriction')
assert_equal(len(x), lenx, 'restriction modified original')
y = x.proj(j='i')
assert_equal(len(y), len(B() & 'id_a in (1,2,3,4)'),
'incorrect projection of restriction')
z = y & 'j in (3, 4, 5, 6)'
assert_equal(len(z), len(B() & 'id_a in (3,4)'),
'incorrect nested subqueries')
@staticmethod
def test_rename_order():
"""
Renaming projection should not change the order of the primary key attributes.
See issues #483 and #516.
"""
pk1 = D.primary_key
pk2 = D.proj(a='id_a').primary_key
assert_list_equal(['a' if i == 'id_a' else i for i in pk1], pk2)
@staticmethod
def test_join():
# Test cartesian product
x = A()
y = L()
rel = x * y
assert_equal(len(rel), len(x) * len(y),
'incorrect join')
assert_equal(set(x.heading.names).union(y.heading.names), set(rel.heading.names),
'incorrect join heading')
assert_equal(set(x.primary_key).union(y.primary_key), set(rel.primary_key),
'incorrect join primary_key')
# Test cartesian product of restricted relations
x = A() & 'cond_in_a=1'
y = L() & 'cond_in_l=1'
rel = x * y
assert_equal(len(rel), len(x) * len(y),
'incorrect join')
assert_equal(set(x.heading.names).union(y.heading.names), set(rel.heading.names),
'incorrect join heading')
assert_equal(set(x.primary_key).union(y.primary_key), set(rel.primary_key),
'incorrect join primary_key')
# Test join with common attributes
cond = A() & 'cond_in_a=1'
x = B() & cond
y = D()
rel = x * y
assert_true(len(rel) >= len(x) and len(rel) >= len(y), 'incorrect join')
assert_false(rel - cond, 'incorrect join, restriction, or antijoin')
assert_equal(set(x.heading.names).union(y.heading.names), set(rel.heading.names),
'incorrect join heading')
assert_equal(set(x.primary_key).union(y.primary_key), set(rel.primary_key),
'incorrect join primary_key')
# test renamed join
x = B().proj(i='id_a') # rename the common attribute to achieve full cartesian product
y = D()
rel = x * y
assert_equal(len(rel), len(x) * len(y), 'incorrect join')
assert_equal(set(x.heading.names).union(y.heading.names), set(rel.heading.names),
'incorrect join heading')
assert_equal(set(x.primary_key).union(y.primary_key), set(rel.primary_key),
'incorrect join primary_key')
x = B().proj(a='id_a')
y = D()
rel = x * y
assert_equal(len(rel), len(x) * len(y), 'incorrect join')
assert_equal(set(x.heading.names).union(y.heading.names), set(rel.heading.names),
'incorrect join heading')
assert_equal(set(x.primary_key).union(y.primary_key), set(rel.primary_key),
'incorrect join primary_key')
# test pairing
# Approach 1: join then restrict
x = A.proj(a1='id_a', c1='cond_in_a')
y = A.proj(a2='id_a', c2='cond_in_a')
rel = x * y & 'c1=0' & 'c2=1'
lenx = len(x & 'c1=0')
leny = len(y & 'c2=1')
assert_equal(lenx + leny, len(A()),
'incorrect restriction')
assert_equal(len(rel), len(x & 'c1=0') * len(y & 'c2=1'),
'incorrect pairing')
# Approach 2: restrict then join
x = (A & 'cond_in_a=0').proj(a1='id_a')
y = (A & 'cond_in_a=1').proj(a2='id_a')
assert_equal(len(rel), len(x * y))
@staticmethod
def test_issue_376():
tab = TTest3()
tab.delete_quick()
tab.insert((
(1, '%%%'),
(2, 'one%'),
(3, 'one')))
assert_equal(len(tab & 'value="%%%"'), 1)
assert_equal(len(tab & {'value': "%%%"}), 1)
assert_equal(len(tab & 'value like "o%"'), 2)
assert_equal(len(tab & 'value like "o%%"'), 2)
@staticmethod
def test_issue_463():
assert_equal(((A & B) * B).fetch().size, len(A * B))
@staticmethod
def test_project():
x = A().proj(a='id_a') # rename
assert_equal(x.heading.names, ['a'],
'renaming does not work')
x = A().proj(a='(id_a)') # extend
assert_equal(set(x.heading.names), set(('id_a', 'a')),
'extend does not work')
# projection after restriction
cond = L() & 'cond_in_l'
assert_equal(len(D() & cond) + len(D() - cond), len(D()),
'failed semijoin or antijoin')
assert_equal(len((D() & cond).proj()), len((D() & cond)),
"projection failed: altered its argument's cardinality")
@staticmethod
def test_union():
x = set(zip(*IJ.fetch('i', 'j')))
y = set(zip(*JI.fetch('i', 'j')))
assert_true(len(x) > 0 and len(y) > 0 and len(IJ() * JI()) < len(x)) # ensure the IJ and JI are non-trivial
z = set(zip(*(IJ + JI).fetch('i', 'j'))) # union
assert_set_equal(x.union(y), z)
assert_equal(len(IJ + JI), len(z))
@staticmethod
@raises(dj.DataJointError)
def test_outer_union_fail():
"""Union of two tables with different primary keys raises an error."""
A() + B()
@staticmethod
def test_outer_union():
"""Outer union of two tables with different primary keys."""
t = Trial + Ephys
t.fetch()
assert_set_equal(set(t.heading.names), set(Trial.heading.names) | set(Ephys.heading.names))
len(t)
@staticmethod
def test_preview():
with dj.config(display__limit=7):
x = A().proj(a='id_a')
s = x.preview()
assert_equal(len(s.split('\n')), len(x) + 2)
@staticmethod
def test_heading_repr():
x = A * D
s = repr(x.heading)
assert_equal(len(list(1 for g in s.split('\n') if g.strip() and not g.strip().startswith(('-', '#')))),
len(x.heading.attributes))
@staticmethod
def test_aggregate():
x = B().aggregate(B.C())
assert_equal(len(x), len(B() & B.C()))
x = B().aggregate(B.C(), keep_all_rows=True)
assert_equal(len(x), len(B())) # test LEFT join
assert_equal(len((x & 'id_b=0').fetch()), len(B() & 'id_b=0')) # test restricted aggregation
x = B().aggregate(B.C(), 'n', count='count(id_c)', mean='avg(value)', max='max(value)', keep_all_rows=True)
assert_equal(len(x), len(B()))
y = x & 'mean>0' # restricted aggregation
assert_true(len(y) > 0)
assert_true(all(y.fetch('mean') > 0))
for n, count, mean, max_, key in zip(*x.fetch('n', 'count', 'mean', 'max', dj.key)):
assert_equal(n, count, 'aggregation failed (count)')
values = (B.C() & key).fetch('value')
assert_true(bool(len(values)) == bool(n),
'aggregation failed (restriction)')
if n:
assert_true(np.isclose(mean, values.mean(), rtol=1e-4, atol=1e-5),
"aggregation failed (mean)")
assert_true(np.isclose(max_, values.max(), rtol=1e-4, atol=1e-5),
"aggregation failed (max)")
@staticmethod
def test_aggr():
x = B.aggr(B.C)
l1 = len(x)
l2 = len(B & B.C)
assert_equal(l1, l2)
x = B().aggr(B.C(), keep_all_rows=True)
assert_equal(len(x), len(B())) # test LEFT join
assert_equal(len((x & 'id_b=0').fetch()), len(B() & 'id_b=0')) # test restricted aggregation
x = B().aggr(B.C(), 'n', count='count(id_c)', mean='avg(value)', max='max(value)', keep_all_rows=True)
assert_equal(len(x), len(B()))
y = x & 'mean>0' # restricted aggregation
assert_true(len(y) > 0)
assert_true(all(y.fetch('mean') > 0))
for n, count, mean, max_, key in zip(*x.fetch('n', 'count', 'mean', 'max', dj.key)):
assert_equal(n, count, 'aggregation failed (count)')
values = (B.C() & key).fetch('value')
assert_true(bool(len(values)) == bool(n),
'aggregation failed (restriction)')
if n:
assert_true(np.isclose(mean, values.mean(), rtol=1e-4, atol=1e-5),
"aggregation failed (mean)")
assert_true(np.isclose(max_, values.max(), rtol=1e-4, atol=1e-5),
"aggregation failed (max)")
@staticmethod
def test_semijoin():
"""
test that semijoins and antijoins are formed correctly
"""
x = IJ()
y = JI()
n = len(x & y.fetch(as_dict=True))
m = len(x - y.fetch(as_dict=True))
assert_true(n > 0 and m > 0)
assert_true(len(x) == m + n)
assert_true(len(x & y.fetch()) == n)
assert_true(len(x - y.fetch()) == m)
semi = x & y
anti = x - y
assert_true(len(semi) == n)
assert_true(len(anti) == m)
@staticmethod
def test_pandas_fetch_and_restriction():
q = (L & 'cond_in_l = 0')
df = q.fetch(format='frame') # pandas dataframe
assert_true(isinstance(df, pandas.DataFrame))
assert_equal(len(E & q), len(E & df))
@staticmethod
def test_restriction_by_null():
assert_true(len(Experiment & 'username is null') > 0)
assert_true(len(Experiment & 'username is not null') > 0)
@staticmethod
def test_restriction_between(): # see issue
assert_true(len(Experiment & 'username between "S" and "Z"') < len(Experiment()))
@staticmethod
def test_restrictions_by_lists():
x = D()
y = L() & 'cond_in_l'
lenx = len(x)
assert_true(lenx > 0 and len(y) > 0 and len(x & y) < len(x), 'incorrect test setup')
assert_equal(len(D()), len(D & dj.AndList([])))
assert_true(len(D & []) == 0)
assert_true(len(D & [[]]) == 0) # an OR-list of OR-list
lenx = len(x)
assert_true(lenx > 0 and len(y) > 0 and len(x & y) < len(x), 'incorrect test setup')
assert_equal(len(x & y), len(D * L & 'cond_in_l'),
'incorrect semijoin')
assert_equal(len(x - y), len(x) - len(x & y),
'incorrect antijoin')
assert_equal(len(y - x), len(y) - len(y & x),
'incorrect antijoin')
assert_true(len(x & []) == 0,
'incorrect restriction by an empty list')
assert_true(len(x & ()) == 0,
'incorrect restriction by an empty tuple')
assert_true(len(x & set()) == 0,
'incorrect restriction by an empty set')
assert_equal(len(x - []), lenx,
'incorrect restriction by an empty list')
assert_equal(len(x - ()), lenx,
'incorrect restriction by an empty tuple')
assert_equal(len(x - set()), lenx,
'incorrect restriction by an empty set')
assert_equal(len(x & {}), lenx,
'incorrect restriction by a tuple with no attributes')
assert_true(len(x - {}) == 0,
'incorrect restriction by a tuple with no attributes')
assert_equal(len(x & {'foo': 0}), lenx,
'incorrect restriction by a tuple with no matching attributes')
assert_true(len(x - {'foo': 0}) == 0,
'incorrect restriction by a tuple with no matching attributes')
assert_equal(len(x & y), len(x & y.fetch()),
'incorrect restriction by a list')
assert_equal(len(x - y), len(x - y.fetch()),
'incorrect restriction by a list')
w = A()
assert_true(len(w) > 0, 'incorrect test setup: w is empty')
assert_false(bool(set(w.heading.names) & set(y.heading.names)),
'incorrect test setup: w and y should have no common attributes')
assert_equal(len(w), len(w & y),
'incorrect restriction without common attributes')
assert_true(len(w - y) == 0,
'incorrect restriction without common attributes')
@staticmethod
def test_datetime():
"""Test date retrieval"""
date = Experiment().fetch('experiment_date')[0]
e1 = Experiment() & dict(experiment_date=str(date))
e2 = Experiment() & dict(experiment_date=date)
assert_true(len(e1) == len(e2) > 0, 'Two date restrictions do not yield the same result')
@staticmethod
def test_date():
"""Test date update"""
# https://github.com/datajoint/datajoint-python/issues/664
F.insert1((2, '2019-09-25'))
new_value = None
(F & 'id=2')._update('date', new_value)
assert_equal((F & 'id=2').fetch1('date'), new_value)
new_value = datetime.date(2019, 10, 25)
(F & 'id=2')._update('date', new_value)
assert_equal((F & 'id=2').fetch1('date'), new_value)
(F & 'id=2')._update('date')
assert_equal((F & 'id=2').fetch1('date'), None)
@staticmethod
def test_join_project():
"""Test join of projected relations with matching non-primary key"""
q = DataA.proj() * DataB.proj()
assert_true(len(q) == len(DataA()) == len(DataB()),
"Join of projected relations does not work")
@staticmethod
def test_ellipsis():
r = Experiment.proj(..., '- data_path').head(1, as_dict=True)
assert_set_equal(set(Experiment.heading).difference(r[0]), {'data_path'})
@staticmethod
@raises(dj.DataJointError)
def test_update_single_key():
"""Test that only one row can be updated"""
TTestUpdate()._update('string_attr', 'my new string')
@staticmethod
@raises(dj.DataJointError)
def test_update_no_primary():
"""Test that no primary key can be updated"""
TTestUpdate()._update('primary_key', 2)
@staticmethod
@raises(dj.DataJointError)
def test_update_missing_attribute():
"""Test that attribute is in table"""
TTestUpdate()._update('not_existing', 2)
@staticmethod
def test_update_string_attribute():
"""Test replacing a string value"""
rel = (TTestUpdate() & dict(primary_key=0))
s = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
rel._update('string_attr', s)
assert_equal(s, rel.fetch1('string_attr'), "Updated string does not match")
@staticmethod
def test_update_numeric_attribute():
"""Test replacing a string value"""
rel = (TTestUpdate() & dict(primary_key=0))
s = random.randint(0, 10)
rel._update('num_attr', s)
assert_equal(s, rel.fetch1('num_attr'), "Updated integer does not match")
rel._update('num_attr', None)
assert_true(np.isnan(rel.fetch1('num_attr')), "Numeric value is not NaN")
@staticmethod
def test_update_blob_attribute():
"""Test replacing a string value"""
rel = (TTestUpdate() & dict(primary_key=0))
s = rel.fetch1('blob_attr')
rel._update('blob_attr', s.T)
assert_equal(s.T.shape, rel.fetch1('blob_attr').shape, "Array dimensions do not match")
@staticmethod
def test_reserved_words():
"""Test the user of SQL reserved words as attributes"""
rel = ReservedWord()
rel.insert1({'key': 1, 'in': 'ouch', 'from': 'bummer', 'int': 3, 'select': 'major pain'})
assert_true((rel & {'key': 1, 'in': 'ouch', 'from': 'bummer'}).fetch1('int') == 3)
assert_true((rel.proj('int', double='from') & {'double': 'bummer'}).fetch1('int') == 3)
(rel & {'key': 1}).delete()
@staticmethod
@raises(dj.DataJointError)
def test_reserved_words2():
"""Test the user of SQL reserved words as attributes"""
rel = ReservedWord()
rel.insert1({'key': 1, 'in': 'ouch', 'from': 'bummer', 'int': 3, 'select': 'major pain'})
(rel & 'key=1').fetch('in') # error because reserved word `key` is not in backquotes. See issue #249
@staticmethod
def test_permissive_join_basic():
"""Verify join compatibility check is skipped for join"""
Child @ Parent
@staticmethod
def test_permissive_restriction_basic():
"""Verify join compatibility check is skipped for restriction"""
Child ^ Parent
@staticmethod
def test_complex_date_restriction():
# https://github.com/datajoint/datajoint-python/issues/892
"""Test a complex date restriction"""
q = OutfitLaunch & 'day between curdate() - interval 30 day and curdate()'
assert len(q) == 1
q = OutfitLaunch & 'day between curdate() - interval 4 week and curdate()'
assert len(q) == 1
q = OutfitLaunch & 'day between curdate() - interval 1 month and curdate()'
assert len(q) == 1
q = OutfitLaunch & 'day between curdate() - interval 1 year and curdate()'
assert len(q) == 1
q = OutfitLaunch & '`day` between curdate() - interval 30 day and curdate()'
assert len(q) == 1
q.delete()
@staticmethod
def test_null_dict_restriction():
# https://github.com/datajoint/datajoint-python/issues/824
"""Test a restriction for null using dict"""
F.insert([dict(id=5)])
q = F & dj.AndList([dict(id=5), 'date is NULL'])
assert len(q) == 1
q = F & dict(id=5, date=None)
assert len(q) == 1
@staticmethod
def test_joins_with_aggregation():
# https://github.com/datajoint/datajoint-python/issues/898
# https://github.com/datajoint/datajoint-python/issues/899
subjects = SubjectA.aggr(
SessionStatusA & 'status="trained_1a" or status="trained_1b"',
date_trained='min(date(session_start_time))')
assert len(SessionDateA * subjects) == 4
assert len(subjects * SessionDateA) == 4
subj_query = SubjectA.aggr(
SessionA * SessionStatusA & 'status="trained_1a" or status="trained_1b"',
date_trained='min(date(session_start_time))')
session_dates = ((SessionDateA * (subj_query & 'date_trained<"2020-12-21"')) &
'session_date<date_trained')
assert len(session_dates) == 1
| lgpl-2.1 |
drphilmarshall/LocalGroupHaloProps | localgroup/likelihood.py | 1 | 9210 | # ======================================================================
import localgroup
import numpy as np
from sklearn import mixture
from sklearn.grid_search import GridSearchCV
from scipy import linalg
import matplotlib as mpl
mpl.rc('text', usetex=True)
import triangle
# ======================================================================
class Likelihood(object):
"""
NAME
Likelihood
PURPOSE
Compute the likelihood of a given halo having the observed properties.
COMMENTS
This class allows you to generate the approximation used, as well as evaluate the likelihood.
INITIALISATION
METHODS
generate - draw sample vector from observations' distributions
approximate - compute KNN/GMM/etc estimate of PDF
evaluate - compute value of PDF at given vector
plot_samples - make triangle plot for observed data in M31 ref frame
set_PDF - set the L.PDF field
test_gauss - plots and calculates score vs ngauss components
preprocess_samples - zero the mean of the samples and scale by standard deviation
NB. "vector" refes to a position in 6D MW-M31 D,vr,vt space
BUGS
AUTHORS
This file is part of the LocalGroupHaloProps project,
distributed under the GPL v2,
by Marc Williamson Phil Marshall (KIPAC).
Please cite: Williamson et al in preparation.
HISTORY
2014-09-23 started Williamson and Marshall (KIPAC)
"""
# ======================================================================
def __init__(self, isPair=False):
self.T = localgroup.Triplet(isPair=isPair)
self.PDF = None
self.write_labels()
return
# ----------------------------------------------------------------------------
def write_labels(self):
if self.T.isPair:
# self.labs = ["MW_D", "MW_vr", "MW_vt"]
self.labels = ["$D^{\\rm M31} Mpc$", "$v_{\\rm rad}^{\\rm M31} km/s$", "$v_{\\rm tan}^{\\rm M31} km/s$"]
else:
# self.labs = ["MW_D", "MW_vr", "MW_vt", "M33_D", "M33_vr", "M33_vt"]
self.labels = ["$D^{\\rm M31} Mpc$", "$v_{\\rm rad}^{\\rm M31} km/s$", "$v_{\\rm tan}^{\\rm M31} km/s$", "$D^{\\rm M33} Mpc$", "$v_{\\rm rad}^{\\rm M33} km/s$", "$v_{\\rm tan}^{\\rm M33} km/s$","$D^{\\rm LMC} Mpc$", "$v_{\\rm rad}^{\\rm LMC} km/s$", "$v_{\\rm tan}^{\\rm LMC} km/s$"]
return
# ----------------------------------------------------------------------------
def generate(self,mode="observational",Nsamples=10000):
self.T.observe_halos(Nsamples=Nsamples)
self.T.transform_to_M31()
#dt = np.dtype([('MW_D', 'f8'), ('MW_vr', 'f8'), ('MW_vt', 'f8'), ('M33_D', 'f8'), ('M33_vr', 'f8'), ('M33_vt', 'f8')])
self.samples = np.transpose(np.array(self.T.get_kinematics()))
self.samples_means = np.array([np.mean(self.samples[:,i]) for i in range(self.samples.shape[1])])
self.samples_stds = np.array([np.std(self.samples[:,i]) for i in range(self.samples.shape[1])])
self.preprocess_samples()
# PJM: Might be better to have Triplet.get_kinematics do this
# packaging, perhaps... Also, might be better to leave the samples
# in the Triplet object, and feed them to the GMM...
return
# ----------------------------------------------------------------------------
def approximate(self, mode="GMM", cv=False):
if (mode == "GMM"):
if (cv):
def bic_scorefn(GMM, X): return GMM.bic(X)
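# Model selection sketch: fit GMMs with 1..maxMM components through the
# cross-validated grid search in test_GaussMM and keep the component
# count that minimises the BIC.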
score_dict = self.test_GaussMM(num_folds=10, score_fns=[bic_scorefn], maxMM=15)
bic = score_dict['bic_scorefn']
min_bic = np.min(bic)
min_bic_components = 1+np.array([i for i, x in enumerate(bic) if x == min_bic])
print "Minimum BIC: ", min_bic
print "Minimum BIC number of components: ", min_bic_components
self.set_PDF(mixture.GMM(min_bic_components[0], covariance_type='full'))
self.PDF.fit(self.samples)
else:
raise ValueError("Unrecognised approximation mode %s" % mode)
return
# ======================================================================
def evaluate(self, points, mode="GMM"):
if self.T.isPair:
eval_points = points[:,0:3]
else:
eval_points = points[:,0:9]
if (mode == "GMM"):
logprobs = self.PDF.score_mem(eval_points)
else:
raise ValueError("Unrecognised approximation mode %s" % mode )
return logprobs
# ======================================================================
def model_gof(self, n_points, color, fig=None, mode="GMM"):
self.write_labels()
if (mode == "GMM"):
drawn_points = self.PDF.sample(n_samples=n_points)*self.samples_stds + self.samples_means
drawn_points = drawn_points[drawn_points[:,2] > 0]
if not self.T.isPair: drawn_points = drawn_points[drawn_points[:,8] > 0]
figure = triangle.corner(drawn_points, labels=self.labels, quantiles=[0.16,0.5,0.84], fig=fig, show_titles=True, title_args={"fontsize": 12}, label_args={"fontsize": 16}, color=color, verbose=False)
else:
raise ValueError("Unrecognized approximation mode %s" % mode)
if fig==None: return figure
return fig
# ======================================================================
def plot_samples(self, ngauss, color, fig=None, overlay=False):
self.unprocess()
self.write_labels()
try:
figure = triangle.corner(self.samples, labels=self.labels, quantiles=[0.16,0.5,0.84], fig=fig, plot_contours=True, show_titles=True, title_args={"fontsize": 12}, label_args={"fontsize": 16}, color=color)
except AttributeError:
raise AttributeError("L.generate has not been run.")
# figure.gca().annotate("MW and M33 Observational Data Distributions (M31 centric)", xy=(0.5, 1.0), xycoords="figure fraction", xytext=(0, -5), textcoords="offset points", ha="center", va="top")
if overlay:
self.gaussianOverlay(figure, ngauss)
self.preprocess_samples()
return figure
# ======================================================================
def set_PDF(self, pdf):
self.PDF = pdf
return
# ======================================================================
def test_GaussMM(self, num_folds, score_fns, maxMM=10):
aic_scores = []
bic_scores = []
scores_dict = {}
params = {'n_components':np.arange(1,maxMM+1), 'covariance_type':['full']}
gmm = mixture.GMM()
for score_fn in score_fns:
grid = GridSearchCV(estimator=gmm, param_grid=params, cv=num_folds, scoring=score_fn)
grid.fit(self.samples)
scores_dict[score_fn.func_name] = np.array(grid.grid_scores_)[:,1]
return scores_dict
# ======================================================================
def preprocess_samples(self):
self.samples = (self.samples - self.samples_means)/self.samples_stds
return
# ======================================================================
def unprocess(self):
self.samples = self.samples*self.samples_stds + self.samples_means
return
# ======================================================================
def gaussianOverlay(self, figure, ngauss):
if self.T.isPair:
size = 3
else:
size = 6
n_gaussians = ngauss
#if ngauss > 5: raise AttributeError("Only 5 colors can be shown.")
colors = ['g', 'r', 'y', 'b', 'c']
transparency = 0.5
model = mixture.GMM(n_gaussians, covariance_type='full')
axes = np.reshape(figure.axes, (size,size))
model.fit(self.samples)
for i in range(size):
for j in range(size):
if j < i:
#model.fit(self.samples)
subplot = axes[i,j]
for gauss_num in range(n_gaussians):
mean = [model.means_[gauss_num][j], model.means_[gauss_num][i]]
covar = model.covars_[gauss_num]
covar = [[covar[j,j], covar[j,i]], [covar[i,j], covar[i,i]]]
color = colors[gauss_num%5]
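# Covariance ellipse of this 2D marginal: axis lengths are 2*sqrt(eigenvalue)
# (i.e. +/- 1 sigma along each principal axis) and the orientation comes from
# the leading eigenvector of the 2x2 covariance block.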
v, w = linalg.eigh(covar)
u = w[0]/linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi
ell = mpl.patches.Ellipse(mean, 2*np.sqrt(v[0]), 2*np.sqrt(v[1]), 180 + angle, color=color)
ell.set_clip_box(subplot.bbox)
ell.set_alpha(transparency)
subplot.add_artist(ell)
return
# ======================================================================
if __name__ == '__main__':
Lhood = Likelihood()
Lhood.generate()
print "Sample kinematic parameters: ",Lhood.samples
| gpl-2.0 |
shenzebang/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
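# Illustrative check (comment only, not executed by this example): two
# independent random labelings with 10 clusters on 100 samples give an
# adjusted_rand_score close to 0, whereas v_measure_score stays clearly
# positive, which is what the first figure below demonstrates.
#
# rng = np.random.RandomState(0)
# a, b = rng.randint(0, 10, 100), rng.randint(0, 10, 100)
# metrics.adjusted_rand_score(a, b) # ~ 0.0
# metrics.v_measure_score(a, b) # noticeably > 0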
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
gregreen/bayestar | scripts/compareColorsGridded.py | 1 | 12495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compareColorsGridded.py
#
# Copyright 2013 Greg Green <greg@greg-UX31A>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
import scipy, scipy.stats, scipy.special
import h5py
import time
import argparse, sys, os
import matplotlib.pyplot as plt
import matplotlib as mplib
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import Grid
from matplotlib.ticker import MaxNLocator, AutoMinorLocator, FormatStrFormatter
import hdf5io
class TStellarModel:
def __init__(self, fname):
self.load(fname)
def load(self, fname):
f = open(fname, 'r')
row = []
for l in f:
line = l.rstrip().lstrip()
if len(line) == 0: # Empty line
continue
if line[0] == '#': # Comment
continue
data = line.split()
if len(data) < 6:
txt = 'Error reading in stellar templates.\n'
txt += 'The following line does not have the correct number of entries (6 expected):\n'
txt += line
raise Exception(txt)
row.append([float(s) for s in data])
f.close()
template = np.array(row, dtype='f8')
fields = ['Mr', 'FeH', 'gr', 'ri', 'iz', 'zy']
dtype = [(field, 'f8') for field in fields]
self.data = np.empty(len(template), dtype=dtype)
for i,field in enumerate(fields):
self.data[field][:] = template[:,i]
self.FeH = np.unique(self.data['FeH'])
def get_isochrone(self, FeH):
if FeH >= np.max(self.FeH):
FeH_eval = np.max(self.FeH)
idx = (self.data['FeH'] == FeH_eval)
return self.data[idx]
elif FeH <= np.min(self.FeH):
FeH_eval = np.min(self.FeH)
idx = (self.data['FeH'] == FeH_eval)
return self.data[idx]
else:
k = np.arange(self.FeH.size)
#print np.where(FeH > self.FeH, k, -1)
#print self.FeH
idx = np.max(np.where(FeH > self.FeH, k, -1))
FeH_eval = [self.FeH[idx], self.FeH[idx+1]]
a = float(FeH_eval[1] - FeH) / float(FeH_eval[1] - FeH_eval[0])
idx = (self.data['FeH'] == FeH_eval[0])
d1 = self.data[idx]
idx = (self.data['FeH'] == FeH_eval[1])
d2 = self.data[idx]
if np.any(d1['Mr'] != d2['Mr']):
raise Exception('Expected Mr samples to be the same for each metallicity.')
fields = ['Mr', 'FeH', 'gr', 'ri', 'iz', 'zy']
dtype = [(field, 'f8') for field in fields]
ret = np.empty(len(d1), dtype=dtype)
#print FeH_eval
#print a
for field in fields:
ret[field][:] = a * d1[field][:] + (1. - a) * d2[field][:]
return ret
def read_photometry(fname, pixel):
f = h5py.File(fname, 'r')
dset = f['/photometry/pixel %d-%d' % (pixel[0], pixel[1])]
# Load in photometry from selected target
l = dset.attrs['l']
b = dset.attrs['b']
mags = dset['mag'][:]
errs = dset['err'][:]
EBV_SFD = dset['EBV'][:]
f.close()
return mags, errs, EBV_SFD, l, b
def read_inferences(fname, pix_idx):
f = h5py.File(fname, 'r')
dtype = [('lnZ','f8'), ('conv',np.bool),
('DM','f8'), ('EBV','f8'),
('Mr','f8'), ('FeH','f8')]
ret = None
dset = '/pixel %d-%d/stellar chains' % (pix_idx[0], pix_idx[1])
try:
lnZ = f[dset].attrs['ln(Z)'][:]
conv = f[dset].attrs['converged'][:].astype(np.bool)
best = f[dset][:, 1, 1:]
ret = np.empty(len(lnZ), dtype=dtype)
ret['lnZ'] = lnZ
ret['conv'] = conv
ret['EBV'] = best[:,0]
ret['DM'] = best[:,1]
ret['Mr'] = best[:,2]
ret['FeH'] = best[:,3]
except:
print 'Dataset "%s" does not exist.' % dset
return ret
def get_reddening_vector():
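# Extinction coefficients A_band / E(B-V) for the five bands (presumably
# g, r, i, z, y, given the colour names used in this script); they are
# applied in dereddened_mags below as mags - EBV * R.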
return np.array([3.172, 2.271, 1.682, 1.322, 1.087])
def dereddened_mags(mags, EBV):
R = get_reddening_vector()
if type(EBV) == float:
R.shape = (1, R.size)
R = np.repeat(R, len(mags), axis=0)
return mags - EBV * R
elif type(EBV) == np.ndarray:
return mags - np.einsum('i,j->ij', EBV, R)
else:
raise TypeError('EBV has unexpected type: %s' % type(EBV))
def plot_cluster(ax, template, mags):
pass
class KnotLogger:
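# Small helper that records the coordinates of mouse clicks on a matplotlib
# axes (optionally marking each click) so that the knot positions can be
# read back later via get_knots().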
def __init__(self, ax, marker='+', c='r', s=4):
self.ax = ax
self.cid = ax.figure.canvas.mpl_connect('button_press_event', self)
self.x = []
self.y = []
self.marker = marker
self.c = c
self.s = s
def __call__(self, event):
if event.inaxes != self.ax:
return
self.x.append(event.xdata)
self.y.append(event.ydata)
if self.marker != None:
event.inaxes.scatter([event.xdata], [event.ydata],
marker=self.marker, s=self.s, c=self.c)
self.ax.figure.canvas.draw()
def get_knots(self):
return self.x, self.y
def main():
parser = argparse.ArgumentParser(prog='compareColorsGridded.py',
description='Compare photometric colors to model colors.',
add_help=True)
parser.add_argument('--templates', '-t', type=str, required=True,
help='Stellar templates (in ASCII format).')
parser.add_argument('--photometry', '-ph', type=str, required=True,
help='Bayestar input file with photometry.')
parser.add_argument('--bayestar', '-bayes', type=str, default=None,
help='Bayestar output file with inferences.')
parser.add_argument('--color', '-c', type=str, default='lnZ',
choices=('lnZ', 'conv', 'EBV', 'DM', 'Mr', 'FeH'),
help='Field by which to color stars.')
#parser.add_argument('--name', '-n', type=str, required=True,
# help='Region name.')
parser.add_argument('--pixel', '-pix', type=int, nargs=2, required=True,
help='HEALPix nside and pixel index.')
parser.add_argument('--output', '-o', type=str, default=None, help='Plot filename.')
parser.add_argument('--show', '-sh', action='store_true', help='Show plot.')
if 'python' in sys.argv[0]:
offset = 2
else:
offset = 1
args = parser.parse_args(sys.argv[offset:])
templates = TStellarModel(args.templates)
color_names = ['gr', 'ri', 'iz', 'zy']
lnZ_max = 0.
Delta_lnZ = 15.
# Load photometry
ret = read_photometry(args.photometry, args.pixel)
if ret == None:
print 'Pixel not found.'
return 0
mags, errs, EBV, l, b = ret
mags = dereddened_mags(mags, EBV)
colors = -np.diff(mags, axis=1)
#print ' name: %s' % (args.name)
print ' (l, b): %.2f, %.2f' % (l, b)
print ' E(B-V)_SFD: %.4f' % (np.percentile(EBV, 95.))
print ' # of stars: %d' % (len(mags))
# Load bayestar inferences
params = None
vmin, vmax = None, None
if args.bayestar != None:
params = read_inferences(args.bayestar, args.pixel)
idx = np.isfinite(params['lnZ'])
n_rejected = np.sum(params['lnZ'] < np.percentile(params['lnZ'][idx], 95.) - 15.)
pct_rejected = 100. * float(n_rejected) / np.float(len(params))
n_nonconv = np.sum(~params['conv'])
pct_nonconv = 100. * float(n_nonconv) / float(len(params))
if args.color != None:
vmin, vmax = np.percentile(params[args.color], [2., 98.])
print vmin, vmax
print ' # rejected: %d (%.2f %%)' % (n_rejected, pct_rejected)
print ' # nonconverged: %d (%.2f %%)' % (n_nonconv, pct_nonconv)
print ' ln(Z_max): %.2f' % (np.max(params['lnZ'][idx]))
print ' ln(Z_95): %.2f' % (np.percentile(params['lnZ'][idx], 95.))
# Compute mask for each color
idx = []
for i in xrange(4):
idx.append( (mags[:,i] > 10.) & (mags[:,i] < 28.)
& (mags[:,i+1] > 10.) & (mags[:,i+1] < 28.) )
# Compute display limits for each color
lim = np.empty((4,2), dtype='f8')
for i in xrange(4):
lim[i,0], lim[i,1] = np.percentile(colors[idx[i],i], [2., 98.])
w = np.reshape(np.diff(lim, axis=1), (4,))
lim[:,0] -= 0.15 * w
lim[:,1] += 0.15 * w
lim_bounds = np.array([[-0.2, 1.6],
[-0.3, 2.0],
[-0.2, 1.1],
[-0.15, 0.45]])
for i in xrange(4):
lim[i,0] = max(lim[i,0], lim_bounds[i,0])
lim[i,1] = min(lim[i,1], lim_bounds[i,1])
# Set matplotlib style attributes
mplib.rc('text', usetex=True)
mplib.rc('xtick.major', size=6)
mplib.rc('xtick.minor', size=4)
mplib.rc('ytick.major', size=6)
mplib.rc('ytick.minor', size=4)
mplib.rc('xtick', direction='in')
mplib.rc('ytick', direction='in')
mplib.rc('axes', grid=False)
# Set up figure
fig = plt.figure(figsize=(6,6), dpi=150)
axgrid = Grid(fig, 111,
nrows_ncols=(3,3),
axes_pad=0.0,
add_all=False,
label_mode='L')
cdict = {'red': ((0., 1., 1.),
(1., 0., 0.)),
'green': ((0., 0., 0.),
(1., 0., 0.)),
'blue': ((0., 0., 0.),
(1., 1., 1.))}
br_cmap = LinearSegmentedColormap('br1', cdict)
#plt.register_cmap(br_cmap)
logger = []
cbar_ret = None
# Grid of axes
for row in xrange(3):
color_y = colors[:,row+1]
for col in xrange(row+1):
color_x = colors[:,col]
idx_xy = idx[col] & idx[row+1]
#print colors.shape
#print color_x.shape
#print idx_xy.shape
ax = axgrid[3*row + col]
fig.add_axes(ax)
logger.append(KnotLogger(ax, s=25))
# Empirical
c = 'k'
if (params != None) and (args.color != None):
#print params[args.color].shape
c = params[args.color][idx_xy]
cbar_ret = ax.scatter(color_x[idx_xy], color_y[idx_xy],
c=c, #cmap=br_cmap,
vmin=vmin, vmax=vmax,
s=3., alpha=0.30, edgecolor='none')
#idx_rej = lnZ < lnZ_max - Delta_lnZ
#idx_tmp = idx_xy & ~idx_rej
#ax.scatter(color_x[idx_tmp], color_y[idx_tmp],
# c='b', s=1.5, alpha=0.15, edgecolor='none')
#idx_tmp = idx_xy & idx_rej
#ax.scatter(color_x[idx_tmp], color_y[idx_tmp],
# c='r', s=1.5, alpha=0.15, edgecolor='none')
# Model
cx, cy = color_names[col], color_names[row+1]
for FeH in np.linspace(-2.5, 0., 30):
isochrone = templates.get_isochrone(FeH)
ax.plot(isochrone[cx], isochrone[cy], 'k-', lw=1., alpha=0.03)
ax.set_xlim(lim[col])
ax.set_ylim(lim[row+1])
# Format x-axes
for i,c in enumerate(color_names[:-1]):
color_label = r'$%s - %s$' % (c[0], c[1])
ax = axgrid[6+i]
ax.set_xlabel(color_label, fontsize=16)
ax.xaxis.set_major_locator(MaxNLocator(nbins=4))
ax.xaxis.set_minor_locator(AutoMinorLocator())
# Format y-axes
for i,c in enumerate(color_names[1:]):
color_label = r'$%s - %s$' % (c[0], c[1])
ax = axgrid[3*i]
ax.set_ylabel(color_label, fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(nbins=4))
ax.yaxis.set_minor_locator(AutoMinorLocator())
# Colorbar
x_0 = 0.10
y_0 = 0.10
x_1 = 0.98
y_1 = 0.98
fig.subplots_adjust(bottom=y_0, top=y_1, left=x_0, right=x_1) #right=0.85)
w = (x_1 - x_0) / 3.
h = (y_1 - y_0) / 3.
cx = x_0 + 2.3*w
cy = y_0 + h + 0.02
cw = 0.05
ch = 2. * h - 0.02
if (params != None) and (args.color != None):
cax = fig.add_axes([cx, cy, cw, ch])
norm = mplib.colors.Normalize(vmin=vmin, vmax=vmax)
mappable = mplib.cm.ScalarMappable(norm=norm)#cmap=br_cmap, norm=norm)
mappable.set_array(np.array([vmin, vmax]))
fig.colorbar(cbar_ret, cax=cax)#, ticks=[0., -3., -6., -9., -12., -15.])
cax.yaxis.set_label_position('right')
cax.yaxis.tick_right()
cax.set_ylabel(r'$\mathrm{ln} \left( Z \right)$', rotation='vertical', fontsize=16)
# Information on l.o.s.
txt = '$\ell = %.2f^{\circ}$\n' % l
txt += '$b = %.2f^{\circ}$\n' % b
txt += '$\mathrm{E} \! \left( B \! - \! V \\right) = %.3f$' % (np.median(EBV))
fig.text(x_0 + 1.1*w, y_0 + 2.5*h, txt, fontsize=14, ha='left', va='center')
if args.output != None:
fig.savefig(args.output, dpi=300)
if args.show:
plt.show()
for i,log in enumerate(logger):
x, y = log.get_knots()
if len(x) != 0:
print ''
print 'Axis %d:' % (i + 1)
print x
print y
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
rohanp/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
jairideout/scikit-bio | skbio/stats/distance/_anosim.py | 12 | 8664 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from functools import partial
import numpy as np
from scipy.stats import rankdata
from ._base import (_preprocess_input, _run_monte_carlo_stats, _build_results)
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def anosim(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using ANOSIM.
Analysis of Similarities (ANOSIM) is a non-parametric method that tests
whether two or more groups of objects (e.g., samples) are significantly
different based on a categorical factor. The ranks of the distances in the
distance matrix are used to calculate an R statistic, which ranges from
-1 (anti-grouping) to +1 (strong grouping), with an R value of 0 indicating
random grouping.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). An R statistic is computed for each
permutation and the p-value is the proportion of permuted R statistics that
are equal to or greater than the original (unpermuted) R statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
permanova
Notes
-----
See [1]_ for the original method reference. The general algorithm and
interface are similar to ``vegan::anosim``, available in R's vegan package
[2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Clarke, KR. "Non-parametric multivariate analyses of changes in
community structure." Australian journal of ecology 18.1 (1993):
117-143.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
Load a 4x4 distance matrix and grouping vector denoting 2 groups of
objects:
>>> from skbio import DistanceMatrix
>>> dm = DistanceMatrix([[0, 1, 1, 4],
... [1, 0, 3, 2],
... [1, 3, 0, 3],
... [4, 2, 3, 0]],
... ['s1', 's2', 's3', 's4'])
>>> grouping = ['Group1', 'Group1', 'Group2', 'Group2']
Run ANOSIM using 99 permutations to calculate the p-value:
>>> import numpy as np
>>> # make output deterministic; not necessary for normal use
>>> np.random.seed(0)
>>> from skbio.stats.distance import anosim
>>> anosim(dm, grouping, permutations=99)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value 0.67
number of permutations 99
Name: ANOSIM results, dtype: object
The return value is a ``pandas.Series`` object containing the results of
the statistical test.
To suppress calculation of the p-value and only obtain the R statistic,
specify zero permutations:
>>> anosim(dm, grouping, permutations=0)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value NaN
number of permutations 0
Name: ANOSIM results, dtype: object
You can also provide a ``pandas.DataFrame`` and a column denoting the
grouping instead of a grouping vector. The following ``DataFrame``'s
``Group`` column specifies the same grouping as the vector we used in the
previous examples:
>>> # make output deterministic; not necessary for normal use
>>> np.random.seed(0)
>>> import pandas as pd
>>> df = pd.DataFrame.from_dict(
... {'Group': {'s2': 'Group1', 's3': 'Group2', 's4': 'Group2',
... 's5': 'Group3', 's1': 'Group1'}})
>>> anosim(dm, df, column='Group', permutations=99)
method name ANOSIM
test statistic name R
sample size 4
number of groups 2
test statistic 0.25
p-value 0.67
number of permutations 99
Name: ANOSIM results, dtype: object
The results match the first example above.
Note that when providing a ``DataFrame``, the ordering of rows and/or
columns does not affect the grouping vector that is extracted. The
``DataFrame`` must be indexed by the distance matrix IDs (i.e., the row
labels must be distance matrix IDs).
If IDs (rows) are present in the ``DataFrame`` but not in the distance
matrix, they are ignored. The previous example's ``s5`` ID illustrates this
behavior: note that even though the ``DataFrame`` had 5 objects, only 4
were used in the test (see the "Sample size" row in the results above to
confirm this). Thus, the ``DataFrame`` can be a superset of the distance
matrix IDs. Note that the reverse is not true: IDs in the distance matrix
*must* be present in the ``DataFrame`` or an error will be raised.
"""
sample_size, num_groups, grouping, tri_idxs, distances = _preprocess_input(
distance_matrix, grouping, column)
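# Denominator of the R statistic: half the number of pairwise distances,
# M/2 with M = n(n-1)/2, so that R = (r_B - r_W) / (n(n-1)/4) is bounded
# between -1 and +1.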
divisor = sample_size * ((sample_size - 1) / 4)
ranked_dists = rankdata(distances, method='average')
test_stat_function = partial(_compute_r_stat, tri_idxs, ranked_dists,
divisor)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('ANOSIM', 'R', sample_size, num_groups, stat,
p_value, permutations)
def _compute_r_stat(tri_idxs, ranked_dists, divisor, grouping):
"""Compute ANOSIM R statistic (between -1 and +1)."""
# Create a matrix where True means that the two objects are in the same
# group. This ufunc requires that grouping is a numeric vector (e.g., it
# won't work with a grouping vector of strings).
grouping_matrix = np.equal.outer(grouping, grouping)
# Extract upper triangle from the grouping matrix. It is important to
# extract the values in the same order that the distances are extracted
# from the distance matrix (see ranked_dists). Extracting the upper
# triangle (excluding the diagonal) preserves this order.
grouping_tri = grouping_matrix[tri_idxs]
# r_W: mean rank of the within-group distances
r_W = np.mean(ranked_dists[grouping_tri])
# r_B: mean rank of the between-group distances
r_B = np.mean(ranked_dists[np.invert(grouping_tri)])
return (r_B - r_W) / divisor
| bsd-3-clause |
geodynamics/specfem2d | EXAMPLES/paper_axisymmetry_example/processAmplitudes.py | 2 | 1604 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 15:05:59 2015
Small script to process transmission losses data
@author: bottero
"""
from __future__ import (absolute_import, division, print_function)
import argparse # To deal with arguments :
# https://docs.python.org/2/library/argparse.html
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(
description='Small script to process transmission losses data')
parser.add_argument('files', nargs='+', type=argparse.FileType('r'),
help='files to be plotted')
parser.add_argument('--ref', type=float, default=1.0,
help='reference amplitude')
parser.add_argument('--source_vel', type=float, default=1500.0,
help='source medium velocity')
parser.add_argument('--noplot', action='store_true',
help='do not display any curves')
parser.add_argument('-v','--verbose', action='store_true',
help='show more details')
args = parser.parse_args()
for seismo in args.files: # Loop on the files given
print(seismo.name)
data = np.loadtxt(seismo) # Load the seismogram
X = data[:, 0] # First column is range
amplitudes = data[:, 1] # Second column is amplitude
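# Transmission loss in dB relative to the reference amplitude; note that a
# 10*log10 convention is applied directly to the amplitude ratio here.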
TL = -10 * np.log10(amplitudes / args.ref) # (2.0*args.source_vel**2)**2
if not args.noplot:
plt.figure()
plt.plot(X,TL,'b') #,marker,linestyle=linestyle,color = (r,g,b),markersize=marker_size)
plt.gca().invert_yaxis()
plt.show()
np.savetxt(seismo.name+"Losses",np.dstack((X,TL))[0])
| gpl-3.0 |
equialgo/scikit-learn | sklearn/neural_network/tests/test_stochastic_optimizers.py | 146 | 4310 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
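# Reference update for classical (heavy-ball) momentum, mirroring
# SGDOptimizer: v_new = momentum * v - lr * grad, then param += v_new.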
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
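# Nesterov variant as implemented by SGDOptimizer: form the new velocity
# (momentum * v - lr * grad), then step by momentum * v_new - lr * grad.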
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
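# Reference Adam update: exponential moving averages of the gradient (ms)
# and its square (vs), a bias-corrected step size
# lr * sqrt(1 - beta_2**t) / (1 - beta_1**t), and the parameter update
# -step * m / (sqrt(v) + epsilon).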
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
| bsd-3-clause |
misgeatgit/opencog | opencog/python/spatiotemporal/temporal_events/relation_formulas.py | 33 | 19534 | from math import fabs, sqrt, floor
from numpy import convolve, NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
import numpy
from scipy.stats.distributions import uniform_gen
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.temporal_interval_handling import calculateCenterMass
from spatiotemporal.time_intervals import TimeInterval
from utility.functions import FunctionPiecewiseLinear, FunctionHorizontalLinear, integral, FUNCTION_ZERO, almost_equals
DECOMPOSITION_PRECISION = 10 ** 14
__author__ = 'keyvan'
TEMPORAL_RELATIONS = {
'p': 'precedes',
'm': 'meets',
'o': 'overlaps',
'F': 'finished by',
'D': 'contains',
's': 'starts',
'e': 'equals',
'S': 'started by',
'd': 'during',
'f': 'finishes',
'O': 'overlapped by',
'M': 'met by',
'P': 'preceded by'
}
class TemporalRelation(dict):
all_relations = 'pmoFDseSdfOMP'
_type = None
_list = None
_vector = None
@staticmethod
def from_list(list_object):
relation = TemporalRelation()
for i, name in enumerate(TemporalRelation.all_relations):
value = list_object[i]
if not isinstance(value, (int, float)):
value = float(value)
relation[name] = value
return relation
def to_list(self):
if self._list is None:
self._list = []
for name in self.all_relations:
self._list.append(self[name])
return self._list
def to_vector(self):
if self._vector is None:
_list = self.to_list()
self._vector = numpy.array(_list)
return self._vector
@property
def type(self):
if self._type is None:
self._type = ''.join([name for name in TemporalRelation.all_relations if self[name] > 0])
return self._type
def __setitem__(self, relation_name, value):
if relation_name not in TemporalRelation.all_relations:
raise AttributeError("'{0}' is not a valid Allen relation'".format(relation_name))
dict.__setitem__(self, relation_name, floor(value * DECOMPOSITION_PRECISION) / DECOMPOSITION_PRECISION)
def __repr__(self):
return 'TemporalRelation({0})'.format(self.type)
def __str__(self):
return repr(self)
def __hash__(self):
return hash(tuple(self.to_list()))
class BaseRelationFormula(object):
def __init__(self):
self.bounds = {}
def duration_of(self, dist):
a, b = self.bounds_of(dist)
return fabs(a - b)
def bounds_of(self, dist):
# if dist in self.bounds:
# return self.bounds[dist]
bounds = calculate_bounds_of_probability_distribution(dist)
self.bounds[dist] = bounds
return bounds
def before_point(self, point_1_value, point_2_value):
return 0
def same_point(self, point_1_value, point_2_value):
return 1 - fabs(self.before_point(point_1_value,
point_2_value) - self.after_point(point_1_value, point_2_value))
def after_point(self, point_1_value, point_2_value):
return self.before_point(point_2_value, point_1_value)
def before_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_1)
def same_integral_bounds(self, dist_1, dist_2):
dist_1_a, dist_1_b = calculate_bounds_of_probability_distribution(dist_1)
dist_2_a, dist_2_b = calculate_bounds_of_probability_distribution(dist_2)
return max(dist_1_a, dist_2_a), min(dist_1_b, dist_2_b)
def after_integral_bounds(self, dist_1, dist_2):
return calculate_bounds_of_probability_distribution(dist_2)
def before(self, dist_1, dist_2):
return integral(lambda x: self.before_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.before_integral_bounds(dist_1, dist_2))
def same(self, dist_1, dist_2):
return integral(lambda x: self.same_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.same_integral_bounds(dist_1, dist_2))
def after(self, dist_1, dist_2):
return integral(lambda x: self.after_point(dist_1.pdf(x), dist_2.pdf(x)),
*self.after_integral_bounds(dist_1, dist_2))
def compare(self, dist_1, dist_2):
"""
returns before, same and after
"""
return self.before(dist_1, dist_2), self.same(dist_1, dist_2), self.after(dist_1, dist_2)
class FormulaCreator(object):
def __init__(self, relation_formula):
self.relation_formula = relation_formula
def temporal_relations_between(self, temporal_event_1, temporal_event_2):
dist_1_beginning, dist_1_ending = temporal_event_1.distribution_beginning, temporal_event_1.distribution_ending
dist_2_beginning, dist_2_ending = temporal_event_2.distribution_beginning, temporal_event_2.distribution_ending
self.relation_formula.bounds[dist_1_beginning] = temporal_event_1.a, temporal_event_1.beginning
self.relation_formula.bounds[dist_1_ending] = temporal_event_1.ending, temporal_event_1.b
self.relation_formula.bounds[dist_2_beginning] = temporal_event_2.a, temporal_event_2.beginning
self.relation_formula.bounds[dist_2_ending] = temporal_event_2.ending, temporal_event_2.b
combinations = [
(dist_1_beginning, dist_2_beginning),
(dist_1_beginning, dist_2_ending),
(dist_1_ending, dist_2_beginning),
(dist_1_ending, dist_2_ending)
]
return self.calculate_relations(combinations)
def calculate_relations(self, combinations=None):
"""
Calculates the values of the 13 relations based on the before, same,
and after values of the combinations between the beginning and
ending distributions of the two intervals obtained, e.g. from
the DecompositionFitter.
:param combinations: the 4 combinations between beginning and ending
distribution
:return: a dictionary containing the 13 relations as keys and their
degrees as values
"""
if combinations is None:
combinations = self.relation_formula.combinations
dist_1_beginning, dist_2_beginning = combinations[0]
dist_1_ending, dist_2_ending = combinations[3]
before = {}
same = {}
after = {}
# iterates over the 4 combinations between beginning and ending
for key in combinations:
before[key], same[key], after[key] = self.relation_formula.compare(*key)
result = TemporalRelation()
result['p'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
before[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['m'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
same[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['o'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['F'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['D'] = before[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['s'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['e'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['S'] = same[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['d'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * before[dist_1_ending, dist_2_ending]
result['f'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * same[dist_1_ending, dist_2_ending]
result['O'] = after[dist_1_beginning, dist_2_beginning] * before[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['M'] = after[dist_1_beginning, dist_2_beginning] * same[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
result['P'] = after[dist_1_beginning, dist_2_beginning] * after[dist_1_beginning, dist_2_ending] * \
after[dist_1_ending, dist_2_beginning] * after[dist_1_ending, dist_2_ending]
return result
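# Rough usage sketch (hypothetical event objects; assumes one of the concrete
# formulas defined below, e.g. RelationFormulaConvolution):
#
#     creator = FormulaCreator(RelationFormulaConvolution())
#     relations = creator.temporal_relations_between(event_1, event_2)
#     # 'relations' is a TemporalRelation mapping each of the 13 Allen keys
#     # 'p', 'm', 'o', 'F', 'D', 's', 'e', 'S', 'd', 'f', 'O', 'M', 'P' to its
#     # degree; the __main__ block below prints their sum as a sanity check.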
class RelationFormulaConvolution(BaseRelationFormula):
def function_convolution_uniform(self, bounds_1, bounds_2, probability=None):
a1, b1 = bounds_1
a2, b2 = bounds_2
length_1 = fabs(a1 - b1)
length_2 = fabs(a2 - b2)
convolution_bounds_a, convolution_bounds_b = a1 - b2, b1 - a2
trapezium_0, trapezium_1 = convolution_bounds_a, convolution_bounds_a + min(length_2, length_1)
trapezium_2, trapezium_3 = trapezium_1 + fabs(length_1 - length_2), convolution_bounds_b
#assert trapezium_2 + min(length_2, length_1) == trapezium_3
if probability is None:
probability = min(1 / length_1, 1 / length_2)
result = FunctionPiecewiseLinear(
{trapezium_0: 0, trapezium_1: probability, trapezium_2: probability, trapezium_3: 0},
FUNCTION_ZERO)
result.is_normalised = True
return result
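# Worked check with illustrative numbers: for two U(0, 2) inputs,
# length_1 = length_2 = 2 and the support of the difference is
# [a1 - b2, b1 - a2] = [-2, 2]; the trapezium degenerates to a triangle
# through {-2: 0, 0: 0.5, 2: 0}, i.e. the density of X1 - X2 peaking at 0.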
def function_convolution(self, dist_1, dist_2, bins=50):
a_1, b_1, a_2, b_2 = 0, 0, 0, 0
if dist_1 in self.bounds:
a_1, b_1 = self.bounds[dist_1]
else:
a_1, b_1 = calculate_bounds_of_probability_distribution(dist_1)
self.bounds[dist_1] = a_1, b_1
if dist_2 in self.bounds:
a_2, b_2 = self.bounds[dist_2]
else:
a_2, b_2 = calculate_bounds_of_probability_distribution(dist_2)
self.bounds[dist_2] = a_2, b_2
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
return self.function_convolution_uniform((a_1, b_1), (a_2, b_2))
convolution_bounds_a, convolution_bounds_b = min(a_1, a_2), max(b_1, b_2)
delta = fabs(convolution_bounds_a - convolution_bounds_b) / bins
convolution_interval = TimeInterval(convolution_bounds_a, convolution_bounds_b, bins)
x = [dist_1.pdf(t) for t in convolution_interval]
y = [dist_2.pdf(t) for t in reversed(convolution_interval)]
c = convolve(x, y)
dictionary_convolution = {}
for t in xrange(len(c)):
dictionary_convolution[delta * t] = c[t]
bias = calculateCenterMass(dictionary_convolution)[0] + dist_2.mean() - dist_1.mean()
dictionary_convolution_biased = {}
for t in dictionary_convolution:
dictionary_convolution_biased[t - bias] = dictionary_convolution[t]
convolution_function = FunctionPiecewiseLinear(dictionary_convolution_biased, FunctionHorizontalLinear(0))
return convolution_function.normalised()
def calculate_similarity(self, dist_1, dist_2):
if (type(dist_1.dist), type(dist_2.dist)) == (uniform_gen, uniform_gen):
length_dist_1 = self.duration_of(dist_1)
length_dist_2 = self.duration_of(dist_2)
return min(length_dist_1, length_dist_2) / sqrt(length_dist_1 * length_dist_2)
dist_1_mean, dist_2_mean = dist_1.mean(), dist_2.mean()
dist_1_transformed = lambda t: dist_1.pdf(t + dist_1_mean)
dist_2_transformed = lambda t: dist_2.pdf(t + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_transformed(t) * dist_2_transformed(t))
return integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
def compare(self, dist_1, dist_2):
convolution = self.function_convolution(dist_1, dist_2)
before = integral(convolution, NEGATIVE_INFINITY, 0)
after = integral(convolution, 0, POSITIVE_INFINITY)
similarity = self.calculate_similarity(dist_1, dist_2)
correlation = 1 - fabs(before - after)
same = similarity * correlation
return before, same, after
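# Descriptive note: the convolution above approximates the density of the
# difference between a point drawn from dist_1 and one drawn from dist_2, so
# its mass below zero is the 'before' degree and the mass above zero the
# 'after' degree; 'same' scales the distributions' overlap
# (calculate_similarity) by how balanced before and after are.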
class RelationFormulaGeometricMean(BaseRelationFormula):
def compare(self, dist_1, dist_2):
dist_1_interval = TimeInterval(*self.bounds_of(dist_1))
dist_2_interval = TimeInterval(*self.bounds_of(dist_2))
dictionary_input_output = {}
for time_step in dist_1_interval + dist_2_interval:
dictionary_input_output[time_step] = sqrt(dist_1.pdf(time_step) * dist_2.pdf(time_step))
geometric_mean = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
same = integral(geometric_mean, NEGATIVE_INFINITY, POSITIVE_INFINITY)
dist_1_mean, dist_1_skewness, dist_1_kurtosis = dist_1.stats(moments='msk')
dist_1_standard_deviation = dist_1.std()
dist_2_mean, dist_2_skewness, dist_2_kurtosis = dist_2.stats(moments='msk')
dist_2_standard_deviation = dist_2.std()
distance = fabs(dist_1_standard_deviation - dist_2_standard_deviation) + fabs(dist_1_skewness - dist_2_skewness)
distance += fabs(dist_1_kurtosis - dist_2_kurtosis)
delta = dist_1_mean - dist_2_mean
non_same_portion = 1.0 - same
portion_after, portion_before = 1.0, 0.0
if almost_equals(distance, 0):
if delta < 0:
portion_after, portion_before = 0.0, 1.0
else:
dist_1_standardized_pdf = lambda x: dist_1.pdf(dist_1_standard_deviation * x + dist_1_mean)
dist_2_standardized_pdf = lambda x: dist_2.pdf(dist_2_standard_deviation * x + dist_2_mean)
geometric_mean = lambda t: sqrt(dist_1_standardized_pdf(t) * dist_2_standardized_pdf(t))
geometric_mean_scaled = lambda p: geometric_mean(p / distance)
geometric_mean_scaled_length = max(self.duration_of(dist_1), self.duration_of(dist_2))
dictionary_input_output = {}
for time_step in TimeInterval(-geometric_mean_scaled_length / 2.0, geometric_mean_scaled_length / 2.0):
dictionary_input_output[time_step] = geometric_mean_scaled(time_step)
geometric_mean_scaled = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=FUNCTION_ZERO)
portion_after = integral(geometric_mean_scaled, NEGATIVE_INFINITY, delta)
portion_before = integral(geometric_mean_scaled, delta, POSITIVE_INFINITY)
after = portion_after / (portion_after + portion_before) * non_same_portion
return 1.0 - same - after, same, after
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm, uniform, expon
from spatiotemporal.temporal_events import TemporalEvent, TemporalEventPiecewiseLinear
import matplotlib.pyplot as plt
figure_number = 1
for event_1, event_2 in [
(
TemporalEvent(uniform(loc=3, scale=2), uniform(loc=7, scale=9)),
TemporalEvent(uniform(loc=0, scale=10), uniform(loc=13, scale=2))
),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=3, scale=2)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=6, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=8, scale=5), uniform(loc=15, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=3, scale=2), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=7), uniform(loc=8, scale=7)),
# TemporalEvent(uniform(loc=4, scale=1), uniform(loc=11, scale=2)),
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4)),
# TemporalEvent(uniform(loc=0, scale=11), uniform(loc=13, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=8), uniform(loc=6, scale=8)),
# TemporalEvent(uniform(loc=0, scale=22), uniform(loc=13, scale=8))
# ),
#
# (
# TemporalEvent(uniform(loc=2, scale=2), uniform(loc=7, scale=2)),
# TemporalEvent(uniform(loc=1, scale=4), uniform(loc=6, scale=4))
# ),
#
# (
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=4, scale=2)),
# TemporalEvent(uniform(loc=6, scale=2), uniform(loc=9, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=3), uniform(loc=15, scale=2)),
# TemporalEvent(uniform(loc=5, scale=2), uniform(loc=9, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=5, scale=3), uniform(loc=9, scale=2)),
# TemporalEvent(uniform(loc=1, scale=2), uniform(loc=15, scale=3))
# ),
#
# (
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2)),
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2))
# ),
#
# (
# TemporalEvent(uniform(loc=15, scale=2), uniform(loc=25, scale=2)),
# TemporalEvent(uniform(loc=0, scale=2), uniform(loc=10, scale=2))
# ),
#
# (
# TemporalEvent(norm(loc=1, scale=4.5), expon(loc=30, scale=2)),
# TemporalEvent(norm(loc=25, scale=4.5), expon(loc=60, scale=2))
# ),
#
# (
# TemporalEvent(expon(loc=1, scale=4.5), norm(loc=30, scale=2)),
# TemporalEvent(expon(loc=25, scale=4.5), norm(loc=60, scale=2))
# ),
#
# (
# TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
# TemporalEventPiecewiseLinear({7.5: 0, 8.5: 0.1, 9.5: 0.3, 10.5: 0.7, 11.5: 1},
# {13: 1, 14.5: 0.9, 15.3: 0.6, 17: 0.1, 20: 0})
# ),
]:
temporal_relations = event_1 * event_2
print '\nFigure' + str(figure_number)
print '----------------------'
print sum(temporal_relations.values())
for p in 'pmoFDseSdfOMP':
print p, temporal_relations[p]
figure_number += 1
event_1.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
event_2.plot(show_distributions=True).figure()
plt.show()
| agpl-3.0 |
rs2/pandas | pandas/tests/series/apply/test_series_apply.py | 1 | 29438 | from collections import Counter, defaultdict
from itertools import chain
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, isna
import pandas._testing as tm
from pandas.core.base import SpecificationError
class TestSeriesApply:
def test_apply(self, datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(
datetime_series.apply(np.sqrt), np.sqrt(datetime_series)
)
# element-wise apply
import math
tm.assert_series_equal(
datetime_series.apply(math.exp), np.exp(datetime_series)
)
# empty series
s = Series(dtype=object, name="foo", index=pd.Index([], name="bar"))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3], dtype=np.float64)
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
def f(x):
return (x, x + 1)
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
def f(x):
return x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ["sum", "mean", "min", "max", "std"]:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(["foo,bar"])
result = s.apply(str.split, args=(",",))
assert result[0] == ["foo", "bar"]
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(
np.random.randn(10, 3),
columns=["A", "B", "C"],
index=pd.date_range("1/1/2000", periods=10),
)
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
tsdf.A.agg({"foo": ["sum", "mean"]})
def test_apply_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = pd.Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = pd.Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
# GH 20714 bug fixed in: GH 24275
s = pd.Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
expected = pd.Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index(self):
# GH 21245
s = pd.Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
class TestSeriesAggregate:
def test_transform(self, string_series):
# transforming functions
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.apply(np.sqrt)
expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
# list-like
result = string_series.apply([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
result = string_series.apply(["sqrt"])
tm.assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
result = string_series.apply([np.sqrt, np.abs])
tm.assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
# we are trying to transform with an aggregator
msg = "cannot combine transform and aggregation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg(["sqrt", "max"])
msg = "cannot perform both aggregation and transformation"
with pytest.raises(ValueError, match=msg):
with np.errstate(all="ignore"):
string_series.agg({"foo": np.sqrt, "bar": "sum"})
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype="int64", name="series")
result = s.agg(["min", "max"])
expected = Series([0, 5], index=["min", "max"], name="series")
tm.assert_series_equal(result, expected)
result = s.agg({"foo": "min"})
expected = Series([0], index=["foo"], name="series")
tm.assert_series_equal(result, expected)
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"]})
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype="int64", name="series")
# nested renaming
msg = "nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
s.agg({"foo": ["min", "max"], "bar": ["sum", "mean"]})
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(
lambda x: Series([x, x ** 2], index=["x", "x^2"])
)
expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(
{
"count": "count",
"mean": "mean",
"std": "std",
"min": "min",
"25%": lambda x: x.quantile(0.25),
"50%": "median",
"75%": lambda x: x.quantile(0.75),
"max": "max",
}
)
tm.assert_series_equal(result, expected)
def test_reduce(self, string_series):
# reductions with named functions
result = string_series.agg(["sum", "mean"])
expected = Series(
[string_series.sum(), string_series.mean()],
["sum", "mean"],
name=string_series.name,
)
tm.assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg("size")
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(["size", "count", "mean"])
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
tm.assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("sum", 0),
("max", np.nan),
("min", np.nan),
("all", True),
("any", False),
("mean", np.nan),
("prod", 1),
("std", np.nan),
("var", np.nan),
("median", np.nan),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("sum", 6),
("max", 3),
("min", 1),
("all", True),
("any", True),
("mean", 2),
("prod", 6),
("std", 1),
("var", 1),
("median", 2),
],
),
tm.get_cython_table_params(
Series("a b c".split()),
[
("sum", "abc"),
("max", "c"),
("min", "a"),
("all", "c"), # see GH12863
("any", "a"),
],
),
),
)
def test_agg_cython_table(self, series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
result = series.agg(func)
if tm.is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series(dtype=np.float64),
[
("cumprod", Series([], Index([]), dtype=np.float64)),
("cumsum", Series([], Index([]), dtype=np.float64)),
],
),
tm.get_cython_table_params(
Series([np.nan, 1, 2, 3]),
[
("cumprod", Series([np.nan, 1, 2, 6])),
("cumsum", Series([np.nan, 1, 3, 6])),
],
),
tm.get_cython_table_params(
Series("a b c".split()), [("cumsum", Series(["a", "ab", "abc"]))]
),
),
)
def test_agg_cython_table_transform(self, series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"series, func, expected",
chain(
tm.get_cython_table_params(
Series("a b c".split()),
[
("mean", TypeError), # mean raises TypeError
("prod", TypeError),
("std", TypeError),
("var", TypeError),
("median", TypeError),
("cumprod", TypeError),
],
)
),
)
def test_agg_cython_table_raises(self, series, func, expected):
# GH21224
with pytest.raises(expected):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
def test_series_apply_no_suffix_index(self):
# GH36189
s = pd.Series([4] * 3)
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
tm.assert_series_equal(result, expected)
class TestSeriesMap:
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(self, index):
if isinstance(index, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int(self):
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference(self):
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = pd.Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter(self):
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key(self):
# https://github.com/pandas-dev/pandas/issues/17648
# Checks that np.nan key is appropriately mapped
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: "three"})
result = s.map(dictionary)
expected = Series(["missing", "missing", "three"])
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
s = Series([1, 2, 3])
not_a_dictionary = non_dict_mapping_subclass({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping_with_missing(self, non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
class NonDictMappingWithMissing(non_dict_mapping_subclass):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
not_a_dictionary = NonDictMappingWithMissing({3: "three"})
result = s.map(not_a_dictionary)
# __missing__ is a dict concept, not a Mapping concept,
# so it should not change the result!
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = pd.Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = pd.Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = pd.Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = pd.Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = pd.Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = pd.Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = pd.Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(
list("abbabcd"), categories=list("dcba"), ordered=True
)
exp = pd.Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
exp = pd.Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
def test_map_datetimetz(self):
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = pd.Series(values, name="XX")
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = pd.Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action="ignore")
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"vals,mapping,exp",
[
(list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
(list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3),
],
)
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
@pytest.mark.parametrize(
"dti,exp",
[
(
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
),
(
tm.makeTimeSeries(nper=30),
DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
),
],
)
def test_apply_series_on_date_time_index_aware_series(self, dti, exp):
# GH 25959
# Calling apply on a localized time series should not cause an error
index = dti.tz_localize("UTC").index
result = pd.Series(index).apply(lambda x: pd.Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scaler_on_date_time_index_aware_series(self):
# GH 25959
# Calling apply on a localized time series should not cause an error
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
result = pd.Series(series.index).apply(lambda x: 1)
tm.assert_series_equal(result, pd.Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision(self):
# GH 13228
ser = pd.Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_map_with_invalid_na_action_raises(self):
# https://github.com/pandas-dev/pandas/issues/32815
s = pd.Series([1, 2, 3])
msg = "na_action must either be 'ignore' or None"
with pytest.raises(ValueError, match=msg):
s.map(lambda x: x, na_action="____")
def test_apply_to_timedelta(self):
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
| bsd-3-clause |
CompPhysics/ComputationalPhysics | doc/Programs/LecturePrograms/programs/StatPhys/python/L2ising.py | 2 | 2185 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import math, sys
def periodic (i, limit, add):
return (i+limit+add) % limit
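# e.g. periodic(0, NSpins, -1) == NSpins - 1 and periodic(NSpins - 1, NSpins, 1) == 0,
# i.e. this helper implements the wrap-around (toroidal) boundary conditions used below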
def monteCarlo(Energies, temp, NSpins, MCcycles):
#Setup spin matrix, initialize to ground state
spin_matrix = np.zeros( (NSpins,NSpins), np.int8) + 1
E = M = 0.0
#Setup array for possible energy changes
w = np.zeros(17,np.float64)
for de in range(-8,9,4): #include +8
w[de+8] = math.exp(-de/temp)
#Calculate initial magnetization:
M = spin_matrix.sum()
#Calculate initial energy
for j in range(NSpins):
for i in range(NSpins):
E -= spin_matrix.item(i,j)*\
(spin_matrix.item(periodic(i,NSpins,-1),j) + spin_matrix.item(i,periodic(j,NSpins,1)))
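# only one neighbour per lattice direction (i-1 and j+1) is summed here,
# so with periodic boundaries each bond contributes exactly once to E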
#Start metropolis MonteCarlo computation
for i in range(MCcycles):
#Metropolis
#Loop over all spins, pick a random spin each time
for s in range(NSpins**2):
x = int(np.random.random()*NSpins)
y = int(np.random.random()*NSpins)
deltaE = 2*spin_matrix.item(x,y)*\
(spin_matrix.item(periodic(x,NSpins,-1), y) +\
spin_matrix.item(periodic(x,NSpins,1), y) +\
spin_matrix.item(x, periodic(y,NSpins,-1)) +\
spin_matrix.item(x, periodic(y,NSpins,1)))
if np.random.random() <= w[deltaE+8]:
#Accept!
spin_matrix[x,y] *= -1
E += deltaE
#Update expectation values
Energies[i] += E
# Main program
# Define number of spins
NSpins = 2
# Define number of Monte Carlo cycles
MCcycles = 1000
# Temperature
Temp = 1.0
# Declare arrays that hold averages
Energies = np.zeros(MCcycles)
# Obtain the energies to construct the diagram
monteCarlo(Energies,Temp,NSpins,MCcycles)
n, bins, patches = plt.hist(Energies, 100, facecolor='red')
plt.xlabel('$E$')
plt.ylabel('Energy distribution P(E)')
plt.title(r'Energy distribution at $k_BT=1.0$')
plt.axis([-9, 9, 0, 1000])
plt.grid(True)
plt.show()
| cc0-1.0 |
wlamond/scikit-learn | benchmarks/bench_lasso.py | 111 | 3364 | """
Benchmarks of Lasso vs LassoLars
First, we fix the number of features and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we fix the number of samples and increase the
number of features. Then we plot the computation time as a function of
the number of features.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jvbalen/catchy | feature_transforms.py | 1 | 7757 | from __future__ import division, print_function
import numpy as np
import pandas as pd
import scipy.stats as stats
import sklearn.neighbors as nn
import utils
data_dir = ''
def compute(segment_dict, features):
"""
Args:
segment_dict (dict): dictionary of song segments, containing a list of
segment ids (values) for a set of unique song identifiers (keys).
"""
data_dict = {}
# compute features
for feature in features:
print('computing ' + feature + '...')
feature_name, first_order_aggregates, second_order_aggregates = parse_feature(feature)
corpus_features = []
for song_id in segment_dict.keys():
song_features = []
for segment in segment_dict[song_id]:
raw_features = utils.read_feature([data_dir, feature_name, segment], skip_cols='auto')
segment_features = first_order(raw_features, first_order_aggregates, verbose=False)
song_features.append(segment_features)
if 'song' in second_order_aggregates:
song_features = second_order(song_features, second_order_aggregates, verbose=False)
corpus_features.extend(song_features)
if 'corpus' in second_order_aggregates:
# print(' in: len(corpus_features) = {}, corpus_features[0] = {}'.format(len(corpus_features), corpus_features[0]))
corpus_features = second_order(corpus_features, second_order_aggregates, verbose=False)
# print(' out: len(corpus_features) = {}, corpus_features[0] = {}'.format(len(corpus_features), corpus_features[0]))
data_dict[feature] = np.squeeze(corpus_features)
# add segment ids
song_ids = []
segments = []
for song_id in segment_dict.keys():
for segment in segment_dict[song_id]:
song_ids.append(song_id)
segments.append(segment)
data_dict['song.id'] = np.array(song_ids)
data_dict['segment.id'] = np.array(segments)
# convert to dataframe
return pd.DataFrame(data_dict)
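# Minimal usage sketch (hypothetical ids and feature string; assumes the
# corresponding feature files exist under data_dir):
#
#     segment_dict = {'song_1': ['song_1_seg_0', 'song_1_seg_1']}
#     df = compute(segment_dict, ['loudness.mean.corpus.pdf.log'])
#     # -> DataFrame with one row per segment, a column per feature string,
#     #    plus 'song.id' and 'segment.id' columns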
# FEATURE COMPUTATION
def parse_feature(feature):
""" Parse feature string into
(feature name, [1st order aggregates], [2nd order aggregates]).
'Grammar':
- feature name and aggregates are separated by dots, e.g. 'mfcc.entropy'
- feature name is first and contains no dots
- first order and second order aggregates are separated by one of 2 keywords:
'corpus' or 'song'
Ex.:
>>> parse_features('loudness.mean.song.pdf.log')
('loudness', ['mean'], ['song', 'pdf', 'log'])
"""
s = np.array(feature.split('.'))
split_points = (s == 'corpus') | (s == 'song')
split_points = np.nonzero(split_points)[0] if any(split_points) else [len(s)]
return s[0], s[1:split_points[0]].tolist(), s[split_points[-1]:].tolist()
def first_order(feature, aggregates, verbose=False):
if not type(aggregates) is list:
aggregates = [aggregates]
for aggregate in aggregates:
if verbose:
print(' first order computation: ' + aggregate)
if aggregate == 'log':
feature = np.log(feature)
elif aggregate == 'sqrt':
feature = np.sqrt(feature)
elif aggregate == 'minlog':
feature = np.log(1 - feature)
elif aggregate == 'minsqrt':
feature = np.sqrt(1 - feature)
elif aggregate == 'mean':
# feature = np.mean(feature, axis=0)
feature = np.nanmean(feature, axis=0)
elif aggregate == 'var':
feature = np.var(feature, axis=0)
elif aggregate == 'std':
# feature = np.std(feature, axis=0)
feature = np.nanstd(feature, axis=0)
elif aggregate == 'stdmean':
feature = np.hstack([np.mean(feature, axis=0), np.std(feature, axis=0)])
elif aggregate == 'cov':
feature = np.cov(feature, rowvar=False).flatten()
elif aggregate == 'totvar':
feature = np.array([np.mean(np.var(feature, axis=0))])
elif aggregate == 'totstd':
feature = np.array([np.mean(np.std(feature, axis=0))])
elif aggregate == 'entropy':
feature = feature.flatten()
feature = np.array([stats.entropy(feature)])
elif aggregate == 'normentropy':
feature = feature.flatten()
feature = np.array([stats.entropy(feature) / np.log(feature.size)])
elif aggregate == 'information':
feature = - np.log(feature)
return feature
def second_order(features, aggregates, verbose=False):
if not type(aggregates) is list:
aggregates = [aggregates]
features = np.asarray(features)
for aggregate in aggregates:
if verbose and not (aggregate == 'song' or aggregate == 'corpus'):
print(' second order computation ({}): {}'.format(aggregates[0], aggregate))
if aggregate == 'log':
features = np.log(features)
elif aggregate == 'sqrt':
features = np.sqrt(features)
elif aggregate == 'square':
features = np.array(features)**2
elif aggregate == 'minlog':
features = np.log(1 - np.array(features))
elif aggregate == 'minsqrt':
features = np.sqrt(1 - np.array(features))
elif aggregate == 'logit':
features = np.log(np.array(features)) - np.log(1 - np.array(features))
elif aggregate == 'kld':
m = np.sum(features, axis=0)
m /= np.sum(m)
features = [stats.entropy(f.flatten(), m.flatten()) for f in features]
elif aggregate == 'tau':
m = np.sum(features, axis=0)
m /= np.sum(m)
features = [stats.kendalltau(f.flatten(), m.flatten())[0] for f in features]
elif aggregate == 'dot':
m = np.sum(features, axis=0)
features = [np.dot(f.flatten(), m.flatten()) for f in features]
elif aggregate == 'corr':
m = np.sum(features, axis=0)
features = [np.correlate(f.flatten(), m.flatten()) for f in features]
elif aggregate == 'crossentropy' or aggregate == 'information':
m = np.sum(features, axis=0)
m = m.flatten()/np.sum(m)
features = [-np.nansum(np.log(m) * f.flatten()/np.sum(f)) for f in features]
elif aggregate == 'pdf':
n, d = features.shape
finite_rows = np.all(np.isfinite(features), axis=1)
features = features[finite_rows]
s = np.std(features)
bw_factor = n**(-1./(5))*s if d == 1 and s > 0.0 else 1.0
kde = nn.KernelDensity(bandwidth=bw_factor)
kde.fit(features)
scores = kde.score_samples(features)
features = np.zeros((n,))
features[finite_rows] = np.exp(scores)
elif aggregate == 'indeppdf':
# above for independent dimensions: fit each dim and add log scores
kde = nn.KernelDensity(bandwidth=1.0)
scores = np.zeros(len(features))
for feat_dim in features.T:
feat_dim = feat_dim.reshape([-1, 1])
kde.fit(feat_dim)
scores += kde.score_samples(feat_dim)
features = np.exp(scores)
elif aggregate == 'cdf':
f0 = np.min(features)
kde = stats.gaussian_kde(features)
features = [kde.integrate_box(f0, f) for f in features]
elif aggregate == 'rank':
features = (np.argsort(np.argsort(features)) + 0.5) * (1.0 / len(features))
# features = [np.squeeze(f) for f in features]
return features
| mit |
perryjohnson/biplaneblade | lib/poly_utils.py | 1 | 3838 | """Preprocessing tools to cut polygons and write their new coordinates.
Author: Perry Roth-Johnson
Last modified: March 14, 2014
"""
import matplotlib.pyplot as plt
from shapely.geometry import Polygon, Point, LineString
from descartes import PolygonPatch
# cut up the layer polygons to prepare for grid generation
def cut_polygon(original, bounding, ext_label, area_threshold=1.0e-08):
"""Cut the original layer polygon with the bounding polygon."""
try:
p_new = original.intersection(bounding)
except:
raise Warning("The original polygon does not intersect the bounding polygon!")
# check if extra negligibly small polygons were created
if p_new.geom_type != 'Polygon':
fmt = " In '{0}' region, found a non-Polygon made of {1} polygons. "
print fmt.format(ext_label,len(p_new.geoms))
print ' -> area threshold set to', area_threshold
good_poly_index = None
num_good_polys_found = 0
for i,p in enumerate(p_new.geoms):
fmt2 = ' polygon[{0}]: area={1:5.3e}, centroid=({2:.4},{3:.4})'
print fmt2.format(i,p.area,p.centroid.xy[0][0],p.centroid.xy[1][0])
if p.area > area_threshold:
# only keep polygons with significant area
good_poly_index = i
num_good_polys_found += 1
print ' ...keep polygon[{0}]!'.format(i)
else:
# throw out any polygon with insignificant area
print ' ...throw out polygon[{0}]'.format(i)
# overwrite p_new with the good polygon
if num_good_polys_found > 1:
raise Warning("More than 1 good polygon found. Check bounding polygon coords!")
try:
p_new = p_new.geoms[good_poly_index]
except TypeError:
raise Warning("The original polygon does not intersect the bounding polygon!")
return p_new
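# Minimal sketch of cut_polygon on its own (coordinates are illustrative only):
#
#     from shapely.geometry import box
#     layer = box(0.0, 0.0, 2.0, 1.0)  # original layer polygon
#     bound = box(1.0, 0.0, 3.0, 1.0)  # bounding polygon
#     piece = cut_polygon(layer, bound, ext_label='right half')
#     # piece is the overlapping 1 x 1 rectangle; if the intersection were
#     # multi-part, pieces below area_threshold would be discarded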
def plot_polygon(p, face_color, edge_color='r'):
"""Plot a polygon on the current axes."""
# get the current axes, so we can add polygons to the plot
ax = plt.gcf().gca()
patch = PolygonPatch(p, fc=face_color, ec=edge_color, alpha=0.8)
ax.add_patch(patch)
def cut_plot_and_write_alt_layer(part, material, ext_label, b_polygon,
area_threshold=1.0e-08, airfoil=None):
"""Cut, plot, and write a polygon for an alternate layer.
Parameters
----------
part : object, the structural part containing the material layer to be cut
material : str, the name of the material layer to be cut
ext_label : str, extension label to name the new layer created from the cut
b_polygon : shapely.polygon object, the bounding polygon
area_threshold : float, polygons with areas smaller than this threshold
will be thrown out
airfoil : str, set to either 'lower' or 'upper', to designate which airfoil
should be cut in a biplane station (default=None for monoplane
stations)
"""
l = part.layer[material]
# cut polygon
p_new = cut_polygon(l.polygon, b_polygon, ext_label, area_threshold)
# plot polygon
plot_polygon(p_new, l.face_color)
# save alt layer as an attribute of this part
new_layer_name = material + ', ' + ext_label
l.parent_part.add_new_layer(new_layer_name, p_new, material)
new_layer = part.alt_layer[new_layer_name]
# write polygon exterior to text file in `station_path`
new_layer.write_polygon_edges(airfoil=airfoil)
# find and plot the corners of the new layer
new_layer.find_corners(b_polygon)
plot_corners(new_layer.corners)
def plot_corners(list_of_corners):
ax = plt.gca()
for corner in list_of_corners:
ax.scatter(corner.x, corner.y, s=40, alpha=0.8, zorder=100)
| gpl-3.0 |
alexchao56/sklearn-theano | sklearn_theano/feature_extraction/caffe/caffemodel.py | 2 | 15223 | """Makes .caffemodel files readable for sklearn-theano"""
from __future__ import print_function
import os
import numpy as np
from collections import OrderedDict
import theano
import theano.tensor as T
from ...datasets import get_dataset_dir, download
def _get_caffe_dir():
"""Function to find caffe installation. First checks for pycaffe. If not
present, checks for $CAFFE_DIR environment variable."""
try:
import caffe
from os.path import dirname
caffe_dir = dirname(dirname(dirname(caffe.__file__)))
except ImportError:
caffe_dir = os.environ.get("CAFFE_DIR", None)
return caffe_dir
def _compile_caffe_protobuf(caffe_proto=None,
proto_src_dir=None,
python_out_dir=None):
"""Compiles protocol buffer to python_out_dir"""
if caffe_proto is None:
caffe_dir = _get_caffe_dir()
if caffe_dir is None:
# No CAFFE_DIR found, neither could pycaffe be imported.
# Search for caffe.proto locally
caffe_dataset_dir = get_dataset_dir('caffe')
caffe_proto = os.path.join(caffe_dataset_dir, 'caffe.proto')
if os.path.exists(caffe_proto):
# Found caffe.proto, everything fine
pass
else:
print("Downloading caffe.proto")
url = ('https://raw.githubusercontent.com/'
'BVLC/caffe/master/src/caffe/proto/caffe.proto')
download(url, caffe_proto, progress_update_percentage=1)
# raise ValueError("Cannot find $CAFFE_DIR environment variable"
# " specifying location of Caffe files."
# " Nor does there seem to be pycaffe. Please"
# " provide path to caffe.proto file in the"
# " caffe_proto kwarg, e.g. "
# "/home/user/caffe/src/caffe/proto/caffe.proto")
else:
caffe_proto = os.path.join(caffe_dir, "src", "caffe", "proto",
"caffe.proto")
if not os.path.exists(caffe_proto):
raise ValueError(
("Could not find {pf}. Please specify the correct"
" caffe.proto file in the caffe_proto kwarg"
" e.g. /home/user/caffe/src/caffe/proto/caffe.proto").format(
pf=caffe_proto))
if proto_src_dir is None:
proto_src_dir = os.path.dirname(caffe_proto)
if python_out_dir is None:
python_out_dir = os.path.dirname(os.path.abspath(__file__))
protoc_command = ("protoc -I={srcdir}"
" --python_out={outdir} {protofile}").format(
srcdir=proto_src_dir, outdir=python_out_dir, protofile=caffe_proto)
import commands
status, output = commands.getstatusoutput(protoc_command)
if status != 0:
raise Exception(
"Error executing protoc: code {c}, message {m}".format(
c=status, m=output))
def _get_caffe_pb2():
from ...models.bvlc_googlenet import caffe_pb2
return caffe_pb2
def _open_caffe_model(caffemodel_file):
"""Opens binary format .caffemodel files. Returns protobuf object."""
caffe_pb2 = _get_caffe_pb2()
try:
open(caffemodel_file, 'r', encoding="latin1").close()
f = open(caffemodel_file, 'r', encoding="latin1")
except TypeError:
# Python 2 does not have encoding arg
f = open(caffemodel_file, 'rb')
binary_content = f.read()
from IPython import embed; embed()
raise ValueError()
protobuf = caffe_pb2.NetParameter()
protobuf.ParseFromString(binary_content)
return protobuf
def _blob_to_ndarray(blob):
"""Converts a caffe protobuf blob into an ndarray"""
dimnames = ["num", "channels", "height", "width"]
data = np.array(blob.data)
shape = tuple([getattr(blob, dimname) for dimname in dimnames])
return data.reshape(shape)
LAYER_PROPERTIES = dict(
DATA=None,
CONVOLUTION=('blobs',
('convolution_param', 'stride'),
('convolution_param', 'stride_h'),
('convolution_param', 'stride_w'),
('convolution_param', 'pad'),
('convolution_param', 'pad_h'),
('convolution_param', 'pad_w')),
RELU=None,
POOLING=(('pooling_param', 'kernel_size'),
('pooling_param', 'kernel_h'),
('pooling_param', 'kernel_w'),
('pooling_param', 'stride'),
('pooling_param', 'stride_h'),
('pooling_param', 'stride_w'),
('pooling_param', 'pad'),
('pooling_param', 'pad_h'),
('pooling_param', 'pad_w'),
('pooling_param', 'pool')
),
SPLIT=None,
LRN=(('lrn_param', 'local_size'),
('lrn_param', 'alpha'),
('lrn_param', 'beta'),
('lrn_param', 'norm_region')),
CONCAT=(('concat_param', 'concat_dim'),),
INNER_PRODUCT=('blobs',),
SOFTMAX_LOSS=None,
DROPOUT=None
)
def _get_property(obj, property_path):
if isinstance(property_path, tuple):
if len(property_path) == 1:
return getattr(obj, property_path[0])
else:
return _get_property(getattr(obj, property_path[0]),
property_path[1:])
else:
return getattr(obj, property_path)
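# e.g. _get_property(layer, ('pooling_param', 'kernel_size')) is equivalent to
# layer.pooling_param.kernel_size, which lets LAYER_PROPERTIES describe nested
# protobuf fields with plain tuples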
def _parse_caffe_model(caffe_model):
caffe_pb2 = _get_caffe_pb2() # need to remove this dependence on pb here
try:
_layer_types = caffe_pb2.LayerParameter.LayerType.items()
except AttributeError:
_layer_types = caffe_pb2.V1LayerParameter.LayerType.items()
# create a dictionary that indexes both ways, number->name, name->number
layer_types = dict(_layer_types)
for v, k in _layer_types:
layer_types[k] = v
if not hasattr(caffe_model, "layers"):
# Consider it a filename
caffe_model = _open_caffe_model(caffe_model)
layers_raw = caffe_model.layers
parsed = []
for layer in layers_raw:
# standard properties
ltype = layer_types[layer.type]
layer_descriptor = dict(type=ltype,
name=layer.name,
top_blobs=tuple(layer.top),
bottom_blobs=tuple(layer.bottom))
parsed.append(layer_descriptor)
# specific properties
specifics = LAYER_PROPERTIES[ltype]
if specifics is None:
continue
for param in specifics:
if param == 'blobs':
layer_descriptor['blobs'] = map(_blob_to_ndarray,
layer.blobs)
else:
param_name = '__'.join(param)
param_value = _get_property(layer, param)
layer_descriptor[param_name] = param_value
return parsed
from sklearn_theano.base import (Convolution, Relu, MaxPool, FancyMaxPool,
LRN, Feedforward, ZeroPad,
CaffePool)
def parse_caffe_model(caffe_model, float_dtype='float32', verbose=0):
if isinstance(caffe_model, str) or not isinstance(caffe_model, list):
parsed_caffe_model = _parse_caffe_model(caffe_model)
else:
parsed_caffe_model = caffe_model
layers = OrderedDict()
inputs = OrderedDict()
blobs = OrderedDict()
for i, layer in enumerate(parsed_caffe_model):
layer_type = layer['type']
layer_name = layer['name']
top_blobs = layer['top_blobs']
bottom_blobs = layer['bottom_blobs']
layer_blobs = layer.get('blobs', None)
if verbose > 0:
print("%d\t%s\t%s" % (i, layer_type, layer_name))
if layer_type == 'DATA':
# DATA layers contain input data in top_blobs, create input
# variables, float for 'data' and int for 'label'
for data_blob_name in top_blobs:
if data_blob_name == 'label':
blobs['label'] = T.ivector()
inputs['label'] = blobs['label']
else:
blobs[data_blob_name] = T.tensor4(dtype=float_dtype)
inputs[data_blob_name] = blobs[data_blob_name]
elif layer_type == 'CONVOLUTION':
# CONVOLUTION layers take input from bottom_blob, convolve with
# layer_blobs[0], and add bias layer_blobs[1]
stride = layer['convolution_param__stride']
stride_h = max(layer['convolution_param__stride_h'], stride)
stride_w = max(layer['convolution_param__stride_w'], stride)
if stride_h > 1 or stride_w > 1:
subsample = (stride_h, stride_w)
else:
subsample = None
pad = layer['convolution_param__pad']
pad_h = max(layer['convolution_param__pad_h'], pad)
pad_w = max(layer['convolution_param__pad_w'], pad)
conv_filter = layer_blobs[0].astype(float_dtype)[..., ::-1, ::-1]
conv_bias = layer_blobs[1].astype(float_dtype).ravel()
convolution_input = blobs[bottom_blobs[0]]
convolution = Convolution(conv_filter, biases=conv_bias,
activation=None, subsample=subsample,
input_dtype=float_dtype)
# If padding is specified, need to pad. In practice, I think
# caffe prevents padding that would make the filter see only
# zeros, so technically this can also be obtained by sensibly
# cropping a border_mode=full convolution. However, subsampling
# may then be off by 1 and would have to be done separately :/
if pad_h > 0 or pad_w > 0:
zp = ZeroPad((pad_h, pad_w))
zp._build_expression(convolution_input)
expression = zp.expression_
layers[layer_name] = (zp, convolution)
else:
layers[layer_name] = convolution
expression = convolution_input
convolution._build_expression(expression)
expression = convolution.expression_
# if subsample is not None:
# expression = expression[:, :, ::subsample[0],
# ::subsample[1]]
blobs[top_blobs[0]] = expression
elif layer_type == "RELU":
# RELU layers take input from bottom_blobs, set everything
# negative to zero and write the result to top_blobs
relu_input = blobs[bottom_blobs[0]]
relu = Relu()
relu._build_expression(relu_input)
layers[layer_name] = relu
blobs[top_blobs[0]] = relu.expression_
elif layer_type == "POOLING":
# POOLING layers take input from bottom_blobs, perform max
# pooling according to stride and kernel size information
# and write the result to top_blobs
pooling_input = blobs[bottom_blobs[0]]
kernel_size = layer['pooling_param__kernel_size']
kernel_h = max(layer['pooling_param__kernel_h'], kernel_size)
kernel_w = max(layer['pooling_param__kernel_w'], kernel_size)
stride = layer['pooling_param__stride']
stride_h = max(layer['pooling_param__stride_h'], stride)
stride_w = max(layer['pooling_param__stride_w'], stride)
pad = layer['pooling_param__pad']
pad_h = max(layer['pooling_param__pad_h'], pad)
pad_w = max(layer['pooling_param__pad_w'], pad)
pool_types = {0: 'max', 1: 'avg'}
pool_type = pool_types[layer['pooling_param__pool']]
# print "POOL TYPE is %s" % pool_type
# pooling = FancyMaxPool((kernel_h, kernel_w),
# (stride_h, stride_w),
# ignore_border=False)
pooling = CaffePool((kernel_h, kernel_w),
(stride_h, stride_w),
(pad_h, pad_w),
pool_type=pool_type)
pooling._build_expression(pooling_input)
layers[layer_name] = pooling
blobs[top_blobs[0]] = pooling.expression_
elif layer_type == "DROPOUT":
# DROPOUT may figure in some networks, but it is only relevant
# at the learning stage, not at the prediction stage.
pass
elif layer_type == "SOFTMAX_LOSS":
softmax_input = blobs[bottom_blobs[0]]
# have to write our own softmax expression, because of shape
# issues
si = softmax_input.reshape((softmax_input.shape[0],
softmax_input.shape[1], -1))
shp = (si.shape[0], 1, si.shape[2])
exp = T.exp(si - si.max(axis=1).reshape(shp))
softmax_expression = (exp / exp.sum(axis=1).reshape(shp)
).reshape(softmax_input.shape)
layers[layer_name] = "SOFTMAX"
blobs[top_blobs[0]] = softmax_expression
elif layer_type == "SPLIT":
split_input = blobs[bottom_blobs[0]]
for top_blob in top_blobs:
blobs[top_blob] = split_input
# Should probably make a class to be able to add to layers
layers[layer_name] = "SPLIT"
elif layer_type == "LRN":
# Local normalization layer
lrn_input = blobs[bottom_blobs[0]]
lrn_factor = layer['lrn_param__alpha']
lrn_exponent = layer['lrn_param__beta']
axis = {0:'channels'}[layer['lrn_param__norm_region']]
nsize = layer['lrn_param__local_size']
lrn = LRN(nsize, lrn_factor, lrn_exponent, axis=axis)
lrn._build_expression(lrn_input)
layers[layer_name] = lrn
blobs[top_blobs[0]] = lrn.expression_
elif layer_type == "CONCAT":
input_expressions = [blobs[bottom_blob] for bottom_blob
in bottom_blobs]
axis = layer['concat_param__concat_dim']
output_expression = T.concatenate(input_expressions, axis=axis)
blobs[top_blobs[0]] = output_expression
layers[layer_name] = "CONCAT"
elif layer_type == "INNER_PRODUCT":
weights = layer_blobs[0].astype(float_dtype)
biases = layer_blobs[1].astype(float_dtype).squeeze()
fully_connected_input = blobs[bottom_blobs[0]]
# fc_layer = Feedforward(weights, biases, activation=None)
fc_layer = Convolution(weights.transpose((2, 3, 0, 1)), biases,
activation=None)
fc_layer._build_expression(fully_connected_input)
layers[layer_name] = fc_layer
blobs[top_blobs[0]] = fc_layer.expression_
else:
raise ValueError('layer type %s is not known to sklearn-theano'
% layer_type)
return layers, blobs, inputs
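# Illustrative sketch (not part of the original module; the model path and the
# output blob name are hypothetical and depend on the network definition):
# the symbolic blobs returned above can be compiled into a prediction
# function with theano, e.g.
#     import theano
#     layers, blobs, inputs = parse_caffe_model('bvlc_googlenet.caffemodel')
#     predict = theano.function([inputs['data']], blobs['prob'])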
| bsd-3-clause |
calliope-project/calliope | calliope/time/funcs.py | 1 | 13849 | """
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
funcs.py
~~~~~~~~
Functions to process time series data.
"""
import logging
import datetime
import numpy as np
import pandas as pd
import xarray as xr
from calliope import exceptions
from calliope.time import clustering
logger = logging.getLogger(__name__)
def get_daily_timesteps(data, check_uniformity=False):
    """Return the timestep resolutions of the first day in the data.

    If `check_uniformity` is True, raise a ModelError unless every day has
    the same resolution profile (required for clustering).
    """
daily_timesteps = [
data.timestep_resolution.loc[i].values
for i in np.unique(data.timesteps.to_index().strftime("%Y-%m-%d"))
]
if check_uniformity:
if not np.all(daily_timesteps == daily_timesteps[0]):
raise exceptions.ModelError(
"For clustering, timestep resolution must be uniform."
)
return daily_timesteps[0]
def normalized_copy(data):
"""
Normalize timeseries data, using the maximum across all regions and timesteps.
Parameters
----------
data : xarray Dataset
Dataset with all non-time dependent variables removed
Returns
-------
ds : xarray Dataset
Copy of `data`, with the absolute taken and normalized to 0-1
"""
ds = data.copy(deep=True) # Work off a copy
for var in ds.data_vars:
ds[var] = abs(ds[var] / abs(ds[var]).groupby("techs").max(..., skipna=True))
return ds
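# Illustrative sketch (not part of the original module): a toy dataset with
# invented variable/coordinate values, showing that each tech's timeseries is
# rescaled by its own maximum absolute value. Relies on the module-level
# numpy/xarray imports above.
def _example_normalized_copy():  # pragma: no cover - illustration only
    toy = xr.Dataset(
        {"resource": (("techs", "timesteps"),
                      np.array([[1.0, -2.0], [3.0, 4.0]]))},
        coords={"techs": ["pv", "wind"], "timesteps": [0, 1]},
    )
    normed = normalized_copy(toy)
    # "pv" is scaled by 2.0 -> [0.5, 1.0]; "wind" by 4.0 -> [0.75, 1.0]
    return normed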
def _copy_non_t_vars(data0, data1):
"""Copies non-t-indexed variables from data0 into data1, then
returns data1"""
non_t_vars = [
varname
for varname, vardata in data0.data_vars.items()
if "timesteps" not in vardata.dims
]
# Manually copy over variables not in `timesteps`. If we don't do this,
# these vars get polluted with a superfluous `timesteps` dimension
for v in non_t_vars:
data1[v] = data0[v]
return data1
def _combine_datasets(data0, data1):
"""Concatenates data0 and data1 along the time dimension"""
data_new = xr.concat([data0, data1], dim="timesteps")
# Ensure time dimension is ordered
data_new = data_new.loc[{"timesteps": data_new.timesteps.to_index().sort_values()}]
return data_new
def _drop_timestep_vars(data, timesteps):
    """Subset `data` to its timeseries variables (and, optionally, `timesteps`).

    Returns the subsetted copy together with the original non-timestep
    coordinates, so they can be reinstated later.
    """
timeseries_data = data.copy(deep=True)
# Save all coordinates, to ensure they can be added back in after clustering
data_coords = data.copy().coords
del data_coords["timesteps"]
if timesteps is not None:
timeseries_data = timeseries_data.loc[{"timesteps": timesteps}]
timeseries_data = timeseries_data.drop_vars(
[
varname
for varname, vardata in data.data_vars.items()
if "timesteps" not in vardata.dims
]
)
return timeseries_data, data_coords
def apply_clustering(
data,
timesteps,
clustering_func,
how,
normalize=True,
scale_clusters="mean",
storage_inter_cluster=True,
model_run=None,
**kwargs,
):
"""
Apply the given clustering function to the given data.
Parameters
----------
data : xarray.Dataset
timesteps : pandas.DatetimeIndex or list of timesteps or None
clustering_func : str
Name of clustering function. Can be `file=....csv:column_name`
if loading custom clustering. Custom clustering index = timeseries days.
If no column_name, the CSV file must have only one column of data.
how : str
How to map clusters to data. 'mean' or 'closest'.
normalize : bool, optional
If True (default), data is normalized before clustering is applied,
using :func:`~calliope.time.funcs.normalized_copy`.
scale_clusters : str or None, default = 'mean'
Scale the results of clustering such that the clusters match the metric
given by scale_clusters. For example, 'mean' scales along each loc_tech
and variable to match inputs and outputs. Other options for matching
include 'sum', 'max', and 'min'. If None, no scaling occurs.
storage_inter_cluster : bool, default = True
    Passed on to :func:`~calliope.time.clustering.map_clusters_to_data`.
model_run : optional
    Only used when `clustering_func` is of the form `file=...`, to look up
    the already-loaded timeseries data.
**kwargs : optional
    Arguments passed to clustering_func.
Returns
-------
data_new_scaled : xarray.Dataset
"""
assert how in ["mean", "closest"]
daily_timesteps = get_daily_timesteps(data, check_uniformity=True)
timesteps_per_day = len(daily_timesteps)
# get a copy of the dataset with only timeseries variables,
# and get all coordinates of the original dataset, to reinstate later
data_to_cluster, data_coords = _drop_timestep_vars(data, timesteps)
data_to_cluster = data_to_cluster.drop_vars(
["timestep_weights", "timestep_resolution"]
)
for dim in data_to_cluster.dims:
data_to_cluster[dim] = data[dim]
with pd.option_context("mode.use_inf_as_na", True):
if normalize:
data_normalized = normalized_copy(data_to_cluster)
else:
data_normalized = data_to_cluster
if "file=" in clustering_func:
file = clustering_func.split("=")[1]
if ":" in file:
file, column = file.rsplit(":", 1)
else:
column = None
df = model_run.timeseries_data[file]
if isinstance(df, pd.Series) and column is not None:
exceptions.warn(
"{} given as time clustering column, but only one column to "
"choose from in {}.".format(column, file)
)
clusters = df.resample("1D").mean()
elif isinstance(df, pd.DataFrame) and column is None:
raise exceptions.ModelError(
"No time clustering column given, but multiple columns found in "
"{0}. Choose one column and add it to {1} as {1}:name_of_column.".format(
file, clustering_func
)
)
elif isinstance(df, pd.DataFrame) and column not in df.columns:
raise KeyError(
"time clustering column {} not found in {}.".format(column, file)
)
elif isinstance(df, pd.DataFrame):
clusters = (
df.loc[:, column].dropna().groupby(pd.Grouper(freq="1D")).unique()
)
# Check there weren't instances of more than one cluster assigned to a day
# or days with no information assigned
if any([len(i) == 0 for i in clusters.values]):
raise exceptions.ModelError(
"Missing cluster days in `{}:{}`.".format(file, column)
)
elif any([len(i) > 1 for i in clusters.values]):
raise exceptions.ModelError(
"More than one cluster value assigned to a day in `{}:{}`. "
"Unique clusters per day: {}".format(file, column, clusters)
)
else:
clusters.loc[:] = [i[0] for i in clusters.values]
else:
result = clustering.get_clusters(
data_normalized,
clustering_func,
timesteps_per_day=timesteps_per_day,
**kwargs,
)
clusters = result[0] # Ignore other stuff returned
data_new = clustering.map_clusters_to_data(
data_to_cluster,
clusters,
how=how,
daily_timesteps=daily_timesteps,
storage_inter_cluster=storage_inter_cluster,
)
# It's now safe to add the original coordinates back in (preserving all the
# loc_tech sets that aren't used to index a variable in the DataArray)
data_new.update(data_coords)
data_new = _copy_non_t_vars(data, data_new)
if timesteps is not None:
data_new = _copy_non_t_vars(data, data_new)
data_new = _combine_datasets(data.drop_sel(timesteps=timesteps), data_new)
data_new = _copy_non_t_vars(data, data_new)
# Scale the new/combined data so that the mean for each (loc_tech, variable)
# combination matches that from the original data
data_new_scaled = data_new.copy(deep=True)
if scale_clusters:
data_vars_in_t = [
v
for v in data_new.data_vars
if "timesteps" in data_new[v].dims
and "timestep_" not in v
and v != "clusters"
]
for var in data_vars_in_t:
scale = getattr(data[var], scale_clusters)(dim="timesteps") / getattr(
data_new[var], scale_clusters
)(dim="timesteps")
data_new_scaled[var] = data_new[var] * scale.fillna(0)
lookup_clusters(data_new_scaled)
return data_new_scaled
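# Illustrative sketch (not part of the original module; file and column names
# are hypothetical): how a ``clustering_func`` of the ``file=...`` form is
# split inside apply_clustering above.
def _example_clustering_func_string():  # pragma: no cover - illustration only
    clustering_func = "file=cluster_days.csv:cluster"
    file = clustering_func.split("=")[1]  # "cluster_days.csv:cluster"
    file, column = file.rsplit(":", 1)    # ("cluster_days.csv", "cluster")
    return file, column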
def resample(data, timesteps, resolution):
"""
Function to resample timeseries data from the input resolution (e.g. 1H), to
the given resolution (e.g. 2H)
Parameters
----------
data : xarray.Dataset
calliope model data, containing only timeseries data variables
timesteps : str or list; optional
If given, apply resampling to a subset of the timeseries data
resolution : str
time resolution of the output data, given in Pandas time frequency format.
E.g. 1H = 1 hour, 1W = 1 week, 1M = 1 month, 1T = 1 minute. Multiples allowed.
"""
def _resample(var, how):
return getattr(var.resample(timesteps=resolution, keep_attrs=True), how)(
"timesteps"
)
# get a copy of the dataset with only timeseries variables,
# and get all coordinates of the original dataset, to reinstate later
data_new, data_coords = _drop_timestep_vars(data, timesteps)
# First create a new resampled dataset of the correct size by
# using first-resample, which should be a quick way to achieve this
data_rs = _resample(data_new, how="first")
for var in data_rs.data_vars:
if var in ["timestep_resolution", "resource"]:
data_rs[var] = _resample(data_new[var], how="sum")
else:
try:
data_rs[var] = _resample(data_new[var], how="mean")
except TypeError:
# If the var has a datatype of strings, it can't be resampled
logger.error(
"Dropping {} because it has a {} data type when integer or "
"float is expected for timeseries resampling.".format(
var, data_rs[var].dtype
)
)
data_rs = data_rs.drop_vars(var)
# Get rid of the filled-in NaN timestamps
data_rs = data_rs.dropna(dim="timesteps", how="all")
data_rs.attrs["allow_operate_mode"] = 1 # Resampling still permits operational mode
# It's now safe to add the original coordinates back in (preserving all the
# loc_tech sets that aren't used to index a variable in the DataArray)
data_rs.update(data_coords)
data_rs = _copy_non_t_vars(data, data_rs) # add back in non timeseries data
if timesteps is not None:
# Combine leftover parts of passed in data with new data
data_rs = _combine_datasets(data.drop_sel(timesteps=timesteps), data_rs)
data_rs = _copy_non_t_vars(data, data_rs)
# Having timesteps with different lengths does not permit operational mode
data_rs.attrs["allow_operate_mode"] = 0
return data_rs
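# Illustrative sketch (not part of the original module), assuming the
# xarray/pandas versions this module targets; variable values are invented.
def _example_resample():  # pragma: no cover - illustration only
    times = pd.date_range("2005-01-01", periods=4, freq="1H")
    toy = xr.Dataset(
        {"resource": ("timesteps", [1.0, 3.0, 5.0, 7.0]),
         "timestep_resolution": ("timesteps", [1.0, 1.0, 1.0, 1.0])},
        coords={"timesteps": times},
    )
    toy_2h = resample(toy, None, "2H")
    # per the branch above, "resource" and "timestep_resolution" are summed
    # within each 2H block: resource -> [4., 12.], resolution -> [2., 2.]
    return toy_2h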
def drop(data, timesteps):
"""
Drop timesteps from data, adjusting the timestep weight of remaining
timesteps accordingly. Returns updated dataset.
Parameters
----------
data : xarray.Dataset
Calliope model data.
timesteps : str or list or other iterable
Pandas-compatible timestep strings.
"""
# Turn timesteps into a pandas datetime index for subsetting, which also
# checks whether they are actually valid
try:
timesteps_pd = pd.to_datetime(timesteps)
except Exception as error:
    raise exceptions.ModelError(
        "Invalid timesteps: {}".format(timesteps)) from error
# 'Distribute weight' of the dropped timesteps onto the remaining ones
dropped_weight = data.timestep_weights.loc[{"timesteps": timesteps_pd}].sum()
data = data.drop_sel(timesteps=timesteps_pd)
data["timestep_weights"] = data["timestep_weights"] + (
dropped_weight / len(data["timestep_weights"])
)
return data
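# Illustrative sketch (not part of the original module; values invented):
# dropping timesteps conserves total timestep weight.
def _example_drop():  # pragma: no cover - illustration only
    times = pd.date_range("2005-01-01", periods=4, freq="1H")
    toy = xr.Dataset(
        {"timestep_weights": ("timesteps", [1.0, 1.0, 1.0, 1.0])},
        coords={"timesteps": times},
    )
    toy_dropped = drop(toy, times[:2])
    # the 2.0 units of dropped weight are spread over the 2 remaining
    # timesteps: each weight becomes 1.0 + 2.0 / 2 = 2.0
    return toy_dropped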
def lookup_clusters(dataset):
"""
For any given timestep in a time clustered model, get:
1. the first and last timestep of the cluster,
2. the last timestep of the cluster corresponding to a date in the original timeseries
"""
data_dict_first = dict(dims=["timesteps"], data=[])
data_dict_last = dict(dims=["timesteps"], data=[])
for timestep in dataset.timesteps:
t = pd.to_datetime(timestep.item()).date().strftime("%Y-%m-%d")
timestep_first = dataset.timesteps.loc[t][0]
timestep_last = dataset.timesteps.loc[t][-1]
if timestep == timestep_first:
data_dict_first["data"].append(1)
data_dict_last["data"].append(timestep_last.values)
else:
data_dict_first["data"].append(0)
data_dict_last["data"].append(None)
dataset["lookup_cluster_first_timestep"] = xr.DataArray.from_dict(data_dict_first)
dataset["lookup_cluster_last_timestep"] = xr.DataArray.from_dict(data_dict_last)
if "datesteps" in dataset.dims:
last_timesteps = dict(dims=["datesteps"], data=[])
cluster_date = dataset.timestep_cluster.to_pandas().resample("1D").mean()
for datestep in dataset.datesteps.to_index():
cluster = dataset.lookup_datestep_cluster.loc[
datestep.strftime("%Y-%m-%d")
].item()
last_timesteps["data"].append(
datetime.datetime.combine(
cluster_date[cluster_date == cluster].index[0].date(),
dataset.timesteps.to_index().time[-1],
)
)
dataset["lookup_datestep_last_cluster_timestep"] = xr.DataArray.from_dict(
last_timesteps
)
return dataset
| apache-2.0 |
rkmaddox/mne-python | mne/viz/_figure.py | 2 | 113753 | # -*- coding: utf-8 -*-
"""Figure classes for MNE-Python's 2D plots.
Class Hierarchy
---------------
MNEFigParams Container object, attached to MNEFigure by default. Sets
close_key='escape' plus whatever other key-value pairs are
passed to its constructor.
matplotlib.figure.Figure
โ MNEFigure
โ MNEBrowseFigure Interactive figure for scrollable data.
โ Generated by:
โ - raw.plot()
โ - epochs.plot()
โ - ica.plot_sources(raw)
โ - ica.plot_sources(epochs)
โ
โ MNEAnnotationFigure GUI for adding annotations to Raw
โ
โ MNESelectionFigure GUI for spatial channel selection. raw.plot()
โ and epochs.plot() will generate one of these
โ alongside an MNEBrowseFigure when
โ group_by == 'selection' or 'position'
โ
โ MNELineFigure Interactive figure for non-scrollable data.
Generated by:
- raw.plot_psd()
- evoked.plot() TODO Not yet implemented
- evoked.plot_white() TODO Not yet implemented
- evoked.plot_joint() TODO Not yet implemented
"""
# Authors: Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from contextlib import contextmanager
import platform
from copy import deepcopy
from itertools import cycle
from functools import partial
from collections import OrderedDict
import numpy as np
from matplotlib.figure import Figure
from .epochs import plot_epochs_image
from .ica import (_create_properties_layout, _fast_plot_ica_properties,
_prepare_data_ica_properties)
from .utils import (plt_show, plot_sensors, _setup_plot_projector, _events_off,
_set_window_title, _merge_annotations, DraggableLine,
_get_color_list, logger, _validate_if_list_of_axes,
_plot_psd)
from ..defaults import _handle_default
from ..utils import set_config, _check_option, _check_sphere, Bunch
from ..annotations import _sync_onset
from ..time_frequency import psd_welch, psd_multitaper
from ..io.pick import (pick_types, _picks_to_idx, channel_indices_by_type,
_DATA_CH_TYPES_SPLIT, _DATA_CH_TYPES_ORDER_DEFAULT,
_VALID_CHANNEL_TYPES, _FNIRS_CH_TYPES_SPLIT)
# CONSTANTS (inches)
ANNOTATION_FIG_PAD = 0.1
ANNOTATION_FIG_MIN_H = 2.9 # fixed part, not including radio buttons/labels
ANNOTATION_FIG_W = 5.0
ANNOTATION_FIG_CHECKBOX_COLUMN_W = 0.5
class MNEFigParams:
"""Container object for MNE figure parameters."""
def __init__(self, **kwargs):
# default key to close window
self.close_key = 'escape'
vars(self).update(**kwargs)
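# Illustrative sketch (not part of the original module): the params container
# stores arbitrary keyword arguments as attributes, with 'escape' preset as
# the close key.
def _example_fig_params():  # pragma: no cover - illustration only
    params = MNEFigParams(duration=10.0, butterfly=False)
    assert params.close_key == 'escape'  # default set in __init__
    assert params.duration == 10.0       # kwargs become attributes
    return params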
class MNEFigure(Figure):
"""Base class for 2D figures & dialogs; wraps matplotlib.figure.Figure."""
def __init__(self, **kwargs):
from matplotlib import rcParams
# figsize is the only kwarg we pass to matplotlib Figure()
figsize = kwargs.pop('figsize', None)
super().__init__(figsize=figsize)
# things we'll almost always want
defaults = dict(fgcolor=rcParams['axes.edgecolor'],
bgcolor=rcParams['axes.facecolor'])
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# add our param object
self.mne = MNEFigParams(**kwargs)
def _close(self, event):
"""Handle close events."""
# remove references from parent fig to child fig
is_child = getattr(self.mne, 'parent_fig', None) is not None
is_named = getattr(self.mne, 'fig_name', None) is not None
if is_child:
self.mne.parent_fig.mne.child_figs.remove(self)
if is_named:
setattr(self.mne.parent_fig.mne, self.mne.fig_name, None)
def _keypress(self, event):
"""Handle keypress events."""
if event.key == self.mne.close_key:
from matplotlib.pyplot import close
close(self)
elif event.key == 'f11': # full screen
self.canvas.manager.full_screen_toggle()
def _buttonpress(self, event):
"""Handle buttonpress events."""
pass
def _pick(self, event):
"""Handle matplotlib pick events."""
pass
def _resize(self, event):
"""Handle window resize events."""
pass
def _add_default_callbacks(self, **kwargs):
"""Remove some matplotlib default callbacks and add MNE-Python ones."""
# Remove matplotlib default keypress catchers
default_callbacks = list(
self.canvas.callbacks.callbacks.get('key_press_event', {}))
for callback in default_callbacks:
self.canvas.callbacks.disconnect(callback)
# add our event callbacks
callbacks = dict(resize_event=self._resize,
key_press_event=self._keypress,
button_press_event=self._buttonpress,
close_event=self._close,
pick_event=self._pick)
callbacks.update(kwargs)
callback_ids = dict()
for event, callback in callbacks.items():
callback_ids[event] = self.canvas.mpl_connect(event, callback)
# store callback references so they aren't garbage-collected
self.mne._callback_ids = callback_ids
def _get_dpi_ratio(self):
"""Get DPI ratio (to handle hi-DPI screens)."""
dpi_ratio = 1.
for key in ('_dpi_ratio', '_device_scale'):
dpi_ratio = getattr(self.canvas, key, dpi_ratio)
return dpi_ratio
def _get_size_px(self):
"""Get figure size in pixels."""
dpi_ratio = self._get_dpi_ratio()
return self.get_size_inches() * self.dpi / dpi_ratio
def _inch_to_rel(self, dim_inches, horiz=True):
"""Convert inches to figure-relative distances."""
fig_w, fig_h = self.get_size_inches()
w_or_h = fig_w if horiz else fig_h
return dim_inches / w_or_h
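# Illustrative sketch (not part of the original module): figure-relative
# distances are just inches divided by the relevant figure dimension.
def _example_inch_to_rel():  # pragma: no cover - illustration only
    fig = MNEFigure(figsize=(8., 6.))
    assert np.isclose(fig._inch_to_rel(0.25), 0.25 / 8.)               # width
    assert np.isclose(fig._inch_to_rel(0.25, horiz=False), 0.25 / 6.)  # height
    return fig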
class MNEAnnotationFigure(MNEFigure):
"""Interactive dialog figure for annotations."""
def _close(self, event):
"""Handle close events (via keypress or window [x])."""
parent = self.mne.parent_fig
# disable span selector
parent.mne.ax_main.selector.active = False
# clear hover line
parent._remove_annotation_hover_line()
# disconnect hover callback
callback_id = parent.mne._callback_ids['motion_notify_event']
parent.canvas.callbacks.disconnect(callback_id)
# do all the other cleanup activities
super()._close(event)
def _keypress(self, event):
"""Handle keypress events."""
text = self.label.get_text()
key = event.key
if key == self.mne.close_key:
from matplotlib.pyplot import close
close(self)
elif key == 'backspace':
text = text[:-1]
elif key == 'enter':
self.mne.parent_fig._add_annotation_label(event)
return
elif len(key) > 1 or key == ';': # ignore modifier keys
return
else:
text = text + key
self.label.set_text(text)
self.canvas.draw()
def _radiopress(self, event):
"""Handle Radiobutton clicks for Annotation label selection."""
# update which button looks active
buttons = self.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
idx = labels.index(buttons.value_selected)
self._set_active_button(idx)
# update click-drag rectangle color
color = buttons.circles[idx].get_edgecolor()
selector = self.mne.parent_fig.mne.ax_main.selector
selector.rect.set_color(color)
selector.rectprops.update(dict(facecolor=color))
def _click_override(self, event):
"""Override MPL radiobutton click detector to use transData."""
ax = self.mne.radio_ax
buttons = ax.buttons
if (buttons.ignore(event) or event.button != 1 or event.inaxes != ax):
return
pclicked = ax.transData.inverted().transform((event.x, event.y))
distances = {}
for i, (p, t) in enumerate(zip(buttons.circles, buttons.labels)):
if (t.get_window_extent().contains(event.x, event.y)
or np.linalg.norm(pclicked - p.center) < p.radius):
distances[i] = np.linalg.norm(pclicked - p.center)
if len(distances) > 0:
closest = min(distances, key=distances.get)
buttons.set_active(closest)
def _set_active_button(self, idx):
"""Set active button in annotation dialog figure."""
buttons = self.mne.radio_ax.buttons
with _events_off(buttons):
buttons.set_active(idx)
for circle in buttons.circles:
circle.set_facecolor(self.mne.parent_fig.mne.bgcolor)
# active circle gets filled in, partially transparent
color = list(buttons.circles[idx].get_edgecolor())
color[-1] = 0.5
buttons.circles[idx].set_facecolor(color)
self.canvas.draw()
class MNESelectionFigure(MNEFigure):
"""Interactive dialog figure for channel selections."""
def _close(self, event):
"""Handle close events."""
from matplotlib.pyplot import close
self.mne.parent_fig.mne.child_figs.remove(self)
self.mne.fig_selection = None
# selection fig & main fig tightly integrated; closing one closes both
close(self.mne.parent_fig)
def _keypress(self, event):
"""Handle keypress events."""
if event.key in ('up', 'down', 'b'):
self.mne.parent_fig._keypress(event)
else: # check for close key
super()._keypress(event)
def _radiopress(self, event):
"""Handle RadioButton clicks for channel selection groups."""
selections_dict = self.mne.parent_fig.mne.ch_selections
buttons = self.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
this_label = buttons.value_selected
parent = self.mne.parent_fig
if this_label == 'Custom' and not len(selections_dict['Custom']):
with _events_off(buttons):
buttons.set_active(self.mne.old_selection)
return
# clicking a selection cancels butterfly mode
if parent.mne.butterfly:
parent._toggle_butterfly()
with _events_off(buttons):
buttons.set_active(labels.index(this_label))
parent._update_selection()
def _set_custom_selection(self):
"""Set custom selection by lasso selector."""
chs = self.lasso.selection
parent = self.mne.parent_fig
buttons = self.mne.radio_ax.buttons
if not len(chs):
return
labels = [label.get_text() for label in buttons.labels]
inds = np.in1d(parent.mne.ch_names, chs)
parent.mne.ch_selections['Custom'] = inds.nonzero()[0]
buttons.set_active(labels.index('Custom'))
def _style_radio_buttons_butterfly(self):
"""Handle RadioButton state for keyboard interactions."""
# Show all radio buttons as selected when in butterfly mode
parent = self.mne.parent_fig
buttons = self.mne.radio_ax.buttons
color = (buttons.activecolor if parent.mne.butterfly else
parent.mne.bgcolor)
for circle in buttons.circles:
circle.set_facecolor(color)
# when leaving butterfly mode, make most-recently-used selection active
if not parent.mne.butterfly:
with _events_off(buttons):
buttons.set_active(self.mne.old_selection)
# update the sensors too
parent._update_highlighted_sensors()
class MNEBrowseFigure(MNEFigure):
"""Interactive figure with scrollbars, for data browsing."""
def __init__(self, inst, figsize, ica=None, xlabel='Time (s)', **kwargs):
from matplotlib.colors import to_rgba_array
from matplotlib.ticker import (FixedLocator, FixedFormatter,
FuncFormatter, NullFormatter)
from matplotlib.patches import Rectangle
from matplotlib.widgets import Button
from matplotlib.transforms import blended_transform_factory
from mpl_toolkits.axes_grid1.axes_size import Fixed
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from .. import BaseEpochs
from ..io import BaseRaw
from ..preprocessing import ICA
super().__init__(figsize=figsize, inst=inst, ica=ica, **kwargs)
# what kind of data are we dealing with?
if isinstance(ica, ICA):
self.mne.instance_type = 'ica'
elif isinstance(inst, BaseRaw):
self.mne.instance_type = 'raw'
elif isinstance(inst, BaseEpochs):
self.mne.instance_type = 'epochs'
else:
raise TypeError('Expected an instance of Raw, Epochs, or ICA, '
f'got {type(inst)}.')
self.mne.ica_type = None
if self.mne.instance_type == 'ica':
if isinstance(self.mne.ica_inst, BaseRaw):
self.mne.ica_type = 'raw'
elif isinstance(self.mne.ica_inst, BaseEpochs):
self.mne.ica_type = 'epochs'
self.mne.is_epochs = 'epochs' in (self.mne.instance_type,
self.mne.ica_type)
# things that always start the same
self.mne.ch_start = 0
self.mne.projector = None
self.mne.projs_active = np.array([p['active'] for p in self.mne.projs])
self.mne.whitened_ch_names = list()
self.mne.use_noise_cov = self.mne.noise_cov is not None
self.mne.zorder = dict(patch=0, grid=1, ann=2, events=3, bads=4,
data=5, mag=6, grad=7, scalebar=8, vline=9)
# additional params for epochs (won't affect raw / ICA)
self.mne.epoch_traces = list()
self.mne.bad_epochs = list()
# annotations
self.mne.annotations = list()
self.mne.hscroll_annotations = list()
self.mne.annotation_segments = list()
self.mne.annotation_texts = list()
self.mne.new_annotation_labels = list()
self.mne.annotation_segment_colors = dict()
self.mne.annotation_hover_line = None
self.mne.draggable_annotations = False
# lines
self.mne.event_lines = None
self.mne.event_texts = list()
self.mne.vline_visible = False
# scalings
self.mne.scale_factor = 0.5 if self.mne.butterfly else 1.
self.mne.scalebars = dict()
self.mne.scalebar_texts = dict()
# ancillary child figures
self.mne.child_figs = list()
self.mne.fig_help = None
self.mne.fig_proj = None
self.mne.fig_histogram = None
self.mne.fig_selection = None
self.mne.fig_annotation = None
# MAIN AXES: default sizes (inches)
# XXX simpler with constrained_layout? (when it's no longer "beta")
l_margin = 1.
r_margin = 0.1
b_margin = 0.45
t_margin = 0.25
scroll_width = 0.25
hscroll_dist = 0.25
vscroll_dist = 0.1
help_width = scroll_width * 2
# MAIN AXES: default margins (figure-relative coordinates)
left = self._inch_to_rel(l_margin - vscroll_dist - help_width)
right = 1 - self._inch_to_rel(r_margin)
bottom = self._inch_to_rel(b_margin, horiz=False)
top = 1 - self._inch_to_rel(t_margin, horiz=False)
width = right - left
height = top - bottom
position = [left, bottom, width, height]
# Main axes must be a subplot for subplots_adjust to work (so user can
# adjust margins). That's why we don't use the Divider class directly.
ax_main = self.add_subplot(1, 1, 1, position=position)
self.subplotpars.update(left=left, bottom=bottom, top=top, right=right)
div = make_axes_locatable(ax_main)
# this only gets shown in zen mode
self.mne.zen_xlabel = ax_main.set_xlabel(xlabel)
self.mne.zen_xlabel.set_visible(not self.mne.scrollbars_visible)
# SCROLLBARS
ax_hscroll = div.append_axes(position='bottom',
size=Fixed(scroll_width),
pad=Fixed(hscroll_dist))
ax_vscroll = div.append_axes(position='right',
size=Fixed(scroll_width),
pad=Fixed(vscroll_dist))
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel(xlabel)
ax_vscroll.set_axis_off()
# HORIZONTAL SCROLLBAR PATCHES (FOR MARKING BAD EPOCHS)
if self.mne.is_epochs:
epoch_nums = self.mne.inst.selection
for ix, _ in enumerate(epoch_nums):
start = self.mne.boundary_times[ix]
width = np.diff(self.mne.boundary_times[ix:ix + 2])[0]
ax_hscroll.add_patch(
Rectangle((start, 0), width, 1, color='none',
zorder=self.mne.zorder['patch']))
# add epoch boundaries & center epoch numbers between boundaries
midpoints = np.convolve(self.mne.boundary_times, np.ones(2),
mode='valid') / 2
# both axes, major ticks: gridlines
for _ax in (ax_main, ax_hscroll):
_ax.xaxis.set_major_locator(
FixedLocator(self.mne.boundary_times[1:-1]))
_ax.xaxis.set_major_formatter(NullFormatter())
grid_kwargs = dict(color=self.mne.fgcolor, axis='x',
zorder=self.mne.zorder['grid'])
ax_main.grid(linewidth=2, linestyle='dashed', **grid_kwargs)
ax_hscroll.grid(alpha=0.5, linewidth=0.5, linestyle='solid',
**grid_kwargs)
# main axes, minor ticks: ticklabel (epoch number) for every epoch
ax_main.xaxis.set_minor_locator(FixedLocator(midpoints))
ax_main.xaxis.set_minor_formatter(FixedFormatter(epoch_nums))
# hscroll axes, minor ticks: up to 20 ticklabels (epoch numbers)
ax_hscroll.xaxis.set_minor_locator(
FixedLocator(midpoints, nbins=20))
ax_hscroll.xaxis.set_minor_formatter(
FuncFormatter(lambda x, pos: self._get_epoch_num_from_time(x)))
# hide some ticks
ax_main.tick_params(axis='x', which='major', bottom=False)
ax_hscroll.tick_params(axis='x', which='both', bottom=False)
# VERTICAL SCROLLBAR PATCHES (COLORED BY CHANNEL TYPE)
ch_order = self.mne.ch_order
for ix, pick in enumerate(ch_order):
this_color = (self.mne.ch_color_bad
if self.mne.ch_names[pick] in self.mne.info['bads']
else self.mne.ch_color_dict)
if isinstance(this_color, dict):
this_color = this_color[self.mne.ch_types[pick]]
ax_vscroll.add_patch(
Rectangle((0, ix), 1, 1, color=this_color,
zorder=self.mne.zorder['patch']))
ax_vscroll.set_ylim(len(ch_order), 0)
ax_vscroll.set_visible(not self.mne.butterfly)
# SCROLLBAR VISIBLE SELECTION PATCHES
sel_kwargs = dict(alpha=0.3, linewidth=4, clip_on=False,
edgecolor=self.mne.fgcolor)
vsel_patch = Rectangle((0, 0), 1, self.mne.n_channels,
facecolor=self.mne.bgcolor, **sel_kwargs)
ax_vscroll.add_patch(vsel_patch)
hsel_facecolor = np.average(
np.vstack((to_rgba_array(self.mne.fgcolor),
to_rgba_array(self.mne.bgcolor))),
axis=0, weights=(3, 1)) # 75% foreground, 25% background
hsel_patch = Rectangle((self.mne.t_start, 0), self.mne.duration, 1,
facecolor=hsel_facecolor, **sel_kwargs)
ax_hscroll.add_patch(hsel_patch)
ax_hscroll.set_xlim(self.mne.first_time, self.mne.first_time +
self.mne.n_times / self.mne.info['sfreq'])
# VLINE
vline_color = (0., 0.75, 0.)
vline_kwargs = dict(visible=False, zorder=self.mne.zorder['vline'])
if self.mne.is_epochs:
x = np.arange(self.mne.n_epochs)
vline = ax_main.vlines(
x, 0, 1, colors=vline_color, **vline_kwargs)
vline.set_transform(blended_transform_factory(ax_main.transData,
ax_main.transAxes))
vline_hscroll = None
else:
vline = ax_main.axvline(0, color=vline_color, **vline_kwargs)
vline_hscroll = ax_hscroll.axvline(0, color=vline_color,
**vline_kwargs)
vline_text = ax_hscroll.text(
self.mne.first_time, 1.2, '', fontsize=10, ha='right', va='bottom',
color=vline_color, **vline_kwargs)
# HELP BUTTON: initialize in the wrong spot...
ax_help = div.append_axes(position='left',
size=Fixed(help_width),
pad=Fixed(vscroll_dist))
# HELP BUTTON: ...move it down by changing its locator
loc = div.new_locator(nx=0, ny=0)
ax_help.set_axes_locator(loc)
# HELP BUTTON: make it a proper button
with _patched_canvas(ax_help.figure):
self.mne.button_help = Button(ax_help, 'Help')
# PROJ BUTTON
ax_proj = None
if len(self.mne.projs) and not inst.proj:
proj_button_pos = [
1 - self._inch_to_rel(r_margin + scroll_width), # left
self._inch_to_rel(b_margin, horiz=False), # bottom
self._inch_to_rel(scroll_width), # width
self._inch_to_rel(scroll_width, horiz=False) # height
]
loc = div.new_locator(nx=4, ny=0)
ax_proj = self.add_axes(proj_button_pos)
ax_proj.set_axes_locator(loc)
with _patched_canvas(ax_help.figure):
self.mne.button_proj = Button(ax_proj, 'Prj')
# INIT TRACES
self.mne.trace_kwargs = dict(antialiased=True, linewidth=0.5)
self.mne.traces = ax_main.plot(
np.full((1, self.mne.n_channels), np.nan), **self.mne.trace_kwargs)
# SAVE UI ELEMENT HANDLES
vars(self.mne).update(
ax_main=ax_main, ax_help=ax_help, ax_proj=ax_proj,
ax_hscroll=ax_hscroll, ax_vscroll=ax_vscroll,
vsel_patch=vsel_patch, hsel_patch=hsel_patch, vline=vline,
vline_hscroll=vline_hscroll, vline_text=vline_text)
def _close(self, event):
"""Handle close events (via keypress or window [x])."""
from matplotlib.pyplot import close
# write out bad epochs (after converting epoch numbers to indices)
if self.mne.instance_type == 'epochs':
bad_ixs = np.in1d(self.mne.inst.selection,
self.mne.bad_epochs).nonzero()[0]
self.mne.inst.drop(bad_ixs)
# write bad channels back to instance (don't do this for proj;
# proj checkboxes are for viz only and shouldn't modify the instance)
if self.mne.instance_type in ('raw', 'epochs'):
self.mne.inst.info['bads'] = self.mne.info['bads']
logger.info(
f"Channels marked as bad: {self.mne.info['bads'] or 'none'}")
# ICA excludes
elif self.mne.instance_type == 'ica':
self.mne.ica.exclude = [self.mne.ica._ica_names.index(ch)
for ch in self.mne.info['bads']]
# write window size to config
size = ','.join(self.get_size_inches().astype(str))
set_config('MNE_BROWSE_RAW_SIZE', size, set_env=False)
# Clean up child figures (don't pop(), child figs remove themselves)
while len(self.mne.child_figs):
fig = self.mne.child_figs[-1]
close(fig)
def _resize(self, event):
"""Handle resize event for mne_browse-style plots (Raw/Epochs/ICA)."""
old_width, old_height = self.mne.fig_size_px
new_width, new_height = self._get_size_px()
new_margins = _calc_new_margins(
self, old_width, old_height, new_width, new_height)
self.subplots_adjust(**new_margins)
# zen mode bookkeeping
self.mne.zen_w *= old_width / new_width
self.mne.zen_h *= old_height / new_height
self.mne.fig_size_px = (new_width, new_height)
self.canvas.draw_idle()
def _hover(self, event):
"""Handle motion event when annotating."""
if (event.button is not None or event.xdata is None or
event.inaxes != self.mne.ax_main):
return
if not self.mne.draggable_annotations:
self._remove_annotation_hover_line()
return
from matplotlib.patheffects import Stroke, Normal
for coll in self.mne.annotations:
if coll.contains(event)[0]:
path = coll.get_paths()
assert len(path) == 1
path = path[0]
color = coll.get_edgecolors()[0]
ylim = self.mne.ax_main.get_ylim()
# are we on the left or right edge?
_l = path.vertices[:, 0].min()
_r = path.vertices[:, 0].max()
x = _l if abs(event.xdata - _l) < abs(event.xdata - _r) else _r
mask = path.vertices[:, 0] == x
def drag_callback(x0):
path.vertices[mask, 0] = x0
# create or update the DraggableLine
hover_line = self.mne.annotation_hover_line
if hover_line is None:
line = self.mne.ax_main.plot([x, x], ylim, color=color,
linewidth=2, pickradius=5.)[0]
hover_line = DraggableLine(
line, self._modify_annotation, drag_callback)
else:
hover_line.set_x(x)
hover_line.drag_callback = drag_callback
# style the line
line = hover_line.line
patheff = [Stroke(linewidth=4, foreground=color, alpha=0.5),
Normal()]
line.set_path_effects(patheff if line.contains(event)[0] else
patheff[1:])
self.mne.ax_main.selector.active = False
self.mne.annotation_hover_line = hover_line
self.canvas.draw_idle()
return
self._remove_annotation_hover_line()
def _keypress(self, event):
"""Handle keypress events."""
key = event.key
n_channels = self.mne.n_channels
if self.mne.is_epochs:
last_time = self.mne.n_times / self.mne.info['sfreq']
else:
last_time = self.mne.inst.times[-1]
# scroll up/down
if key in ('down', 'up', 'shift+down', 'shift+up'):
key = key.split('+')[-1]
direction = -1 if key == 'up' else 1
# butterfly case
if self.mne.butterfly:
return
# group_by case
elif self.mne.fig_selection is not None:
buttons = self.mne.fig_selection.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
current_label = buttons.value_selected
current_idx = labels.index(current_label)
selections_dict = self.mne.ch_selections
penult = current_idx < (len(labels) - 1)
pre_penult = current_idx < (len(labels) - 2)
has_custom = selections_dict.get('Custom', None) is not None
def_custom = len(selections_dict.get('Custom', list()))
up_ok = key == 'up' and current_idx > 0
down_ok = key == 'down' and (
pre_penult or
(penult and not has_custom) or
(penult and has_custom and def_custom))
if up_ok or down_ok:
buttons.set_active(current_idx + direction)
# normal case
else:
ceiling = len(self.mne.ch_order) - n_channels
ch_start = self.mne.ch_start + direction * n_channels
self.mne.ch_start = np.clip(ch_start, 0, ceiling)
self._update_picks()
self._update_vscroll()
self._redraw()
# scroll left/right
elif key in ('right', 'left', 'shift+right', 'shift+left'):
old_t_start = self.mne.t_start
direction = 1 if key.endswith('right') else -1
if self.mne.is_epochs:
denom = 1 if key.startswith('shift') else self.mne.n_epochs
else:
denom = 1 if key.startswith('shift') else 4
t_max = last_time - self.mne.duration
t_start = self.mne.t_start + direction * self.mne.duration / denom
self.mne.t_start = np.clip(t_start, self.mne.first_time, t_max)
if self.mne.t_start != old_t_start:
self._update_hscroll()
self._redraw(annotations=True)
# scale traces
elif key in ('=', '+', '-'):
scaler = 1 / 1.1 if key == '-' else 1.1
self.mne.scale_factor *= scaler
self._redraw(update_data=False)
# change number of visible channels
elif (key in ('pageup', 'pagedown') and
self.mne.fig_selection is None and
not self.mne.butterfly):
new_n_ch = n_channels + (1 if key == 'pageup' else -1)
self.mne.n_channels = np.clip(new_n_ch, 1, len(self.mne.ch_order))
# add new chs from above if we're at the bottom of the scrollbar
ch_end = self.mne.ch_start + self.mne.n_channels
if ch_end > len(self.mne.ch_order) and self.mne.ch_start > 0:
self.mne.ch_start -= 1
self._update_vscroll()
# redraw only if changed
if self.mne.n_channels != n_channels:
self._update_picks()
self._update_trace_offsets()
self._redraw(annotations=True)
# change duration
elif key in ('home', 'end'):
dur_delta = 1 if key == 'end' else -1
if self.mne.is_epochs:
self.mne.n_epochs = np.clip(self.mne.n_epochs + dur_delta,
1, len(self.mne.inst))
min_dur = len(self.mne.inst.times) / self.mne.info['sfreq']
dur_delta *= min_dur
else:
min_dur = 3 * np.diff(self.mne.inst.times[:2])[0]
old_dur = self.mne.duration
new_dur = self.mne.duration + dur_delta
self.mne.duration = np.clip(new_dur, min_dur, last_time)
if self.mne.duration != old_dur:
if self.mne.t_start + self.mne.duration > last_time:
self.mne.t_start = last_time - self.mne.duration
self._update_hscroll()
self._redraw(annotations=True)
elif key == '?': # help window
self._toggle_help_fig(event)
elif key == 'a': # annotation mode
self._toggle_annotation_fig()
elif key == 'b' and self.mne.instance_type != 'ica': # butterfly mode
self._toggle_butterfly()
elif key == 'd': # DC shift
self.mne.remove_dc = not self.mne.remove_dc
self._redraw()
elif key == 'h' and self.mne.instance_type == 'epochs': # histogram
self._toggle_epoch_histogram()
elif key == 'j' and len(self.mne.projs): # SSP window
self._toggle_proj_fig()
elif key == 'J' and len(self.mne.projs):
self._toggle_proj_checkbox(event, toggle_all=True)
elif key == 'p': # toggle draggable annotations
self._toggle_draggable_annotations(event)
if self.mne.fig_annotation is not None:
checkbox = self.mne.fig_annotation.mne.drag_checkbox
with _events_off(checkbox):
checkbox.set_active(0)
elif key == 's': # scalebars
self._toggle_scalebars(event)
elif key == 'w': # toggle noise cov whitening
if self.mne.noise_cov is not None:
self.mne.use_noise_cov = not self.mne.use_noise_cov
self._update_projector()
self._update_yaxis_labels() # add/remove italics
self._redraw()
elif key == 'z': # zen mode: hide scrollbars and buttons
self._toggle_scrollbars()
self._redraw(update_data=False)
else: # check for close key / fullscreen toggle
super()._keypress(event)
def _buttonpress(self, event):
"""Handle mouse clicks."""
butterfly = self.mne.butterfly
annotating = self.mne.fig_annotation is not None
ax_main = self.mne.ax_main
inst = self.mne.inst
# ignore middle clicks, scroll wheel events, and clicks outside axes
if event.button not in (1, 3) or event.inaxes is None:
return
elif event.button == 1: # left-click (primary)
# click in main axes
if (event.inaxes == ax_main and not annotating):
if self.mne.instance_type == 'epochs' or not butterfly:
for line in self.mne.traces + self.mne.epoch_traces:
if line.contains(event)[0]:
if self.mne.instance_type == 'epochs':
self._toggle_bad_epoch(event)
else:
idx = self.mne.traces.index(line)
self._toggle_bad_channel(idx)
return
self._show_vline(event.xdata) # butterfly / not on data trace
self._redraw(update_data=False, annotations=False)
return
# click in vertical scrollbar
elif event.inaxes == self.mne.ax_vscroll:
if self.mne.fig_selection is not None:
self._change_selection_vscroll(event)
elif self._check_update_vscroll_clicked(event):
self._redraw()
# click in horizontal scrollbar
elif event.inaxes == self.mne.ax_hscroll:
if self._check_update_hscroll_clicked(event):
self._redraw(annotations=True)
# click on proj button
elif event.inaxes == self.mne.ax_proj:
self._toggle_proj_fig(event)
# click on help button
elif event.inaxes == self.mne.ax_help:
self._toggle_help_fig(event)
else: # right-click (secondary)
if annotating:
if any(c.contains(event)[0] for c in ax_main.collections):
xdata = event.xdata - self.mne.first_time
start = _sync_onset(inst, inst.annotations.onset)
end = start + inst.annotations.duration
ann_idx = np.where((xdata > start) & (xdata < end))[0]
for idx in sorted(ann_idx)[::-1]:
# only remove visible annotation spans
descr = inst.annotations[idx]['description']
if self.mne.visible_annotations[descr]:
inst.annotations.delete(idx)
self._remove_annotation_hover_line()
self._draw_annotations()
self.canvas.draw_idle()
elif event.inaxes == ax_main:
self._toggle_vline(False)
def _pick(self, event):
"""Handle matplotlib pick events."""
from matplotlib.text import Text
if self.mne.butterfly:
return
# clicked on channel name
if isinstance(event.artist, Text):
ch_name = event.artist.get_text()
ind = self.mne.ch_names[self.mne.picks].tolist().index(ch_name)
if event.mouseevent.button == 1: # left click
self._toggle_bad_channel(ind)
elif event.mouseevent.button == 3: # right click
self._create_ch_context_fig(ind)
def _new_child_figure(self, fig_name, **kwargs):
"""Instantiate a new MNE dialog figure (with event listeners)."""
fig = _figure(toolbar=False, parent_fig=self, fig_name=fig_name,
**kwargs)
fig._add_default_callbacks()
self.mne.child_figs.append(fig)
if isinstance(fig_name, str):
setattr(self.mne, fig_name, fig)
return fig
def _create_ch_context_fig(self, idx):
"""Show context figure; idx is index of **visible** channels."""
inst = self.mne.instance_type
pick = self.mne.picks[idx]
if inst == 'raw':
self._create_ch_location_fig(pick)
elif inst == 'ica':
self._create_ica_properties_fig(pick)
else:
self._create_epoch_image_fig(pick)
def _create_ch_location_fig(self, pick):
"""Show channel location figure."""
from .utils import _channel_type_prettyprint
ch_name = self.mne.ch_names[pick]
ch_type = self.mne.ch_types[pick]
if ch_type not in _DATA_CH_TYPES_SPLIT:
return
# create figure and axes
fig = self._new_child_figure(figsize=(4, 4), fig_name=None,
window_title=f'Location of {ch_name}')
ax = fig.add_subplot(111)
title = f'{ch_name} position ({_channel_type_prettyprint[ch_type]})'
_ = plot_sensors(self.mne.info, ch_type=ch_type, axes=ax,
title=title, kind='select')
# highlight desired channel & disable interactivity
inds = np.in1d(fig.lasso.ch_names, [ch_name])
fig.lasso.disconnect()
fig.lasso.alpha_other = 0.3
fig.lasso.linewidth_selected = 3
fig.lasso.style_sensors(inds)
plt_show(fig=fig)
def _create_ica_properties_fig(self, idx):
"""Show ICA properties for the selected component."""
ch_name = self.mne.ch_names[idx]
if ch_name not in self.mne.ica._ica_names: # for EOG chans: do nothing
return
pick = self.mne.ica._ica_names.index(ch_name)
fig = self._new_child_figure(figsize=(7, 6), fig_name=None,
window_title=f'{ch_name} properties')
fig, axes = _create_properties_layout(fig=fig)
if not hasattr(self.mne, 'data_ica_properties'):
# Precompute epoch sources only once
self.mne.data_ica_properties = _prepare_data_ica_properties(
self.mne.ica_inst, self.mne.ica)
_fast_plot_ica_properties(
self.mne.ica, self.mne.ica_inst, picks=pick, axes=axes,
precomputed_data=self.mne.data_ica_properties)
def _create_epoch_image_fig(self, pick):
"""Show epochs image for the selected channel."""
from matplotlib.gridspec import GridSpec
ch_name = self.mne.ch_names[pick]
fig = self._new_child_figure(figsize=(6, 4), fig_name=None,
window_title=f'Epochs image ({ch_name})')
gs = GridSpec(nrows=3, ncols=10)
fig.add_subplot(gs[:2, :9])
fig.add_subplot(gs[2, :9])
fig.add_subplot(gs[:2, 9])
plot_epochs_image(self.mne.inst, picks=pick, fig=fig)
def _toggle_epoch_histogram(self):
"""Show or hide peak-to-peak histogram of channel amplitudes."""
if self.mne.fig_histogram is None:
self._create_epoch_histogram()
plt_show(fig=self.mne.fig_histogram)
else:
from matplotlib.pyplot import close
close(self.mne.fig_histogram)
def _create_epoch_histogram(self):
"""Create peak-to-peak histogram of channel amplitudes."""
epochs = self.mne.inst
data = OrderedDict()
ptp = np.ptp(epochs.get_data(), axis=2)
for ch_type in ('eeg', 'mag', 'grad'):
if ch_type in epochs:
data[ch_type] = ptp.T[self.mne.ch_types == ch_type].ravel()
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
scalings = _handle_default('scalings')
title = 'Histogram of peak-to-peak amplitudes'
figsize = (4, 1 + 1.5 * len(data))
fig = self._new_child_figure(figsize=figsize, fig_name='fig_histogram',
window_title=title)
for ix, (_ch_type, _data) in enumerate(data.items()):
ax = fig.add_subplot(len(data), 1, ix + 1)
ax.set(title=titles[_ch_type], xlabel=units[_ch_type],
ylabel='Count')
# set histogram bin range based on rejection thresholds
reject = None
_range = None
if epochs.reject is not None and _ch_type in epochs.reject:
reject = epochs.reject[_ch_type] * scalings[_ch_type]
_range = (0., reject * 1.1)
# plot it
ax.hist(_data * scalings[_ch_type], bins=100,
color=colors[_ch_type], range=_range)
if reject is not None:
ax.plot((reject, reject), (0, ax.get_ylim()[1]), color='r')
# finalize
fig.suptitle(title, y=0.99)
kwargs = dict(bottom=fig._inch_to_rel(0.5, horiz=False),
top=1 - fig._inch_to_rel(0.5, horiz=False),
left=fig._inch_to_rel(0.75),
right=1 - fig._inch_to_rel(0.25))
fig.subplots_adjust(hspace=0.7, **kwargs)
self.mne.fig_histogram = fig
plt_show(fig=fig)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# HELP DIALOG
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _create_help_fig(self):
"""Create help dialog window."""
text = {key: val for key, val in self._get_help_text().items()
if val is not None}
keys = ''
vals = ''
for key, val in text.items():
newsection = '\n' if key.startswith('_') else ''
key = key[1:] if key.startswith('_') else key
newlines = '\n' * len(val.split('\n')) # handle multiline values
keys += f'{newsection}{key} {newlines}'
vals += f'{newsection}{val}\n'
# calc figure size
n_lines = len(keys.split('\n'))
longest_key = max(len(k) for k in text.keys())
longest_val = max(max(len(w) for w in v.split('\n')) if '\n' in v else
len(v) for v in text.values())
width = (longest_key + longest_val) / 12
height = (n_lines) / 5
# create figure and axes
fig = self._new_child_figure(figsize=(width, height),
fig_name='fig_help',
window_title='Help')
ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
ax.set_axis_off()
kwargs = dict(va='top', linespacing=1.5, usetex=False)
ax.text(0.42, 1, keys, ma='right', ha='right', **kwargs)
ax.text(0.42, 1, vals, ma='left', ha='left', **kwargs)
def _toggle_help_fig(self, event):
"""Show/hide the help dialog window."""
if self.mne.fig_help is None:
self._create_help_fig()
plt_show(fig=self.mne.fig_help)
else:
from matplotlib.pyplot import close
close(self.mne.fig_help)
def _get_help_text(self):
"""Generate help dialog text; `None`-valued entries removed later."""
inst = self.mne.instance_type
is_raw = inst == 'raw'
is_epo = inst == 'epochs'
is_ica = inst == 'ica'
has_proj = bool(len(self.mne.projs))
# adapt keys to different platforms
is_mac = platform.system() == 'Darwin'
dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End')
ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down')
# adapt descriptions to different instance types
ch_cmp = 'component' if is_ica else 'channel'
ch_epo = 'epoch' if is_epo else 'channel'
ica_bad = 'Mark/unmark component for exclusion'
dur_vals = ([f'Show {n} epochs' for n in ('fewer', 'more')]
if self.mne.is_epochs else
[f'Show {d} time window' for d in ('shorter', 'longer')])
ch_vals = [f'{inc_dec} number of visible {ch_cmp}s' for inc_dec in
('Increase', 'Decrease')]
lclick_data = ica_bad if is_ica else f'Mark/unmark bad {ch_epo}'
lclick_name = (ica_bad if is_ica else 'Mark/unmark bad channel')
rclick_name = dict(ica='Show diagnostics for component',
epochs='Show imageplot for channel',
raw='Show channel location')[inst]
# TODO not yet implemented
# ldrag = ('Show spectrum plot for selected time span;\nor (in '
# 'annotation mode) add annotation') if inst== 'raw' else None
ldrag = 'add annotation (in annotation mode)' if is_raw else None
noise_cov = (None if self.mne.noise_cov is None else
'Toggle signal whitening')
scrl = '1 epoch' if self.mne.is_epochs else '¼ window'
# below, a value of " " is a hack so that val.split("\n") has length 1
help_text = OrderedDict([
('_NAVIGATION', ' '),
('→', f'Scroll {scrl} right (scroll full window with Shift + →)'),
('←', f'Scroll {scrl} left (scroll full window with Shift + ←)'),
(dur_keys[0], dur_vals[0]),
(dur_keys[1], dur_vals[1]),
('↑', f'Scroll up ({ch_cmp}s)'),
('↓', f'Scroll down ({ch_cmp}s)'),
(ch_keys[0], ch_vals[0]),
(ch_keys[1], ch_vals[1]),
('_SIGNAL TRANSFORMATIONS', ' '),
('+ or =', 'Increase signal scaling'),
('-', 'Decrease signal scaling'),
('b', 'Toggle butterfly mode' if not is_ica else None),
('d', 'Toggle DC removal' if is_raw else None),
('w', noise_cov),
('_USER INTERFACE', ' '),
('a', 'Toggle annotation mode' if is_raw else None),
('h', 'Toggle peak-to-peak histogram' if is_epo else None),
('j', 'Toggle SSP projector window' if has_proj else None),
('shift+j', 'Toggle all SSPs'),
('p', 'Toggle draggable annotations' if is_raw else None),
('s', 'Toggle scalebars' if not is_ica else None),
('z', 'Toggle scrollbars'),
('F11', 'Toggle fullscreen' if not is_mac else None),
('?', 'Open this help window'),
('esc', 'Close focused figure or dialog window'),
('_MOUSE INTERACTION', ' '),
(f'Left-click {ch_cmp} name', lclick_name),
(f'Left-click {ch_cmp} data', lclick_data),
('Left-click-and-drag on plot', ldrag),
('Left-click on plot background', 'Place vertical guide'),
('Right-click on plot background', 'Clear vertical guide'),
('Right-click on channel name', rclick_name)
])
return help_text
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ANNOTATIONS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _create_annotation_fig(self):
"""Create the annotation dialog window."""
from matplotlib.widgets import Button, SpanSelector, CheckButtons
from mpl_toolkits.axes_grid1.axes_size import Fixed
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
# make figure
labels = np.array(sorted(set(self.mne.inst.annotations.description)))
radio_button_h = self._compute_annotation_figsize(len(labels))
figsize = (ANNOTATION_FIG_W, ANNOTATION_FIG_MIN_H + radio_button_h)
fig = self._new_child_figure(figsize=figsize,
FigureClass=MNEAnnotationFigure,
fig_name='fig_annotation',
window_title='Annotations')
# make main axes
left = fig._inch_to_rel(ANNOTATION_FIG_PAD)
bottom = fig._inch_to_rel(ANNOTATION_FIG_PAD, horiz=False)
width = 1 - 2 * left
height = 1 - 2 * bottom
fig.mne.radio_ax = fig.add_axes((left, bottom, width, height),
frame_on=False, aspect='equal')
div = make_axes_locatable(fig.mne.radio_ax)
# append show/hide checkboxes at right
fig.mne.show_hide_ax = div.append_axes(
position='right', size=Fixed(ANNOTATION_FIG_CHECKBOX_COLUMN_W),
pad=Fixed(ANNOTATION_FIG_PAD), aspect='equal',
sharey=fig.mne.radio_ax)
# populate w/ radio buttons & labels
self._update_annotation_fig()
# append instructions at top
instructions_ax = div.append_axes(position='top', size=Fixed(1),
pad=Fixed(5 * ANNOTATION_FIG_PAD))
# XXX when we support a newer matplotlib (something >3.0) the
# instructions can have inline bold formatting:
# instructions = '\n'.join(
# [r'$\mathbf{Left-click~&~drag~on~plot:}$ create/modify annotation', # noqa E501
# r'$\mathbf{Right-click~on~plot~annotation:}$ delete annotation',
# r'$\mathbf{Type~in~annotation~window:}$ modify new label name',
# r'$\mathbf{Enter~(or~click~button):}$ add new label to list',
# r'$\mathbf{Esc:}$ exit annotation mode & close this window'])
instructions = '\n'.join(
['Left click & drag on plot: create/modify annotation',
'Right click on annotation highlight: delete annotation',
'Type in this window: modify new label name',
'Enter (or click button): add new label to list',
'Esc: exit annotation mode & close this dialog window'])
instructions_ax.text(0, 1, instructions, va='top', ha='left',
linespacing=1.7,
usetex=False) # force use of MPL mathtext parser
instructions_ax.set_axis_off()
# append text entry axes at bottom
text_entry_ax = div.append_axes(position='bottom',
size=Fixed(3 * ANNOTATION_FIG_PAD),
pad=Fixed(ANNOTATION_FIG_PAD))
text_entry_ax.text(0.4, 0.5, 'New label:', va='center', ha='right',
weight='bold')
fig.label = text_entry_ax.text(0.5, 0.5, 'BAD_', va='center',
ha='left')
text_entry_ax.set_axis_off()
# append button at bottom
button_ax = div.append_axes(position='bottom',
size=Fixed(3 * ANNOTATION_FIG_PAD),
pad=Fixed(ANNOTATION_FIG_PAD))
fig.button = Button(button_ax, 'Add new label')
fig.button.on_clicked(self._add_annotation_label)
plt_show(fig=fig)
# add "draggable" checkbox
drag_ax_height = 3 * ANNOTATION_FIG_PAD
drag_ax = div.append_axes('bottom', size=Fixed(drag_ax_height),
pad=Fixed(ANNOTATION_FIG_PAD),
aspect='equal')
checkbox = CheckButtons(drag_ax, labels=('Draggable edges?',),
actives=(self.mne.draggable_annotations,))
checkbox.on_clicked(self._toggle_draggable_annotations)
fig.mne.drag_checkbox = checkbox
# reposition & resize axes
width_in, height_in = fig.get_size_inches()
width_ax = fig._inch_to_rel(width_in
- ANNOTATION_FIG_CHECKBOX_COLUMN_W
- 3 * ANNOTATION_FIG_PAD)
aspect = width_ax / fig._inch_to_rel(drag_ax_height)
drag_ax.set_xlim(0, aspect)
drag_ax.set_axis_off()
# reposition & resize checkbox & label
rect = checkbox.rectangles[0]
_pad, _size = (0.2, 0.6)
rect.set_bounds(_pad, _pad, _size, _size)
lines = checkbox.lines[0]
for line, direction in zip(lines, (1, -1)):
line.set_xdata((_pad, _pad + _size)[::direction])
line.set_ydata((_pad, _pad + _size))
text = checkbox.labels[0]
text.set(position=(3 * _pad + _size, 0.45), va='center')
for artist in lines + (rect, text):
artist.set_transform(drag_ax.transData)
# setup interactivity in plot window
col = ('#ff0000' if len(fig.mne.radio_ax.buttons.circles) < 1 else
fig.mne.radio_ax.buttons.circles[0].get_edgecolor())
# TODO: we would like useblit=True here, but it behaves oddly when the
# first span is dragged (subsequent spans seem to work OK)
selector = SpanSelector(self.mne.ax_main, self._select_annotation_span,
'horizontal', minspan=0.1, useblit=False,
rectprops=dict(alpha=0.5, facecolor=col))
self.mne.ax_main.selector = selector
self.mne._callback_ids['motion_notify_event'] = \
self.canvas.mpl_connect('motion_notify_event', self._hover)
def _toggle_visible_annotations(self, event):
"""Enable/disable display of annotations on a per-label basis."""
checkboxes = self.mne.show_hide_annotation_checkboxes
labels = [t.get_text() for t in checkboxes.labels]
actives = checkboxes.get_status()
self.mne.visible_annotations = dict(zip(labels, actives))
self._redraw(update_data=False, annotations=True)
def _toggle_draggable_annotations(self, event):
"""Enable/disable draggable annotation edges."""
self.mne.draggable_annotations = not self.mne.draggable_annotations
def _get_annotation_labels(self):
"""Get the unique labels in the raw object and added in the UI."""
return sorted(set(self.mne.inst.annotations.description) |
set(self.mne.new_annotation_labels))
def _update_annotation_fig(self):
"""Draw or redraw the radio buttons and annotation labels."""
from matplotlib.widgets import RadioButtons, CheckButtons
# define shorthand variables
fig = self.mne.fig_annotation
ax = fig.mne.radio_ax
labels = self._get_annotation_labels()
# compute new figsize
radio_button_h = self._compute_annotation_figsize(len(labels))
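        # figure height = fixed UI height + room for the radio button list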
fig.set_size_inches(ANNOTATION_FIG_W,
ANNOTATION_FIG_MIN_H + radio_button_h,
forward=True)
# populate center axes with labels & radio buttons
ax.clear()
title = 'Existing labels:' if len(labels) else 'No existing labels'
ax.set_title(title, size=None, loc='left')
ax.buttons = RadioButtons(ax, labels)
# adjust xlim to keep equal aspect & full width (keep circles round)
aspect = (ANNOTATION_FIG_W - ANNOTATION_FIG_CHECKBOX_COLUMN_W
- 3 * ANNOTATION_FIG_PAD) / radio_button_h
ax.set_xlim((0, aspect))
# style the buttons & adjust spacing
radius = 0.15
circles = ax.buttons.circles
for circle, label in zip(circles, ax.buttons.labels):
circle.set_transform(ax.transData)
center = ax.transData.inverted().transform(
ax.transAxes.transform((0.1, 0)))
circle.set_center((center[0], circle.center[1]))
circle.set_edgecolor(
self.mne.annotation_segment_colors[label.get_text()])
circle.set_linewidth(4)
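            # shrink radius as labels are added so circles fit their rows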
circle.set_radius(radius / len(labels))
# style the selected button
if len(labels):
fig._set_active_button(0)
# add event listeners
ax.buttons.disconnect_events() # clear MPL default listeners
ax.buttons.on_clicked(fig._radiopress)
ax.buttons.connect_event('button_press_event', fig._click_override)
# now do the show/hide checkboxes
show_hide_ax = fig.mne.show_hide_ax
show_hide_ax.clear()
show_hide_ax.set_axis_off()
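        # like the radio ax, set xlim for equal aspect (keeps boxes square)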
aspect = ANNOTATION_FIG_CHECKBOX_COLUMN_W / radio_button_h
show_hide_ax.set(xlim=(0, aspect), ylim=(0, 1))
# ensure new labels have checkbox values
check_values = {label: False for label in labels}
check_values.update(self.mne.visible_annotations) # existing checks
actives = [check_values[label] for label in labels]
# regenerate checkboxes
checkboxes = CheckButtons(ax=fig.mne.show_hide_ax,
labels=labels,
actives=actives)
checkboxes.on_clicked(self._toggle_visible_annotations)
# add title, hide labels
show_hide_ax.set_title('show/\nhide ', size=None, loc='right')
for label in checkboxes.labels:
label.set_visible(False)
# fix aspect and right-align
if len(labels) == 1:
bounds = (0.05, 0.375, 0.25, 0.25) # undo MPL special case
checkboxes.rectangles[0].set_bounds(bounds)
for line, step in zip(checkboxes.lines[0], (1, -1)):
line.set_xdata((bounds[0], bounds[0] + bounds[2]))
line.set_ydata((bounds[1], bounds[1] + bounds[3])[::step])
for rect in checkboxes.rectangles:
rect.set_transform(show_hide_ax.transData)
bbox = rect.get_bbox()
bounds = (aspect, bbox.ymin, -bbox.width, bbox.height)
rect.set_bounds(bounds)
rect.set_clip_on(False)
for line in np.array(checkboxes.lines).ravel():
line.set_transform(show_hide_ax.transData)
line.set_xdata(aspect + 0.05 - np.array(line.get_xdata()))
# store state
self.mne.visible_annotations = check_values
self.mne.show_hide_annotation_checkboxes = checkboxes
def _toggle_annotation_fig(self):
"""Show/hide the annotation dialog window."""
if self.mne.fig_annotation is None:
self._create_annotation_fig()
else:
from matplotlib.pyplot import close
close(self.mne.fig_annotation)
def _compute_annotation_figsize(self, n_labels):
"""Adapt size of Annotation UI to accommodate the number of buttons.
self._create_annotation_fig() implements the following:
Fixed part of height:
0.1 top margin
1.0 instructions
0.5 padding below instructions
--- (variable-height axis for label list, returned by this method)
0.1 padding above text entry
0.3 text entry
0.1 padding above button
0.3 button
0.1 padding above checkbox
0.3 checkbox
0.1 bottom margin
------------------------------------------
2.9 total fixed height
"""
return max(ANNOTATION_FIG_PAD, 0.7 * n_labels)
def _add_annotation_label(self, event):
"""Add new annotation description."""
text = self.mne.fig_annotation.label.get_text()
self.mne.new_annotation_labels.append(text)
self._setup_annotation_colors()
self._update_annotation_fig()
# automatically activate new label's radio button
idx = [label.get_text() for label in
self.mne.fig_annotation.mne.radio_ax.buttons.labels].index(text)
self.mne.fig_annotation._set_active_button(idx)
        # simulate a click on the radiobutton → update the span selector color
self.mne.fig_annotation._radiopress(event=None)
# reset the text entry box's text
self.mne.fig_annotation.label.set_text('BAD_')
def _setup_annotation_colors(self):
"""Set up colors for annotations; init some annotation vars."""
segment_colors = getattr(self.mne, 'annotation_segment_colors', dict())
labels = self._get_annotation_labels()
colors, red = _get_color_list(annotations=True)
color_cycle = cycle(colors)
for key, color in segment_colors.items():
if color != red and key in labels:
next(color_cycle)
for idx, key in enumerate(labels):
if key in segment_colors:
continue
elif key.lower().startswith('bad') or \
key.lower().startswith('edge'):
segment_colors[key] = red
else:
segment_colors[key] = next(color_cycle)
self.mne.annotation_segment_colors = segment_colors
# init a couple other annotation-related variables
self.mne.visible_annotations = {label: True for label in labels}
self.mne.show_hide_annotation_checkboxes = None
def _select_annotation_span(self, vmin, vmax):
"""Handle annotation span selector."""
onset = _sync_onset(self.mne.inst, vmin, True) - self.mne.first_time
duration = vmax - vmin
buttons = self.mne.fig_annotation.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
active_idx = labels.index(buttons.value_selected)
_merge_annotations(onset, onset + duration, labels[active_idx],
self.mne.inst.annotations)
# if adding a span with an annotation label that is hidden, show it
if not self.mne.visible_annotations[buttons.value_selected]:
self.mne.show_hide_annotation_checkboxes.set_active(active_idx)
self._redraw(update_data=False, annotations=True)
def _remove_annotation_hover_line(self):
"""Remove annotation line from the plot and reactivate selector."""
if self.mne.annotation_hover_line is not None:
self.mne.annotation_hover_line.remove()
self.mne.annotation_hover_line = None
self.mne.ax_main.selector.active = True
self.canvas.draw()
def _modify_annotation(self, old_x, new_x):
"""Modify annotation."""
segment = np.array(np.where(self.mne.annotation_segments == old_x))
if segment.shape[1] == 0:
return
raw = self.mne.inst
annotations = raw.annotations
first_time = self.mne.first_time
idx = [segment[0][0], segment[1][0]]
onset = _sync_onset(raw, self.mne.annotation_segments[idx[0]][0], True)
ann_idx = np.where(annotations.onset == onset - first_time)[0]
if idx[1] == 0: # start of annotation
onset = _sync_onset(raw, new_x, True) - first_time
duration = annotations.duration[ann_idx] + old_x - new_x
else: # end of annotation
onset = annotations.onset[ann_idx]
duration = _sync_onset(raw, new_x, True) - onset - first_time
if duration < 0:
onset += duration
duration *= -1.
_merge_annotations(onset, onset + duration,
annotations.description[ann_idx],
annotations, ann_idx)
self._draw_annotations()
self._remove_annotation_hover_line()
self.canvas.draw_idle()
def _clear_annotations(self):
"""Clear all annotations from the figure."""
for annot in list(self.mne.annotations):
annot.remove()
self.mne.annotations.remove(annot)
for annot in list(self.mne.hscroll_annotations):
annot.remove()
self.mne.hscroll_annotations.remove(annot)
for text in list(self.mne.annotation_texts):
text.remove()
self.mne.annotation_texts.remove(text)
def _draw_annotations(self):
"""Draw (or redraw) the annotation spans."""
self._clear_annotations()
self._update_annotation_segments()
segments = self.mne.annotation_segments
times = self.mne.times
ax = self.mne.ax_main
ylim = ax.get_ylim()
for idx, (start, end) in enumerate(segments):
descr = self.mne.inst.annotations.description[idx]
segment_color = self.mne.annotation_segment_colors[descr]
kwargs = dict(color=segment_color, alpha=0.3,
zorder=self.mne.zorder['ann'])
if self.mne.visible_annotations[descr]:
# draw all segments on ax_hscroll
annot = self.mne.ax_hscroll.fill_betweenx((0, 1), start, end,
**kwargs)
self.mne.hscroll_annotations.append(annot)
# draw only visible segments on ax_main
visible_segment = np.clip([start, end], times[0], times[-1])
if np.diff(visible_segment) > 0:
annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs)
self.mne.annotations.append(annot)
xy = (visible_segment.mean(), ylim[1])
text = ax.annotate(descr, xy, xytext=(0, 9),
textcoords='offset points', ha='center',
va='baseline', color=segment_color)
self.mne.annotation_texts.append(text)
def _update_annotation_segments(self):
"""Update the array of annotation start/end times."""
segments = list()
raw = self.mne.inst
if len(raw.annotations):
for idx, annot in enumerate(raw.annotations):
annot_start = _sync_onset(raw, annot['onset'])
annot_end = annot_start + max(annot['duration'],
1 / self.mne.info['sfreq'])
segments.append((annot_start, annot_end))
self.mne.annotation_segments = np.array(segments)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CHANNEL SELECTION GUI
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _create_selection_fig(self):
"""Create channel selection dialog window."""
from matplotlib.colors import to_rgb
from matplotlib.widgets import RadioButtons
from matplotlib.gridspec import GridSpec
# make figure
fig = self._new_child_figure(figsize=(3, 7),
FigureClass=MNESelectionFigure,
fig_name='fig_selection',
window_title='Channel selection')
# XXX when matplotlib 3.3 is min version, replace this with
# XXX gs = fig.add_gridspec(15, 1)
gs = GridSpec(nrows=15, ncols=1)
# add sensor plot at top
fig.mne.sensor_ax = fig.add_subplot(gs[:5])
plot_sensors(self.mne.info, kind='select', ch_type='all', title='',
axes=fig.mne.sensor_ax, ch_groups=self.mne.group_by,
show=False)
fig.subplots_adjust(bottom=0.01, top=0.99, left=0.01, right=0.99)
# style the sensors so the selection is easier to distinguish
fig.lasso.linewidth_selected = 2
self._update_highlighted_sensors()
# add radio button axes
radio_ax = fig.add_subplot(gs[5:-3], frame_on=False, aspect='equal')
fig.mne.radio_ax = radio_ax
selections_dict = self.mne.ch_selections
selections_dict.update(Custom=np.array([], dtype=int)) # for lasso
labels = list(selections_dict)
# make & style the radio buttons
activecolor = to_rgb(self.mne.fgcolor) + (0.5,)
radio_ax.buttons = RadioButtons(radio_ax, labels,
activecolor=activecolor)
fig.mne.old_selection = 0
for circle in radio_ax.buttons.circles:
circle.set_radius(0.25 / len(labels))
circle.set_linewidth(2)
circle.set_edgecolor(self.mne.fgcolor)
fig._style_radio_buttons_butterfly()
# add instructions at bottom
instructions = (
'To use a custom selection, first click-drag on the sensor plot '
'to "lasso" the sensors you want to select, or hold Ctrl while '
'clicking individual sensors. Holding Ctrl while click-dragging '
'allows a lasso selection adding to (rather than replacing) the '
'existing selection.')
instructions_ax = fig.add_subplot(gs[-3:], frame_on=False)
instructions_ax.text(0.04, 0.08, instructions, va='bottom', ha='left',
ma='left', wrap=True)
instructions_ax.set_axis_off()
# add event listeners
radio_ax.buttons.on_clicked(fig._radiopress)
fig.canvas.mpl_connect('lasso_event', fig._set_custom_selection)
def _change_selection_vscroll(self, event):
"""Handle clicks on vertical scrollbar when using selections."""
buttons = self.mne.fig_selection.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
offset = 0
selections_dict = self.mne.ch_selections
for idx, label in enumerate(labels):
offset += len(selections_dict[label])
if event.ydata < offset:
with _events_off(buttons):
buttons.set_active(idx)
self.mne.fig_selection._radiopress(event)
return
def _update_selection(self):
"""Update visible channels based on selection dialog interaction."""
selections_dict = self.mne.ch_selections
fig = self.mne.fig_selection
buttons = fig.mne.radio_ax.buttons
label = buttons.value_selected
labels = [_label.get_text() for _label in buttons.labels]
self.mne.fig_selection.mne.old_selection = labels.index(label)
self.mne.picks = selections_dict[label]
self.mne.n_channels = len(self.mne.picks)
self._update_highlighted_sensors()
# if "Vertex" is defined, some channels appear twice, so if
# "Vertex" is selected, ch_start should be the *first* match;
# otherwise it should be the *last* match (since "Vertex" is
# always the first selection group, if it exists).
index = 0 if label == 'Vertex' else -1
ch_order = np.concatenate(list(selections_dict.values()))
ch_start = np.where(ch_order == self.mne.picks[0])[0][index]
self.mne.ch_start = ch_start
self._update_trace_offsets()
self._update_vscroll()
self._redraw(annotations=True)
def _make_butterfly_selections_dict(self):
"""Make an altered copy of the selections dict for butterfly mode."""
from ..utils import _get_stim_channel
selections_dict = deepcopy(self.mne.ch_selections)
# remove potential duplicates
for selection_group in ('Vertex', 'Custom'):
selections_dict.pop(selection_group, None)
# if present, remove stim channel from non-misc selection groups
stim_ch = _get_stim_channel(None, self.mne.info, raise_error=False)
if len(stim_ch):
stim_pick = self.mne.ch_names.tolist().index(stim_ch[0])
for _sel, _picks in selections_dict.items():
if _sel != 'Misc':
stim_mask = np.in1d(_picks, [stim_pick], invert=True)
selections_dict[_sel] = np.array(_picks)[stim_mask]
return selections_dict
def _update_highlighted_sensors(self):
"""Update the sensor plot to show what is selected."""
inds = np.in1d(self.mne.fig_selection.lasso.ch_names,
self.mne.ch_names[self.mne.picks]).nonzero()[0]
self.mne.fig_selection.lasso.select_many(inds)
def _update_bad_sensors(self, pick, mark_bad):
"""Update the sensor plot to reflect (un)marked bad channels."""
# replicate plotting order from plot_sensors(), to get index right
sensor_picks = list()
ch_indices = channel_indices_by_type(self.mne.info)
for this_type in _DATA_CH_TYPES_SPLIT:
if this_type in self.mne.ch_types:
sensor_picks.extend(ch_indices[this_type])
sensor_idx = np.in1d(sensor_picks, pick).nonzero()[0]
# change the sensor color
fig = self.mne.fig_selection
fig.lasso.ec[sensor_idx, 0] = float(mark_bad) # change R of RGBA array
fig.lasso.collection.set_edgecolors(fig.lasso.ec)
fig.canvas.draw_idle()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PROJECTORS & BAD CHANNELS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _create_proj_fig(self):
"""Create the projectors dialog window."""
from matplotlib.widgets import Button, CheckButtons
projs = self.mne.projs
labels = [p['desc'] for p in projs]
for ix, active in enumerate(self.mne.projs_active):
if active:
labels[ix] += ' (already applied)'
# make figure
width = max([4.5, max([len(label) for label in labels]) / 8 + 0.5])
height = (len(projs) + 1) / 6 + 1.5
fig = self._new_child_figure(figsize=(width, height),
fig_name='fig_proj',
window_title='SSP projection vectors')
# pass through some proj fig keypresses to the parent
fig.canvas.mpl_connect(
'key_press_event',
lambda ev: self._keypress(ev) if ev.key in 'jJ' else None)
# make axes
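        # reserve a bottom strip (1/6 inch) for the "toggle all" button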
offset = (1 / 6 / height)
position = (0, offset, 1, 0.8 - offset)
ax = fig.add_axes(position, frame_on=False, aspect='equal')
# make title
first_line = ('Projectors already applied to the data are dimmed.\n'
if any(self.mne.projs_active) else '')
second_line = 'Projectors marked with "X" are active on the plot.'
ax.set_title(f'{first_line}{second_line}')
# draw checkboxes
checkboxes = CheckButtons(ax, labels=labels, actives=self.mne.projs_on)
# gray-out already applied projectors
for label, rect, lines in zip(checkboxes.labels,
checkboxes.rectangles,
checkboxes.lines):
if label.get_text().endswith('(already applied)'):
label.set_color('0.5')
rect.set_edgecolor('0.7')
[x.set_color('0.7') for x in lines]
rect.set_linewidth(1)
# add "toggle all" button
ax_all = fig.add_axes((0.25, 0.01, 0.5, offset), frame_on=True)
fig.mne.proj_all = Button(ax_all, 'Toggle all')
# add event listeners
checkboxes.on_clicked(self._toggle_proj_checkbox)
fig.mne.proj_all.on_clicked(partial(self._toggle_proj_checkbox,
toggle_all=True))
# save params
fig.mne.proj_checkboxes = checkboxes
# show figure
self.mne.fig_proj.canvas.draw()
plt_show(fig=self.mne.fig_proj, warn=False)
def _toggle_proj_fig(self, event=None):
"""Show/hide the projectors dialog window."""
if self.mne.fig_proj is None:
self._create_proj_fig()
else:
from matplotlib.pyplot import close
close(self.mne.fig_proj)
def _toggle_proj_checkbox(self, event, toggle_all=False):
"""Perform operations when proj boxes clicked."""
on = self.mne.projs_on
applied = self.mne.projs_active
fig = self.mne.fig_proj
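        # "toggle all" turns every proj on, unless all are on (then all off);
        # otherwise read the new state directly from the checkboxes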
new_state = (np.full_like(on, not all(on)) if toggle_all else
np.array(fig.mne.proj_checkboxes.get_status()))
# update Xs when toggling all
if fig is not None:
if toggle_all:
with _events_off(fig.mne.proj_checkboxes):
for ix in np.where(on != new_state)[0]:
fig.mne.proj_checkboxes.set_active(ix)
# don't allow disabling already-applied projs
with _events_off(fig.mne.proj_checkboxes):
for ix in np.where(applied)[0]:
if not new_state[ix]:
fig.mne.proj_checkboxes.set_active(ix)
new_state[applied] = True
# update the data if necessary
if not np.array_equal(on, new_state):
self.mne.projs_on = new_state
self._update_projector()
self._redraw()
def _update_projector(self):
"""Update the data after projectors (or bads) have changed."""
inds = np.where(self.mne.projs_on)[0] # doesn't include "active" projs
# copy projs from full list (self.mne.projs) to info object
self.mne.info['projs'] = [deepcopy(self.mne.projs[ix]) for ix in inds]
# compute the projection operator
proj, wh_chs = _setup_plot_projector(self.mne.info, self.mne.noise_cov,
True, self.mne.use_noise_cov)
self.mne.whitened_ch_names = list(wh_chs)
self.mne.projector = proj
def _toggle_bad_channel(self, idx):
"""Mark/unmark bad channels; `idx` is index of *visible* channels."""
pick = self.mne.picks[idx]
ch_name = self.mne.ch_names[pick]
# add/remove from bads list
bads = self.mne.info['bads']
marked_bad = ch_name not in bads
if marked_bad:
bads.append(ch_name)
color = self.mne.ch_color_bad
else:
while ch_name in bads: # to make sure duplicates are removed
bads.remove(ch_name)
color = self.mne.ch_colors[idx]
self.mne.info['bads'] = bads
# update sensor color (if in selection mode)
if self.mne.fig_selection is not None:
self._update_bad_sensors(pick, marked_bad)
# update vscroll color
vscroll_idx = (self.mne.ch_order == pick).nonzero()[0]
for _idx in vscroll_idx:
self.mne.ax_vscroll.patches[_idx].set_color(color)
# redraw
self._update_projector()
self._redraw()
def _toggle_bad_epoch(self, event):
"""Mark/unmark bad epochs."""
epoch_num = self._get_epoch_num_from_time(event.xdata)
epoch_ix = self.mne.inst.selection.tolist().index(epoch_num)
if epoch_num in self.mne.bad_epochs:
self.mne.bad_epochs.remove(epoch_num)
color = 'none'
else:
self.mne.bad_epochs.append(epoch_num)
self.mne.bad_epochs.sort()
color = self.mne.epoch_color_bad
self.mne.ax_hscroll.patches[epoch_ix].set_color(color)
self._redraw(update_data=False)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SCROLLBARS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _update_zen_mode_offsets(self):
"""Compute difference between main axes edges and scrollbar edges."""
self.mne.fig_size_px = self._get_size_px()
self.mne.zen_w = (self.mne.ax_vscroll.get_position().xmax -
self.mne.ax_main.get_position().xmax)
self.mne.zen_h = (self.mne.ax_main.get_position().ymin -
self.mne.ax_hscroll.get_position().ymin)
def _toggle_scrollbars(self):
"""Show or hide scrollbars (A.K.A. zen mode)."""
self._update_zen_mode_offsets()
# grow/shrink main axes to take up space from (or make room for)
# scrollbars. We can't use ax.set_position() because axes are
# locatable, so we use subplots_adjust
should_show = not self.mne.scrollbars_visible
margins = {side: getattr(self.subplotpars, side)
for side in ('left', 'bottom', 'right', 'top')}
# if should_show, bottom margin moves up; right margin moves left
margins['bottom'] += (1 if should_show else -1) * self.mne.zen_h
margins['right'] += (-1 if should_show else 1) * self.mne.zen_w
self.subplots_adjust(**margins)
# handle x-axis label
self.mne.zen_xlabel.set_visible(not should_show)
# show/hide other UI elements
for elem in ('ax_hscroll', 'ax_vscroll', 'ax_proj', 'ax_help'):
if elem == 'ax_vscroll' and self.mne.butterfly:
continue
# sometimes we don't have a proj button (ax_proj)
if getattr(self.mne, elem, None) is not None:
getattr(self.mne, elem).set_visible(should_show)
self.mne.scrollbars_visible = should_show
def _update_vscroll(self):
"""Update the vertical scrollbar (channel) selection indicator."""
self.mne.vsel_patch.set_xy((0, self.mne.ch_start))
self.mne.vsel_patch.set_height(self.mne.n_channels)
self._update_yaxis_labels()
def _update_hscroll(self):
"""Update the horizontal scrollbar (time) selection indicator."""
self.mne.hsel_patch.set_xy((self.mne.t_start, 0))
self.mne.hsel_patch.set_width(self.mne.duration)
def _check_update_hscroll_clicked(self, event):
"""Handle clicks on horizontal scrollbar."""
time = event.xdata - self.mne.duration / 2
max_time = (self.mne.n_times / self.mne.info['sfreq'] +
self.mne.first_time - self.mne.duration)
time = np.clip(time, self.mne.first_time, max_time)
if self.mne.is_epochs:
ix = np.searchsorted(self.mne.boundary_times[1:], time)
time = self.mne.boundary_times[ix]
if self.mne.t_start != time:
self.mne.t_start = time
self._update_hscroll()
return True
return False
def _check_update_vscroll_clicked(self, event):
"""Update vscroll patch on click, return True if location changed."""
new_ch_start = np.clip(
int(round(event.ydata - self.mne.n_channels / 2)),
0, len(self.mne.ch_order) - self.mne.n_channels)
if self.mne.ch_start != new_ch_start:
self.mne.ch_start = new_ch_start
self._update_picks()
self._update_vscroll()
return True
return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SCALEBARS & Y-AXIS LABELS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _show_scalebars(self):
"""Add channel scale bars."""
for offset, pick in zip(self.mne.trace_offsets, self.mne.picks):
this_name = self.mne.ch_names[pick]
this_type = self.mne.ch_types[pick]
if (this_type not in self.mne.scalebars and
this_type != 'stim' and
this_type in self.mne.scalings and
this_type in getattr(self.mne, 'units', {}) and
this_type in getattr(self.mne, 'unit_scalings', {}) and
this_name not in self.mne.info['bads'] and
this_name not in self.mne.whitened_ch_names):
x = (self.mne.times[0] + self.mne.first_time,) * 2
denom = 4 if self.mne.butterfly else 2
y = tuple(np.array([-1, 1]) / denom + offset)
self._draw_one_scalebar(x, y, this_type)
def _hide_scalebars(self):
"""Remove channel scale bars."""
for bar in self.mne.scalebars.values():
bar.remove()
for text in self.mne.scalebar_texts.values():
text.remove()
self.mne.scalebars = dict()
self.mne.scalebar_texts = dict()
def _toggle_scalebars(self, event):
"""Show/hide the scalebars."""
if self.mne.scalebars_visible:
self._hide_scalebars()
else:
self._update_picks()
self._show_scalebars()
# toggle
self.mne.scalebars_visible = not self.mne.scalebars_visible
self._redraw(update_data=False)
def _draw_one_scalebar(self, x, y, ch_type):
"""Draw a scalebar."""
from .utils import _simplify_float
color = '#AA3377' # purple
kwargs = dict(color=color, zorder=self.mne.zorder['scalebar'])
scaler = 1 if self.mne.butterfly else 2
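        # value (in physical units) represented by the bar, used for its label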
inv_norm = (scaler *
self.mne.scalings[ch_type] *
self.mne.unit_scalings[ch_type] /
self.mne.scale_factor)
bar = self.mne.ax_main.plot(x, y, lw=4, **kwargs)[0]
label = f'{_simplify_float(inv_norm)} {self.mne.units[ch_type]} '
text = self.mne.ax_main.text(x[1], y[1], label, va='baseline',
ha='right', size='xx-small', **kwargs)
self.mne.scalebars[ch_type] = bar
self.mne.scalebar_texts[ch_type] = text
def _update_yaxis_labels(self):
"""Change the y-axis labels."""
if self.mne.butterfly and self.mne.fig_selection is not None:
exclude = ('Vertex', 'Custom')
ticklabels = list(self.mne.ch_selections)
keep_mask = np.in1d(ticklabels, exclude, invert=True)
ticklabels = [t.replace('Left-', 'L-').replace('Right-', 'R-')
for t in ticklabels] # avoid having to rotate labels
ticklabels = np.array(ticklabels)[keep_mask]
elif self.mne.butterfly:
_, ixs, _ = np.intersect1d(_DATA_CH_TYPES_ORDER_DEFAULT,
self.mne.ch_types, return_indices=True)
ixs.sort()
ticklabels = np.array(_DATA_CH_TYPES_ORDER_DEFAULT)[ixs]
else:
ticklabels = self.mne.ch_names[self.mne.picks]
texts = self.mne.ax_main.set_yticklabels(ticklabels, picker=True)
for text in texts:
sty = ('italic' if text.get_text() in self.mne.whitened_ch_names
else 'normal')
text.set_style(sty)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DATA TRACES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _toggle_butterfly(self):
"""Enter or leave butterfly mode."""
self.mne.ax_vscroll.set_visible(self.mne.butterfly)
self.mne.butterfly = not self.mne.butterfly
self.mne.scale_factor *= 0.5 if self.mne.butterfly else 2.
self._update_picks()
self._update_trace_offsets()
self._redraw(annotations=True)
if self.mne.fig_selection is not None:
self.mne.fig_selection._style_radio_buttons_butterfly()
def _update_picks(self):
"""Compute which channel indices to show."""
if self.mne.butterfly and self.mne.ch_selections is not None:
selections_dict = self._make_butterfly_selections_dict()
self.mne.picks = np.concatenate(tuple(selections_dict.values()))
elif self.mne.butterfly:
self.mne.picks = np.arange(self.mne.ch_names.shape[0])
else:
_slice = slice(self.mne.ch_start,
self.mne.ch_start + self.mne.n_channels)
self.mne.picks = self.mne.ch_order[_slice]
self.mne.n_channels = len(self.mne.picks)
def _get_epoch_num_from_time(self, time):
epoch_nums = self.mne.inst.selection
return epoch_nums[np.searchsorted(self.mne.boundary_times[1:], time)]
def _load_data(self, start=None, stop=None):
"""Retrieve the bit of data we need for plotting."""
if 'raw' in (self.mne.instance_type, self.mne.ica_type):
return self.mne.inst[:, start:stop]
else:
ix = np.searchsorted(self.mne.boundary_times, self.mne.t_start)
item = slice(ix, ix + self.mne.n_epochs)
data = np.concatenate(self.mne.inst.get_data(item=item), axis=-1)
times = np.arange(len(self.mne.inst) * len(self.mne.inst.times)
)[start:stop] / self.mne.info['sfreq']
return data, times
def _update_data(self):
"""Update self.mne.data after user interaction."""
from ..filter import _overlap_add_filter, _filtfilt
# update time
start_sec = self.mne.t_start - self.mne.first_time
stop_sec = start_sec + self.mne.duration
if self.mne.is_epochs:
start, stop = np.round(np.array([start_sec, stop_sec])
* self.mne.info['sfreq']).astype(int)
else:
start, stop = self.mne.inst.time_as_index((start_sec, stop_sec))
# get the data
data, times = self._load_data(start, stop)
# apply projectors
if self.mne.projector is not None:
data = self.mne.projector @ data
# get only the channels we're displaying
picks = self.mne.picks
data = data[picks]
# remove DC
if self.mne.remove_dc:
data -= data.mean(axis=1, keepdims=True)
# filter (with same defaults as raw.filter())
if self.mne.filter_coefs is not None:
starts, stops = self.mne.filter_bounds
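            # keep segments overlapping the window, clipped & window-relative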
mask = (starts < stop) & (stops > start)
starts = np.maximum(starts[mask], start) - start
stops = np.minimum(stops[mask], stop) - start
for _start, _stop in zip(starts, stops):
_picks = np.where(np.in1d(picks, self.mne.picks_data))[0]
if len(_picks) == 0:
break
this_data = data[_picks, _start:_stop]
if isinstance(self.mne.filter_coefs, np.ndarray): # FIR
this_data = _overlap_add_filter(
this_data, self.mne.filter_coefs, copy=False)
else: # IIR
this_data = _filtfilt(
this_data, self.mne.filter_coefs, None, 1, False)
data[_picks, _start:_stop] = this_data
# scale the data for display in a 1-vertical-axis-unit slot
this_names = self.mne.ch_names[picks]
this_types = self.mne.ch_types[picks]
stims = this_types == 'stim'
white = np.logical_and(np.in1d(this_names, self.mne.whitened_ch_names),
np.in1d(this_names, self.mne.info['bads'],
invert=True))
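        # stim chs use their own max, whitened (non-bad) chs use 'whitened';
        # zero norms become 1 to avoid division by zero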
norms = np.vectorize(self.mne.scalings.__getitem__)(this_types)
norms[stims] = data[stims].max(axis=-1)
norms[white] = self.mne.scalings['whitened']
norms[norms == 0] = 1
data /= 2 * norms[:, np.newaxis]
self.mne.data = data
self.mne.times = times
def _update_trace_offsets(self):
"""Compute viewport height and adjust offsets."""
# simultaneous selection and butterfly modes
if self.mne.butterfly and self.mne.ch_selections is not None:
self._update_picks()
selections_dict = self._make_butterfly_selections_dict()
n_offsets = len(selections_dict)
sel_order = list(selections_dict)
offsets = np.array([])
for pick in self.mne.picks:
for sel in sel_order:
if pick in selections_dict[sel]:
offsets = np.append(offsets, sel_order.index(sel))
# butterfly only
elif self.mne.butterfly:
unique_ch_types = set(self.mne.ch_types)
n_offsets = len(unique_ch_types)
ch_type_order = [_type for _type in _DATA_CH_TYPES_ORDER_DEFAULT
if _type in unique_ch_types]
offsets = np.array([ch_type_order.index(ch_type)
for ch_type in self.mne.ch_types])
# normal mode
else:
n_offsets = self.mne.n_channels
offsets = np.arange(n_offsets, dtype=float)
# update ylim, ticks, vertline, and scrollbar patch
        ylim = (n_offsets - 0.5, -0.5)  # inverted y axis → new chs at bottom
self.mne.ax_main.set_ylim(ylim)
self.mne.ax_main.set_yticks(np.unique(offsets))
self.mne.vsel_patch.set_height(self.mne.n_channels)
# store new offsets, update axis labels
self.mne.trace_offsets = offsets
self._update_yaxis_labels()
def _draw_traces(self):
"""Draw (or redraw) the channel data."""
from matplotlib.colors import to_rgba_array
from matplotlib.patches import Rectangle
# clear scalebars
if self.mne.scalebars_visible:
self._hide_scalebars()
# get info about currently visible channels
picks = self.mne.picks
ch_names = self.mne.ch_names[picks]
ch_types = self.mne.ch_types[picks]
bad_bool = np.in1d(ch_names, self.mne.info['bads'])
# colors
good_ch_colors = [self.mne.ch_color_dict[_type] for _type in ch_types]
ch_colors = to_rgba_array(
[self.mne.ch_color_bad if _bad else _color
for _bad, _color in zip(bad_bool, good_ch_colors)])
self.mne.ch_colors = np.array(good_ch_colors) # use for unmarking bads
labels = self.mne.ax_main.yaxis.get_ticklabels()
if self.mne.butterfly:
for label in labels:
label.set_color(self.mne.fgcolor)
else:
for label, color in zip(labels, ch_colors):
label.set_color(color)
# decim
decim = np.ones_like(picks)
data_picks_mask = np.in1d(picks, self.mne.picks_data)
decim[data_picks_mask] = self.mne.decim
# decim can vary by channel type, so compute different `times` vectors
decim_times = {decim_value:
self.mne.times[::decim_value] + self.mne.first_time
for decim_value in set(decim)}
# add more traces if needed
n_picks = len(picks)
if n_picks > len(self.mne.traces):
n_new_chs = n_picks - len(self.mne.traces)
new_traces = self.mne.ax_main.plot(np.full((1, n_new_chs), np.nan),
**self.mne.trace_kwargs)
self.mne.traces.extend(new_traces)
# remove extra traces if needed
extra_traces = self.mne.traces[n_picks:]
for trace in extra_traces:
trace.remove()
self.mne.traces = self.mne.traces[:n_picks]
# check for bad epochs
time_range = (self.mne.times + self.mne.first_time)[[0, -1]]
if self.mne.instance_type == 'epochs':
epoch_ix = np.searchsorted(self.mne.boundary_times, time_range)
epoch_ix = np.arange(epoch_ix[0], epoch_ix[1])
epoch_nums = self.mne.inst.selection[epoch_ix[0]:epoch_ix[-1] + 1]
visible_bad_epochs = epoch_nums[
np.in1d(epoch_nums, self.mne.bad_epochs).nonzero()]
while len(self.mne.epoch_traces):
self.mne.epoch_traces.pop(-1).remove()
# handle custom epoch colors (for autoreject integration)
if self.mne.epoch_colors is None:
                # shape: n_traces × RGBA → n_traces × n_epochs × RGBA
custom_colors = np.tile(ch_colors[:, None, :],
(1, self.mne.n_epochs, 1))
else:
custom_colors = np.empty((len(self.mne.picks),
self.mne.n_epochs, 4))
for ii, _epoch_ix in enumerate(epoch_ix):
this_colors = self.mne.epoch_colors[_epoch_ix]
custom_colors[:, ii] = to_rgba_array([this_colors[_ch]
for _ch in picks])
# override custom color on bad epochs
for _bad in visible_bad_epochs:
_ix = epoch_nums.tolist().index(_bad)
_cols = np.array([self.mne.epoch_color_bad,
self.mne.ch_color_bad])[bad_bool.astype(int)]
custom_colors[:, _ix] = to_rgba_array(_cols)
# update traces
ylim = self.mne.ax_main.get_ylim()
for ii, line in enumerate(self.mne.traces):
this_name = ch_names[ii]
this_type = ch_types[ii]
this_offset = self.mne.trace_offsets[ii]
this_times = decim_times[decim[ii]]
this_data = this_offset - self.mne.data[ii] * self.mne.scale_factor
this_data = this_data[..., ::decim[ii]]
# clip
if self.mne.clipping == 'clamp':
this_data = np.clip(this_data, -0.5, 0.5)
elif self.mne.clipping is not None:
clip = self.mne.clipping * (0.2 if self.mne.butterfly else 1)
bottom = max(this_offset - clip, ylim[1])
height = min(2 * clip, ylim[0] - bottom)
rect = Rectangle(xy=np.array([time_range[0], bottom]),
width=time_range[1] - time_range[0],
height=height,
transform=self.mne.ax_main.transData)
line.set_clip_path(rect)
# prep z order
is_bad_ch = this_name in self.mne.info['bads']
this_z = self.mne.zorder['bads' if is_bad_ch else 'data']
if self.mne.butterfly and not is_bad_ch:
this_z = self.mne.zorder.get(this_type, this_z)
# plot each trace multiple times to get the desired epoch coloring.
# use masked arrays to plot discontinuous epochs that have the same
# color in a single plot() call.
if self.mne.instance_type == 'epochs':
this_colors = custom_colors[ii]
for cix, color in enumerate(np.unique(this_colors, axis=0)):
bool_ixs = (this_colors == color).all(axis=1)
mask = np.zeros_like(this_times, dtype=bool)
_starts = self.mne.boundary_times[epoch_ix][bool_ixs]
_stops = self.mne.boundary_times[epoch_ix + 1][bool_ixs]
for _start, _stop in zip(_starts, _stops):
_mask = np.logical_and(_start < this_times,
this_times <= _stop)
mask = mask | _mask
_times = np.ma.masked_array(this_times, mask=~mask)
# always use the existing traces first
if cix == 0:
line.set_xdata(_times)
line.set_ydata(this_data)
line.set_color(color)
line.set_zorder(this_z)
else: # make new traces as needed
_trace = self.mne.ax_main.plot(
_times, this_data, color=color, zorder=this_z,
**self.mne.trace_kwargs)
self.mne.epoch_traces.extend(_trace)
else:
line.set_xdata(this_times)
line.set_ydata(this_data)
line.set_color(ch_colors[ii])
line.set_zorder(this_z)
# update xlim
self.mne.ax_main.set_xlim(*time_range)
# draw scalebars maybe
if self.mne.scalebars_visible:
self._show_scalebars()
# redraw event lines
if self.mne.event_times is not None:
self._draw_event_lines()
def _redraw(self, update_data=True, annotations=False):
"""Redraw (convenience method for frequently grouped actions)."""
if update_data:
self._update_data()
if self.mne.vline_visible and self.mne.is_epochs:
# prevent flickering
_ = self._recompute_epochs_vlines(None)
self._draw_traces()
if annotations and not self.mne.is_epochs:
self._draw_annotations()
self.canvas.draw_idle()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# EVENT LINES AND MARKER LINES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _draw_event_lines(self):
"""Draw the event lines and their labels."""
from matplotlib.colors import to_rgba_array
from matplotlib.collections import LineCollection
if self.mne.event_nums is not None:
mask = np.logical_and(self.mne.event_times >= self.mne.times[0],
self.mne.event_times <= self.mne.times[-1])
this_event_times = self.mne.event_times[mask]
this_event_nums = self.mne.event_nums[mask]
n_visible_events = len(this_event_times)
colors = to_rgba_array([self.mne.event_color_dict[n]
for n in this_event_nums])
# create event lines
ylim = self.mne.ax_main.get_ylim()
xs = np.repeat(this_event_times, 2)
ys = np.tile(ylim, n_visible_events)
segs = np.vstack([xs, ys]).T.reshape(n_visible_events, 2, 2)
event_lines = LineCollection(segs, linewidths=0.5, colors=colors,
zorder=self.mne.zorder['events'])
self.mne.ax_main.add_collection(event_lines)
self.mne.event_lines = event_lines
# create event labels
while len(self.mne.event_texts):
self.mne.event_texts.pop().remove()
for _t, _n, _c in zip(this_event_times, this_event_nums, colors):
label = self.mne.event_id_rev.get(_n, _n)
this_text = self.mne.ax_main.annotate(
label, (_t, ylim[1]), ha='center', va='baseline',
color=self.mne.fgcolor, xytext=(0, 2),
textcoords='offset points', fontsize=8)
self.mne.event_texts.append(this_text)
def _recompute_epochs_vlines(self, xdata):
"""Recompute vline x-coords for epochs plots (after scrolling, etc)."""
# special case: changed view duration w/ "home" or "end" key
# (no click event, hence no xdata)
if xdata is None:
xdata = np.array(self.mne.vline.get_segments())[0, 0, 0]
# compute the (continuous) times for the lines on each epoch
epoch_dur = np.diff(self.mne.boundary_times[:2])[0]
rel_time = xdata % epoch_dur
abs_time = self.mne.times[0]
xs = np.arange(self.mne.n_epochs) * epoch_dur + abs_time + rel_time
segs = np.array(self.mne.vline.get_segments())
# recreate segs from scratch in case view duration changed
# (i.e., handle case when n_segments != n_epochs)
segs = np.tile([[0.], [1.]], (len(xs), 1, 2)) # y values
segs[..., 0] = np.tile(xs[:, None], 2) # x values
self.mne.vline.set_segments(segs)
return rel_time
def _show_vline(self, xdata):
"""Show the vertical line(s)."""
if self.mne.is_epochs:
# convert xdata to be epoch-relative (for the text)
rel_time = self._recompute_epochs_vlines(xdata)
xdata = rel_time + self.mne.inst.times[0]
else:
self.mne.vline.set_xdata(xdata)
self.mne.vline_hscroll.set_xdata(xdata)
self.mne.vline_text.set_text(f'{xdata:0.2f} s ')
self._toggle_vline(True)
def _toggle_vline(self, visible):
"""Show or hide the vertical line(s)."""
for artist in (self.mne.vline, self.mne.vline_hscroll,
self.mne.vline_text):
if artist is not None:
artist.set_visible(visible)
self.draw_artist(artist)
self.mne.vline_visible = visible
self.canvas.draw_idle()
class MNELineFigure(MNEFigure):
"""Interactive figure for non-scrolling line plots."""
def __init__(self, inst, n_axes, figsize, **kwargs):
super().__init__(figsize=figsize, inst=inst, **kwargs)
# AXES: default margins (inches)
l_margin = 0.8
r_margin = 0.2
b_margin = 0.65
t_margin = 0.35
# AXES: default margins (figure-relative coordinates)
left = self._inch_to_rel(l_margin)
right = 1 - self._inch_to_rel(r_margin)
bottom = self._inch_to_rel(b_margin, horiz=False)
top = 1 - self._inch_to_rel(t_margin, horiz=False)
# AXES: make subplots
axes = [self.add_subplot(n_axes, 1, 1)]
for ix in range(1, n_axes):
axes.append(self.add_subplot(n_axes, 1, ix + 1, sharex=axes[0]))
self.subplotpars.update(left=left, bottom=bottom, top=top, right=right,
hspace=0.4)
# save useful things
self.mne.ax_list = axes
def _resize(self, event):
"""Handle resize event."""
old_width, old_height = self.mne.fig_size_px
new_width, new_height = self._get_size_px()
new_margins = _calc_new_margins(
self, old_width, old_height, new_width, new_height)
self.subplots_adjust(**new_margins)
self.mne.fig_size_px = (new_width, new_height)
def _figure(toolbar=True, FigureClass=MNEFigure, **kwargs):
"""Instantiate a new figure."""
from matplotlib import rc_context
from matplotlib.pyplot import figure
title = kwargs.pop('window_title', None) # extract title before init
rc = dict() if toolbar else dict(toolbar='none')
with rc_context(rc=rc):
fig = figure(FigureClass=FigureClass, **kwargs)
if title is not None:
_set_window_title(fig, title)
# add event callbacks
fig._add_default_callbacks()
return fig
def _browse_figure(inst, **kwargs):
"""Instantiate a new MNE browse-style figure."""
from .utils import _get_figsize_from_config
figsize = kwargs.pop('figsize', _get_figsize_from_config())
fig = _figure(inst=inst, toolbar=False, FigureClass=MNEBrowseFigure,
figsize=figsize, **kwargs)
# initialize zen mode (can't do in __init__ due to get_position() calls)
fig.canvas.draw()
fig._update_zen_mode_offsets()
fig._resize(None) # needed for MPL >=3.4
# if scrollbars are supposed to start hidden, set to True and then toggle
if not fig.mne.scrollbars_visible:
fig.mne.scrollbars_visible = True
fig._toggle_scrollbars()
return fig
def _line_figure(inst, axes=None, picks=None, **kwargs):
"""Instantiate a new line figure."""
from matplotlib.axes import Axes
# if picks is None, only show data channels
allowed_ch_types = (_DATA_CH_TYPES_SPLIT if picks is None else
_VALID_CHANNEL_TYPES)
# figure out expected number of axes
ch_types = np.array(inst.get_channel_types())
if picks is not None:
ch_types = ch_types[picks]
n_axes = len(np.intersect1d(ch_types, allowed_ch_types))
# handle user-provided axes
if axes is not None:
if isinstance(axes, Axes):
axes = [axes]
_validate_if_list_of_axes(axes, n_axes)
fig = axes[0].get_figure()
else:
figsize = kwargs.pop('figsize', (10, 2.5 * n_axes + 1))
fig = _figure(inst=inst, toolbar=True, FigureClass=MNELineFigure,
figsize=figsize, n_axes=n_axes, **kwargs)
fig.mne.fig_size_px = fig._get_size_px() # can't do in __init__
axes = fig.mne.ax_list
return fig, axes
def _psd_figure(inst, proj, picks, axes, area_mode, tmin, tmax, fmin, fmax,
n_jobs, color, area_alpha, dB, estimate, average,
spatial_colors, xscale, line_alpha, sphere, window, **kwargs):
"""Instantiate a new power spectral density figure."""
from .. import BaseEpochs
from ..io import BaseRaw
    # triage kwargs for different PSD methods (raw→welch, epochs→multitaper)
welch_kwargs = ('n_fft', 'n_overlap', 'reject_by_annotation')
multitaper_kwargs = ('bandwidth', 'adaptive', 'low_bias', 'normalization')
psd_kwargs = dict()
for kw in welch_kwargs + multitaper_kwargs:
if kw in kwargs:
psd_kwargs[kw] = kwargs.pop(kw)
if isinstance(inst, BaseRaw):
psd_func = partial(psd_welch, window=window)
elif isinstance(inst, BaseEpochs):
psd_func = psd_multitaper
else:
raise TypeError('Expected an instance of Raw or Epochs, got '
f'{type(inst)}.')
# arg checking
if np.isfinite(fmax) and (fmax > inst.info['sfreq'] / 2):
raise ValueError(
            f'Requested fmax ({fmax} Hz) must not exceed ½ the sampling '
f'frequency of the data ({0.5 * inst.info["sfreq"]}).')
_check_option('area_mode', area_mode, [None, 'std', 'range'])
_check_option('xscale', xscale, ('log', 'linear'))
sphere = _check_sphere(sphere, inst.info)
picks = _picks_to_idx(inst.info, picks)
titles = _handle_default('titles', None)
units = _handle_default('units', None)
scalings = _handle_default('scalings', None)
# containers
picks_list = list()
units_list = list()
titles_list = list()
scalings_list = list()
psd_list = list()
# initialize figure
fig, axes = _line_figure(inst, axes, picks, **kwargs)
# split picks, units, etc, for each subplot
(picks_list, units_list, scalings_list, titles_list
) = _split_picks_by_type(inst, picks, units, scalings, titles)
del picks
# don't add ylabels & titles if figure has unexpected number of axes
make_label = len(axes) == len(fig.axes)
# Plot Frequency [Hz] xlabel only on the last axis
xlabels_list = [False] * (len(axes) - 1) + [True]
# compute PSDs
for picks in picks_list:
psd, freqs = psd_func(inst, tmin=tmin, tmax=tmax, picks=picks,
fmin=fmin, fmax=fmax, proj=proj, n_jobs=n_jobs,
**psd_kwargs)
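        # Epochs PSDs are per-epoch; average across epochs before plotting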
if isinstance(inst, BaseEpochs):
psd = np.mean(psd, axis=0)
psd_list.append(psd)
# plot
_plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list, units_list,
scalings_list, axes, make_label, color, area_mode, area_alpha,
dB, estimate, average, spatial_colors, xscale, line_alpha,
sphere, xlabels_list)
return fig
def _split_picks_by_type(inst, picks, units, scalings, titles):
"""Separate picks, units, etc, for plotting on separate subplots."""
picks_list = list()
units_list = list()
scalings_list = list()
titles_list = list()
# if picks is None, only show data channels
allowed_ch_types = (_DATA_CH_TYPES_SPLIT if picks is None else
_VALID_CHANNEL_TYPES)
for ch_type in allowed_ch_types:
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
if ch_type in ('mag', 'grad'):
pick_kwargs['meg'] = ch_type
elif ch_type in _FNIRS_CH_TYPES_SPLIT:
pick_kwargs['fnirs'] = ch_type
else:
pick_kwargs[ch_type] = True
these_picks = pick_types(inst.info, **pick_kwargs)
these_picks = np.intersect1d(these_picks, picks)
if len(these_picks) > 0:
picks_list.append(these_picks)
units_list.append(units[ch_type])
scalings_list.append(scalings[ch_type])
titles_list.append(titles[ch_type])
if len(picks_list) == 0:
raise RuntimeError('No data channels found')
return picks_list, units_list, scalings_list, titles_list
def _calc_new_margins(fig, old_width, old_height, new_width, new_height):
"""Compute new figure-relative values to maintain fixed-size margins."""
new_margins = dict()
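    # scale relative margins by old/new size to keep absolute margins fixed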
for side in ('left', 'right', 'bottom', 'top'):
ratio = ((old_width / new_width) if side in ('left', 'right') else
(old_height / new_height))
rel_dim = getattr(fig.subplotpars, side)
if side in ('right', 'top'):
new_margins[side] = 1 - ratio * (1 - rel_dim)
else:
new_margins[side] = ratio * rel_dim
# gh-8304: don't allow resizing too small
if (new_margins['bottom'] < new_margins['top'] and
new_margins['left'] < new_margins['right']):
        return new_margins
@contextmanager
def _patched_canvas(fig):
old_canvas = fig.canvas
if fig.canvas is None: # XXX old MPL (at least 3.0.3) does this for Agg
fig.canvas = Bunch(mpl_connect=lambda event, callback: None)
try:
yield
finally:
fig.canvas = old_canvas
| bsd-3-clause |
soazig/project-epsilon-1 | code/utils/scripts/correlation_script.py | 4 | 2353 | # - compatibility with Python 3
from __future__ import print_function # print('me') instead of print 'me'
from __future__ import division # 1/2 == 0.5, not 0
from __future__ import absolute_import
import sys
sys.path.append(".././utils")
# - import common modules
import numpy as np
import matplotlib.pyplot as plt
from find_activated_voxel_functions import *
from convolution_normal_script import X_matrix
import nibabel as nib
from scipy.ndimage import gaussian_filter
from matplotlib import colors
import matplotlib
# import events2neural from stimuli module
from stimuli import events2neural
# import data loading helper
from load_BOLD import *
location_of_data = "../../data/ds005/"
location_of_plot = "../../plots/"
# Load the sub001 run001 image
data = load_img(1,1)
data = gaussian_filter(data, [2, 2, 2, 0])
# Get the number of volumes
n_trs = data.shape[-1]
# load the 'actc' colormap values used to display the correlation maps
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# Identify the TR (time between scans)
TR = 2
# Call the events2neural function to generate the on-off values for each volume
task_name = location_of_data + "sub001/model/model001/onsets/task001_run001/cond002.txt"
task = np.loadtxt(task_name)
time_course = events2neural(task, TR, n_trs)
# Make a single brain-volume-sized array of zeros to hold the correlations
correlations = np.zeros(data.shape[:-1])
# Loop over all voxel indices on the first, then second, then third dimension
# Extract the voxel time courses at each voxel coordinate in the image
# Get the correlation between the voxel time course and neural prediction
# Fill in the value in the correlations array
for i in range(data.shape[0]):
for j in range(data.shape[1]):
for k in range(data.shape[2]):
vox_values = data[i, j, k]
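            # off-diagonal term of the 2x2 corrcoef matrix is the correlation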
correlations[i, j, k] = np.corrcoef(time_course, vox_values)[1, 0]
#set up the label font size
matplotlib.rc('xtick', labelsize=5)
matplotlib.rc('ytick', labelsize=5)
# Plot the correlations array
for i in range(34):
plt.subplot(5,7,i+1)
plt.imshow(correlations[:,:,i],cmap = nice_cmap, alpha=0.5)
plt.title("Slice"+str(i+1), fontsize=5)
plt.tight_layout()
plt.suptitle("Subject 1 Run 1 Correlation in Condition 2 for Different Slices\n")
plt.colorbar()
plt.savefig(location_of_plot+"correlation_s1r1c2")
| bsd-3-clause |
wangyum/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 9 | 67662 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
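        # with lr multiplier 0, the language embeddings must stay constant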
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to 10 and the min slice size to be extremely small,
    # so as to ensure that 10 partitions are produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
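    # Each partition of the embedding gets its own Adagrad accumulator slot, so
    # counting the variables whose names mention both the embedding and 'Adagrad'
    # below yields the number of partitions.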
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
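    # Worked out: with identical all-ones features the model converges to the
    # label base rate p = 0.25, so the average log loss is
    #   (-log(0.25) + 3 * -log(0.75)) / 4 = (1.386 + 0.863) / 4 ~= 0.562.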
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
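    # Worked out: training with equal weights keeps the prediction near 0.25; the
    # eval weights are [7, 1, 1, 1] (summing to 10), so the weighted loss is
    #   (7 * -log(0.25) + 3 * -log(0.75)) / 10 = (9.704 + 0.863) / 10 ~= 1.06.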
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
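    # With identical all-ones features and labels [1, 0, 0, 0], the classifier
    # converges to the label base rate, so each row should score roughly
    # P(class=0) = 0.75 and P(class=1) = 0.25, and the predicted class is 0.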
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # probability of the positive class.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
    # The centered bias converges to the log-odds of the positive label rate:
    # log(0.75 / 0.25) = log(3) = 1.09861228867.
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
    # Expected is 100, but because of the global step increment bug (the linear
    # and the DNN optimizers each increment the global step, so it advances twice
    # per training iteration), only 51 steps are counted.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
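    # Re-creating the estimator with the same model_dir makes it restore the
    # latest checkpoint written by the first instance, so the second instance
    # should reproduce the same predictions without any further training.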
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/__init__.py | 17 | 2736 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning (DEPRECATED).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
See the @{$python/contrib.learn} guide.
@@BaseEstimator
@@Estimator
@@Trainable
@@Evaluable
@@KMeansClustering
@@ModeKeys
@@ModelFnOps
@@MetricSpec
@@PredictionKey
@@DNNClassifier
@@DNNEstimator
@@DNNRegressor
@@DNNLinearCombinedRegressor
@@DNNLinearCombinedEstimator
@@DNNLinearCombinedClassifier
@@DynamicRnnEstimator
@@LinearClassifier
@@LinearEstimator
@@LinearRegressor
@@LogisticRegressor
@@StateSavingRnnEstimator
@@SVM
@@SKCompat
@@Head
@@multi_class_head
@@multi_label_head
@@binary_svm_head
@@regression_head
@@poisson_regression_head
@@multi_head
@@no_op_train_fn
@@Experiment
@@ExportStrategy
@@TaskType
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@infer_real_valued_columns_from_input
@@infer_real_valued_columns_from_input_fn
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
@@read_keyed_batch_examples
@@read_keyed_batch_examples_shared_queue
@@read_keyed_batch_features
@@read_keyed_batch_features_shared_queue
@@InputFnOps
@@ProblemType
@@build_parsing_serving_input_fn
@@make_export_strategy
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.contrib.learn.python.learn import learn_runner_lib as learn_runner
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['datasets', 'head', 'io', 'learn_runner', 'models',
'monitors', 'NotFittedError', 'ops', 'preprocessing',
'utils', 'graph_actions']
remove_undocumented(__name__, _allowed_symbols)
| mit |
rs2/pandas | pandas/tests/indexes/multi/test_setops.py | 1 | 11439 | import numpy as np
import pytest
import pandas as pd
from pandas import MultiIndex, Series
import pandas._testing as tm
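# Note: the `idx` and `sort` arguments used throughout are pytest fixtures
# (a small two-level MultiIndex and a sort flag parametrized over None/False)
# supplied by the test suite's conftest rather than defined in this module.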
@pytest.mark.parametrize("case", [0.5, "xxx"])
@pytest.mark.parametrize(
"method", ["intersection", "union", "difference", "symmetric_difference"]
)
def test_set_ops_error_cases(idx, case, sort, method):
# non-iterable input
msg = "Input must be Index or array-like"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)(case, sort=sort)
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_intersection_base(idx, sort, klass):
first = idx[2::-1] # first 3 elements reversed
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
intersect = first.intersection(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(intersect, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.intersection([1, 2, 3], sort=sort)
@pytest.mark.arm_slow
@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list])
def test_union_base(idx, sort, klass):
first = idx[::-1]
second = idx[:5]
if klass is not MultiIndex:
second = klass(second.values)
union = first.union(second, sort=sort)
if sort is None:
expected = first.sort_values()
else:
expected = first
tm.assert_index_equal(union, expected)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.union([1, 2, 3], sort=sort)
def test_difference_base(idx, sort):
second = idx[4:]
answer = idx[:4]
result = idx.difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
assert result.equals(answer)
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = idx.difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
idx.difference([1, 2, 3], sort=sort)
def test_symmetric_difference(idx, sort):
first = idx[1:]
second = idx[:-1]
answer = idx[[-1, 0]]
result = first.symmetric_difference(second, sort=sort)
if sort is None:
answer = answer.sort_values()
tm.assert_index_equal(result, answer)
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.symmetric_difference(case, sort=sort)
tm.assert_index_equal(result, answer)
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.symmetric_difference([1, 2, 3], sort=sort)
def test_multiindex_symmetric_difference():
# GH 13490
idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"])
result = idx ^ idx
assert result.names == idx.names
idx2 = idx.copy().rename(["A", "B"])
result = idx ^ idx2
assert result.names == [None, None]
def test_empty(idx):
# GH 15270
assert not idx.empty
assert idx[:0].empty
def test_difference(idx, sort):
first = idx
result = first.difference(idx[-3:], sort=sort)
vals = idx[:-3].values
if sort is None:
vals = sorted(vals)
expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == idx.names
tm.assert_index_equal(result, expected)
# empty difference: reflexive
result = idx.difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: superset
result = idx[-3:].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# empty difference: degenerate
result = idx[:0].difference(idx, sort=sort)
expected = idx[:0]
assert result.equals(expected)
assert result.names == idx.names
# names not the same
chunklet = idx[-3:]
chunklet.names = ["foo", "baz"]
result = first.difference(chunklet, sort=sort)
assert result.names == (None, None)
# empty, but non-equal
result = idx.difference(idx.sortlevel(1)[0], sort=sort)
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values, sort=sort)
assert result.equals(first[:0])
# name from empty array
result = first.difference([], sort=sort)
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([("foo", "one")], sort=sort)
expected = pd.MultiIndex.from_tuples(
[("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")]
)
expected.names = first.names
assert first.names == result.names
msg = "other must be a MultiIndex or a list of tuples"
with pytest.raises(TypeError, match=msg):
first.difference([1, 2, 3, 4, 5], sort=sort)
def test_difference_sort_special():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# sort=None, the default
result = idx.difference([])
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_special_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
result = idx.difference([], sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_difference_sort_incomparable():
# GH-24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
# sort=None, the default
# MultiIndex.difference deviates here from other difference
# implementations in not catching the TypeError
msg = "'<' not supported between instances of 'Timestamp' and 'int'"
with pytest.raises(TypeError, match=msg):
result = idx.difference(other)
# sort=False
result = idx.difference(other, sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_difference_sort_incomparable_true():
# TODO decide on True behaviour
# # sort=True, raises
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]])
other = pd.MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]])
with pytest.raises(TypeError):
idx.difference(other, sort=True)
def test_union(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_union = piece1.union(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_union, idx.sort_values())
assert tm.equalContents(the_union, idx)
# corner case, pass self or empty thing:
the_union = idx.union(idx, sort=sort)
assert the_union is idx
the_union = idx.union(idx[:0], sort=sort)
assert the_union is idx
# FIXME: dont leave commented-out
# won't work in python 3
# tuples = _index.values
# result = _index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(idx)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = _index.union(other)
# assert result.equals(result2)
def test_intersection(idx, sort):
piece1 = idx[:5][::-1]
piece2 = idx[3:]
the_int = piece1.intersection(piece2, sort=sort)
if sort is None:
tm.assert_index_equal(the_int, idx[3:5])
assert tm.equalContents(the_int, idx[3:5])
# corner case, pass self
the_int = idx.intersection(idx, sort=sort)
assert the_int is idx
# empty intersection: disjoint
empty = idx[:2].intersection(idx[2:], sort=sort)
expected = idx[:0]
assert empty.equals(expected)
# FIXME: dont leave commented-out
# can't do in python 3
# tuples = _index.values
# result = _index & tuples
# assert result.equals(tuples)
def test_intersect_equal_sort():
# GH-24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
tm.assert_index_equal(idx.intersection(idx, sort=None), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_intersect_equal_sort_true():
# TODO decide on True behaviour
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
sorted_ = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_empty(slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
# MultiIndex does not special case empty.union(idx)
# tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_empty_sort(slice_):
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, 0], ["a", "b"]])
other = idx[:0]
result = idx.union(other, sort=True)
expected = pd.MultiIndex.from_product([[0, 1], ["a", "b"]])
tm.assert_index_equal(result, expected)
def test_union_sort_other_incomparable():
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
# default, sort=None
with tm.assert_produces_warning(RuntimeWarning):
result = idx.union(idx[:1])
tm.assert_index_equal(result, idx)
# sort=False
result = idx.union(idx[:1], sort=False)
tm.assert_index_equal(result, idx)
@pytest.mark.xfail(reason="Not implemented.")
def test_union_sort_other_incomparable_sort():
# TODO decide on True behaviour
# # sort=True
idx = pd.MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]])
with pytest.raises(TypeError, match="Cannot compare"):
idx.union(idx[:1], sort=True)
def test_union_non_object_dtype_raises():
# GH#32646 raise NotImplementedError instead of less-informative error
mi = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
idx = mi.levels[1]
msg = "Can only union MultiIndex with MultiIndex or Index of tuples"
with pytest.raises(NotImplementedError, match=msg):
mi.union(idx)
@pytest.mark.parametrize(
"method", ["union", "intersection", "difference", "symmetric_difference"]
)
def test_setops_disallow_true(method):
idx1 = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
idx2 = pd.MultiIndex.from_product([["b", "c"], [1, 2]])
with pytest.raises(ValueError, match="The 'sort' keyword only takes"):
getattr(idx1, method)(idx2, sort=True)
| bsd-3-clause |
vlas-sokolov/pyspeckit | pyspeckit/spectrum/Spectrum1D.py | 7 | 9758 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Spectrum1D class. It is eventually supposed to migrate to astropy core
# Python packages
from abc import ABCMeta, abstractmethod, abstractproperty
from copy import copy, deepcopy
# External packages
import numpy as np
# Local packages
from astropy.nddata import NDData
class SpectrumErrorArray(object):
""" Represents the error axis of a Spectrum object
.. warning::
This is skeleton code!
Spectral data errors can be represented in a variety of ways, e.g. 1-sigma
errors, inverse variance, variance, etc. In order to provide generic functionality,
the error data must carry with it some metadata and operations that help
handle transformations of the error array.
Parameters
----------
array : `~numpy.ndarray`
The error values
Notes
-----
This class is abstract and can only be subclassed and used by
classes that will access its API.
"""
# This is an abstract class and can only be subclassed.
__metaclass__ = ABCMeta
def __init__(self, values):
self.values = values
@abstractmethod
def interpolate(self, new_dispersion):
pass
class InverseVarianceErrorArray(SpectrumErrorArray):
# ...
def interpolate(self, new_dispersion):
pass
class VarianceErrorArray(SpectrumErrorArray):
# ...
def interpolate(self, new_dispersion):
pass
class Spectrum1DBase(NDData):
""" Base class for Spectrum1D objects.
.. warning::
This is skeleton code!
`Spectrum1DBase` is a stripped-down superclass of the Spectrum1D object
that allows for creation of a Spectrum-like object **without** an internal
dispersion array. This allows for the possibility of creating a
SpectrumCollection type class that could contain many spectra that share
the same dispersion axis (e.g. SDSS spectra from the same plate).
Parameters
----------
flux : `~numpy.ndarray`
The flux data as an array
Notes
-----
This class should be 'private' in that it should really only be subclassed
and used by Spectrum1D and SpectrumCollection
"""
def __init__(self):
pass # not yet implemented
@property
def spectrum1DBase(self):
""" This method returns an object equivalent to this spectrum but as
a Spectrum1DBase object, i.e. without a dispersion array.
It is left to subclasses to handle this in more detail (e.g., see Spectrum1D).
"""
return self
class Spectrum1D(Spectrum1DBase):
""" Class for 1-dimensional Spectrum objects.
.. warning::
This is skeleton code!
`Spectrum1D` provides a container for 1-dimensional spectral data as well
as generic operations for this data.
Parameters
----------
flux : `~numpy.ndarray`
The flux data as an array
dispersion : `~numpy.ndarray`
An array of data representing the dispersion axis of the spectral data,
e.g. wavelength, frequency, energy, velocity, etc.
<NDData parameters...>
Notes
-----
"""
# This is an abstract class and can only be subclassed.
__metaclass__ = ABCMeta
@property
def spectrum1DBase(self):
""" Return a new Spectrum1DBase object from this spectrum.
This basically is the same thing but without the dispersion array,
but we'll see how this develops. """
# There will be a better way to do this, but this is the general idea.
# Basically I want to create a Spectrum1DBase object from a Spectrum1D object.
spectrum_copy = deepcopy(self)
spectrum_copy.dispersion = None
return spectrum_copy
def slice_dispersion(self, start=None, end=None):
""" Slices the data arrays based on values in the dispersion array
Parameters
----------
start : any numeric type, optional
The minimum value in the dispersion axis to slice on
end : any numeric type, optional
The maximum value in the dispersion axis to slice on
"""
pass
def slice_pixel(self, start=None, end=None):
""" Slices the data arrays on pixel index (e.g. array index)
Parameters
----------
start : int, optional
The starting index to slice the arrays on
end : int, optional
The ending index to slice the arrays on
Notes
-----
This is equivalent to slicing each array like array[start:end]
"""
pass
def interpolate_linear(self, new_dispersion):
""" Uses linear interpolation to resample the internal arrays onto
the specified dispersion axis.
Parameters
----------
new_dispersion : `~numpy.ndarray`
The new dispersion array to interpolate on to
"""
pass
def interpolate_bspline(self, new_dispersion):
""" Uses B-spline interpolation to resample the internal arrays onto
the specified dispersion axis.
Parameters
----------
new_dispersion : `~numpy.ndarray`
The new dispersion array to interpolate on to
"""
pass
def smooth_boxcar(self, width):
""" Uses boxcar smoothing to smooth the internal arrays.
Parameters
----------
width : integer
The boxcar width in pixels.
        Reference: scipy.signal.boxcar (http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.boxcar.html)
"""
        # Normalise the kernel and keep the output the same length as the
        # input so the smoothed flux stays aligned with the dispersion axis.
        from scipy.signal import convolve, boxcar
        self.flux = convolve(self.flux, boxcar(M=width) / width, mode='same')
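    # Usage sketch (not part of the original class; assumes a populated
    # Spectrum1D instance ``spec`` with a ``flux`` array):
    #
    #     spec.smooth_boxcar(5)   # replace spec.flux with a 5-pixel running mean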
def smooth_custom(self):
""" Allows for a user defined smoothing operation.
This is simply a template function meant to be overwritten
by a custom function e.g.:
def so_smooth(self):
# Do something with flux, dispersion, error
spectrumObject.custom_smooth = so_smooth
spectrumObject.custom_smooth()
"""
pass
def plot(self, **kwargs):
""" Plotting utility for the spectrum.
Parameters
----------
axes : `~matplotlib.pyplot.Axes`, optional
A matplotlib Axes() object to plot the spectrum on
filename : str, optional
A filename to save the plot to
show_error : bool, optional
Decide whether to use the error data when plotting
Usage
-----
s = Spectrum1D(...)
s.plot() - Attempt to display the object on screen
using matplotlib's `show()` command.
s.plot(axes=ax) - Plot the data onto the specified
Axes() object.
s.plot(filename="myPlot.png") - Plot the data onto a new figure and
save to the specified filename and path
(default path if not specified).
The format of the file will be deduced from the
extension (e.g. ps, png, pdf).
Notes
-----
Where it makes sense (i.e. not conflicting), multiple parameters
can be specified. For example,
s.plot(filename="myPlot.pdf", axes=ax)
will both write a file to the disk and return a matplotlib.Axes() object.
"""
pass
class Spectrum1DCollection(object):
""" A collection object for spectra that share the same dispersion information.
"""
def __init__(self):
self.spectra = list()
        self._dispersion = None
def append(self, spectrum):
""" Add a spectrum (of type Spectrum1D or Spectrum1DBase) to this collection.
"""
        self.spectra.append(spectrum.spectrum1DBase)
def len(self):
return len(self.spectra)
    @property
    def dispersion(self):
        return self._dispersion
    @dispersion.setter
    def dispersion(self, new_dispersion):
        """ Set the dispersion array to be used for all spectra in this collection.
        The dispersion argument accepts a numpy array, a list, or a Spectrum1D object.
        When the latter is specified, the dispersion is extracted from the object.
        """
        if isinstance(new_dispersion, Spectrum1D):
            self._dispersion = new_dispersion.dispersion
        elif isinstance(new_dispersion, list):
            self._dispersion = np.array(new_dispersion)
        elif isinstance(new_dispersion, np.ndarray):
            self._dispersion = new_dispersion
        else:
            raise ValueError("The dispersion specified is not a known type. Should be a list, a numpy array, or a Spectrum1D object.")
        # Add other types that unambiguously define a dispersion array.
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/tests/test_dviread.py | 15 | 1788 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_equal
import matplotlib.dviread as dr
import os.path
original_find_tex_file = dr.find_tex_file
def setup():
dr.find_tex_file = lambda x: x
def teardown():
dr.find_tex_file = original_find_tex_file
def test_PsfontsMap():
filename = os.path.join(
os.path.dirname(__file__),
'baseline_images', 'dviread', 'test.map')
fontmap = dr.PsfontsMap(filename)
# Check all properties of a few fonts
for n in [1, 2, 3, 4, 5]:
key = 'TeXfont%d' % n
entry = fontmap[key]
assert_equal(entry.texname, key)
assert_equal(entry.psname, 'PSfont%d' % n)
if n not in [3, 5]:
assert_equal(entry.encoding, 'font%d.enc' % n)
elif n == 3:
assert_equal(entry.encoding, 'enc3.foo')
# We don't care about the encoding of TeXfont5, which specifies
# multiple encodings.
if n not in [1, 5]:
assert_equal(entry.filename, 'font%d.pfa' % n)
else:
assert_equal(entry.filename, 'font%d.pfb' % n)
if n == 4:
assert_equal(entry.effects, {'slant': -0.1, 'extend': 2.2})
else:
assert_equal(entry.effects, {})
# Some special cases
entry = fontmap['TeXfont6']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont7']
assert_equal(entry.filename, None)
assert_equal(entry.encoding, 'font7.enc')
entry = fontmap['TeXfont8']
assert_equal(entry.filename, 'font8.pfb')
assert_equal(entry.encoding, None)
entry = fontmap['TeXfont9']
assert_equal(entry.filename, '/absolute/font9.pfb')
| mit |
kinverarity1/sa-groundwater-wells | waterconnect_gw/wells.py | 2 | 26493 | import collections
import argparse
import csv
from datetime import datetime, date, time, timedelta
import glob
import json
import os
import re
import sys
def parse_well_identifiers(well_ids):
'''Parse a string and return list of possible well IDs.
Args:
well_ids (str): a comma-separated list of possible well IDs.
Returns: a list, each item is a dictionary containing a key and the ID, e.g.
    >>> parse_well_identifiers('6528-393')
    [{'unit_no': '6528-393'}]
    Note that unit numbers are normalised to the hyphenated map-sequence format.
    >>> parse_well_identifiers('14856')
    [{'permit_no': '14856'}, {'dh_no': '14856'}]
Note that 14856 returns two results because it matches the
regular expressions for both permit numbers and drillhole numbers. It
could refer to two different wells - without knowing more information
we don't know any better.
>>> parse_well_identifiers('YAT110, 652800393,SLE69')
[{'obswell_no': 'YAT110'}, {'unit_no': '6528-393'}, {'obswell_no': 'SLE069'}]
Note that SLE69 is converted to SLE069 - always three digits for the sequence
number.
'''
results = []
well_id_patterns = {
'unit_no': [ # Unit numbers
r'(\d{4})(\d{5})', # 652800393 (map no. is 6528, seq. no. is 393)
r'(\d{4})-(\d+)', # 6528-393 (map no. is 6528, seq. no is 393)
],
'permit_no': [ # Permit numbers
r'(\d{4,6})', # 14856
r'[Pp][Nn]\.??[ -]?(\d{4,6})' # PN14856, pn14856, Pn-14856, p. 14856
],
'dh_no': [ # Drillhole numbers
r'(\d{4,6})', # 28083
],
'obswell_no': [ # Obswell numbers.
r'([a-zA-Z]{3})[ -]?(\d{1,3})'
]
}
for each in well_ids.split(','):
each = each.strip()
for ptype, patterns in well_id_patterns.items():
for pattern in patterns:
m = re.match(pattern, each)
if m:
if len(m.group(0)) < len(each):
break
if ptype == 'unit_no':
map_no, seq_no = m.group(1, 2)
seq_no = int(seq_no)
results.append({ptype: '%s-%s' % (map_no, seq_no)})
break
elif ptype == 'obswell_no':
results.append({ptype: '%s%03.0f' % (m.group(1), float(m.group(2)))})
break
else:
results.append({ptype: m.group(1)})
break
return results
def ldicts2table(ldicts, keys=None):
'''Convert a list of dicts to a "table".
Args:
ldicts (list): a list, each item is a dict, with the values being
single objects. Each dict should have the same keys.
keys (list): optional, these keys will be used as the keys for the
output table. If keys is None, then the keys in the first dict
in ldicts will be used.
Returns: table (a dict with 1D lists as values)
'''
if len(ldicts) == 0:
return {}
if keys is None and len(ldicts) > 0:
keys = ldicts[0].keys()
table = collections.OrderedDict()
for key in keys:
table[key] = []
for row in ldicts:
for key in keys:
table[key].append(row[key])
return table
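# Example (illustrative only, not part of the original module): converting a
# list of row dicts into the column-oriented "table" form used below.
#
#     >>> rows = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
#     >>> dict(ldicts2table(rows))
#     {'a': [1, 3], 'b': [2, 4]}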
def table2ldicts(table, keys=None):
'''Convert a table to a list of dicts -- see below.
Args:
table (dict): the keys are the column headings. Each value is
a one-dimensional list. All value lists are the same length.
keys (list): optional, keys to be used for each dict in ldicts;
if keys is None, the keys of table will be used.
Returns: ldicts (see docstring for ldicts2table(...) for explanation.
'''
if keys is None:
keys = list(table.keys())
ldicts = []
values = list(table.values())
if len(values) > 0: # no of records
n = len(values[0]) # no of keys (from the first record)
for i in range(n):
row = collections.OrderedDict()
for key in keys:
row[key] = table[key][i]
ldicts.append(row)
return ldicts
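# Example (illustrative only): table2ldicts is the inverse of ldicts2table, so
# round-tripping preserves the rows.
#
#     >>> table = {'a': [1, 3], 'b': [2, 4]}
#     >>> [dict(r) for r in table2ldicts(table)]
#     [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]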
def to_csv(fileobj, ldicts=None, table=None):
'''Write either a list of dicts or a table to a CSV file.
Args:
fileobj (file-like object): file to write CSV data to.
ldicts (list of dicts): optional, list of dicts
table (dict): optional, dict of lists
'''
    assert (ldicts is not None) or (table is not None)
if ldicts is None:
ldicts = table2ldicts(table)
if len(ldicts):
writer = csv.DictWriter(fileobj, ldicts[0].keys(), lineterminator='\n')
writer.writeheader()
for row in ldicts:
writer.writerow(row)
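# Example (illustrative only): writing a small list of row dicts to an
# in-memory CSV buffer; any text-mode file-like object works the same way.
#
#     >>> import io
#     >>> buf = io.StringIO()
#     >>> to_csv(buf, ldicts=[{'a': 1, 'b': 2}])
#     >>> buf.getvalue()
#     'a,b\n1,2\n'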
def to_df(table, index='UnitNumber'):
'''Convert a table (dict of lists) to a pandas DataFrame.
Args:
table (dict): dict of lists
index (str): key to the value in table which should be used
as the index column for the DataFrame.
Returns: pandas.DataFrame.
'''
import pandas as pd
df = pd.DataFrame(table)
if index in table.keys():
df = df.set_index(index)
return df
def summary_table(wells):
'''Get a table of summary information from a list of well JSON data.
Args:
wells (list of dicts): e.g. a list of data from JSON files.
'''
table = collections.OrderedDict()
keys = [
'UnitNumber',
'DRILLHOLE_NO',
'DH_NAME',
'DH_OTHER_NAME',
'OBSWELL_NO',
'AMG_EASTING',
'AMG_NORTHING',
'AMG_ZONE',
'LON',
'LAT',
'DrillDate',
'Depth',
'ORIG_DRILLED_DEPTH',
'LATEST_OPEN_DEPTH',
'LATEST_OPEN_DEPTH_DATE',
'CasedTo',
'LATEST_MIN_DIAM',
'LATEST_REF_ELEVATION',
'DrillClass',
'DrillClassTT',
'DrillStatus',
'DrillStatusTT',
'PURPOSE_CODE',
'PURPOSE_CODETT',
'Aquifer',
'AquiferTT',
'SWL',
'LATEST_RSWL',
'LATEST_EC',
'TDS',
'Yield',
'SALINITY_STATUS',
'STAND_WATER_LEVEL_STATUS',
'LATEST_SAL_DATE',
'LATEST_SWL_DATE',
'LATEST_YIELD_DATE',
'GROUP_CODE',
'HUNDRED_NAME',
'NRM_REGION_CODE',
'PRESCRIBED_WELL_AREA_CODE',
]
for key in keys:
table[key] = []
for well in wells:
for key in keys:
table[key].append(well['summary'][0].get(key, ''))
return table
def drillers_log(well):
    '''Get a table of drillers log information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME',
'DRILLER_DEPTH_FROM', 'DRILLER_DEPTH_TO', 'LITHO_DRILLER_CODE', 'LITHO_DESC',
'GEOL_LOGGING_DATE', 'GEOL_LOGGER_NAME', 'DRILLHOLE_NO', 'LOG_NO',]
summary = well['summary'][0]
drillers_logs = well['drillers_log']
ldicts = []
for drillers_log in drillers_logs:
rowbase = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'GEOL_LOGGING_DATE': drillers_log.get('GEOL_LOGGING_DATE', ''),
'GEOL_LOGGER_NAME': drillers_log.get('GEOL_LOGGER_NAME', ''),
'DRILLHOLE_NO': drillers_log.get('DRILLHOLE_NO', ''),
'LOG_NO': drillers_log.get('LOG_NO', ''),
}
if len(drillers_log['details']):
for item in drillers_log['details']:
row = dict(rowbase)
row['DRILLER_DEPTH_FROM'] = item.get('DRILLER_DEPTH_FROM', '')
row['DRILLER_DEPTH_TO'] = item.get('DRILLER_DEPTH_TO', '')
row['LITHO_DRILLER_CODE'] = item.get('LITHO_DRILLER_CODE', '')
row['LITHO_DESC'] = item.get('LITHO_DESC', '')
ldicts.append(row)
return ldicts2table(ldicts, columns)
def lith_log(well):
    '''Get a table of lithological log information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME',
'LITHO_DEPTH_FROM',
'LITHO_DEPTH_TO',
'LITHO_CODE_MAJOR', # 'LITHO_APPROVED_CODE1',
'LITHO_CODE_MINOR', # 'LITHO_APPROVED_CODE2',
'LITHO_DESC',
'DRILLHOLE_NO',]
summary = well['summary'][0]
lith_logs = well['lith_log']
ldicts = []
if len(lith_logs):
for unit in lith_logs:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'LITHO_DEPTH_FROM': unit.get('LITHO_DEPTH_FROM', ''),
'LITHO_DEPTH_TO': unit.get('LITHO_DEPTH_TO', ''),
'LITHO_CODE_MAJOR': unit.get('LITHO_APPROVED_CODE1', ''),
'LITHO_CODE_MINOR': unit.get('LITHO_APPROVED_CODE2', ''),
'LITHO_DESC': unit.get('LITHO_DESC', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def strat_log(well):
    '''Get a table of stratigraphic log information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME',
'STRAT_DEPTH_FROM', 'STRAT_DEPTH_TO',
'STRAT_UNIT_NO', 'MAP_SYMBOL', 'GIS_CODE',
'STRAT_NAME', 'STRAT_DESC',
'THICKNESS_MAX', 'STRAT_UNIT_USAGE_CODE',
'DRILLHOLE_NO',]
ldicts = []
summary = well['summary'][0]
strat_logs = well['strat_log']
if len(strat_logs):
for strat_log in strat_logs:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'STRAT_DEPTH_FROM': strat_log.get('STRAT_DEPTH_FROM', ''),
'STRAT_DEPTH_TO': strat_log.get('STRAT_DEPTH_TO', ''),
'STRAT_UNIT_NO': strat_log.get('STRAT_UNIT_NO', ''),
'MAP_SYMBOL': strat_log.get('MAP_SYMBOL', ''),
'GIS_CODE': strat_log.get('GIS_CODE', ''),
'STRAT_NAME': strat_log.get('STRAT_NAME', ''),
'STRAT_DESC': strat_log.get('STRAT_DESC', ''),
'THICKNESS_MAX': strat_log.get('THICKNESS_MAX', ''),
'STRAT_UNIT_USAGE_CODE': strat_log.get('STRAT_UNIT_USAGE_CODE', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def hydrostrat_log(well):
    '''Get a table of hydrostratigraphic log information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME',
'HYDRO_DEPTH_FROM', 'HYDRO_DEPTH_TO',
'MAP_SYMBOL', 'STRAT_NAME', 'HYDRO_SUBUNIT_DESC', 'HYDRO_SUBUNIT_CODE',
'HYDRO_DEPTH_TO_GREATER_FLAG',
'HYDRO_INT_NO', 'STRAT_UNIT_NO',
'DRILLHOLE_NO',]
ldicts = []
summary = well['summary'][0]
hydrostrat_logs = well['hydrostrat_log']
if len(hydrostrat_logs):
units = []
for unit in hydrostrat_logs:
units.append(unit)
if len(unit['extra']) > 0:
units += unit['extra']
for unit in units:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'HYDRO_DEPTH_FROM': unit.get('HYDRO_DEPTH_FROM', ''),
'HYDRO_DEPTH_TO': unit.get('HYDRO_DEPTH_TO', ''),
'MAP_SYMBOL': unit.get('MAP_SYMBOL', ''),
'STRAT_NAME': unit.get('STRAT_NAME', ''),
'HYDRO_SUBUNIT_DESC': unit.get('HYDRO_SUBUNIT_DESC', ''),
'HYDRO_SUBUNIT_CODE': unit.get('HYDRO_SUBUNIT_CODE', ''),
'HYDRO_DEPTH_TO_GREATER_FLAG': unit.get('HYDRO_DEPTH_TO_GREATER_FLAG', ''),
'HYDRO_INT_NO': unit.get('HYDRO_INT_NO', ''),
'STRAT_UNIT_NO': unit.get('STRAT_UNIT_NO', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def completion_datesort(record):
'''Return the completion date from a record, suitable for sorting.
Args:
record (dict): a completion record.
Returns: datetime object.
'''
if 'COMPLETION_DATE' in record:
return datetime.strptime(record['COMPLETION_DATE'], '%Y-%m-%d')
else:
return datetime(1800, 1, 1)
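# Usage sketch (illustrative only): completion_datesort is intended as a sort
# key, as used further below:
#
#     completions.sort(key=completion_datesort)
#
# Records without a COMPLETION_DATE sort first because they fall back to 1800-01-01.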
def water_cuts(well):
    '''Get a table of water cut information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME', 'DRILLHOLE_NO',
'EVENT_TYPE',
'DATE',
'DEPTH_TO_WATER',
'DEPTH_FROM',
'DEPTH_TO',
'WELL_YIELD',
'EC',
'TDS',
'MEAS_NO',
'EXTR_METHOD_CODE',
'SAMPLE_TYPE',
'COMPL_TOTAL_DEPTH',
'COMPL_FINAL_DEPTH',
'COMPL_CURRENT_DEPTH',
'COMPLETION_DATE',
'COMPLETION_COMMENTS',
'PERMIT_NO',
]
ldicts = []
summary = well['summary'][0]
completions = well['construction']
completions.sort(key=completion_datesort)
if len(completions):
records = []
compl_date_found = False
for compl in completions:
for record in compl['water_cut']:
record['EVENT_TYPE'] = compl.get('CONSTRN_FLAG', '')
if record['EVENT_TYPE'] == 'C':
record['EVENT_TYPE'] = 'Construction'
elif record['EVENT_TYPE'] == 'S':
record['EVENT_TYPE'] = 'Survey'
record['COMPL_FINAL_DEPTH'] = compl.get('FINAL_DPTH', '')
record['COMPL_TOTAL_DEPTH'] = compl.get('TOTAL_DPTH', '')
record['COMPL_CURRENT_DEPTH'] = compl.get('CURRENT_DPTH', '')
record['COMPLETION_DATE'] = compl.get('COMPLETION_DATE', '')
record['COMPLETION_COMMENTS'] = compl.get('COMMENTS', '')
record['PERMIT_NO'] = compl.get('PERMIT_NO', '')
records.append(record)
if len(records) > 0:
for record in records:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'DATE': record.get('WATER_CUT_DATE', ''),
'DEPTH_TO_WATER': record.get('DEPTH_TO_WATER', ''),
'DEPTH_FROM': record.get('WATER_CUT_DEPTH_FROM', ''),
'DEPTH_TO': record.get('WATER_CUT_DEPTH_TO', ''),
'WELL_YIELD': record.get('WELL_YIELD', ''),
'EC': record.get('EC', ''),
'TDS': record.get('TDS', ''),
'MEAS_NO': record.get('WATER_CUT_MEAS_NO', ''),
'EXTR_METHOD_CODE': record.get('EXTR_METHOD_CODE', ''),
'SAMPLE_TYPE': record.get('SAMPLE_TYPE', ''),
'EVENT_TYPE': record.get('EVENT_TYPE', ''),
'COMPL_TOTAL_DEPTH': record.get('COMPL_TOTAL_DEPTH', ''),
'COMPL_FINAL_DEPTH': record.get('COMPL_FINAL_DEPTH', ''),
'COMPL_CURRENT_DEPTH': record.get('COMPL_CURRENT_DEPTH', ''),
'COMPLETION_DATE': record.get('COMPLETION_DATE', ''),
'COMPLETION_COMMENTS': record.get('COMPLETION_COMMENTS', ''),
'PERMIT_NO': record.get('PERMIT_NO', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def drilling_record(well):
    '''Get a table of drilling information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME', 'DRILLHOLE_NO',
'EVENT_TYPE',
'DEPTH_FROM',
'DEPTH_TO',
'DRILL_METH',
'DIAM',
'DRILL_COMMENTS',
'COMPL_TOTAL_DEPTH',
'COMPL_FINAL_DEPTH',
'COMPL_CURRENT_DEPTH',
'COMPLETION_DATE',
'COMPLETION_COMMENTS',
'PERMIT_NO',
]
ldicts = []
summary = well['summary'][0]
completions = well['construction']
completions.sort(key=completion_datesort)
if len(completions):
records = []
compl_date_found = False
for compl in completions:
for record in compl['drilling']:
record['EVENT_TYPE'] = compl.get('CONSTRN_FLAG', '')
if record['EVENT_TYPE'] == 'C':
record['EVENT_TYPE'] = 'Construction'
elif record['EVENT_TYPE'] == 'S':
record['EVENT_TYPE'] = 'Survey'
record['COMPL_FINAL_DEPTH'] = compl.get('FINAL_DPTH', '')
record['COMPL_TOTAL_DEPTH'] = compl.get('TOTAL_DPTH', '')
record['COMPL_CURRENT_DEPTH'] = compl.get('CURRENT_DPTH', '')
record['COMPLETION_DATE'] = compl.get('COMPLETION_DATE', '')
record['COMPLETION_COMMENTS'] = compl.get('COMMENTS', '')
record['PERMIT_NO'] = compl.get('PERMIT_NO', '')
records.append(record)
if len(records) > 0:
for record in records:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'DEPTH_FROM': record.get('DRILL_FR', ''),
'DEPTH_TO': record.get('DRILL_TO', ''),
'DRILL_METH': record.get('DRILL_METH', ''),
'DIAM': record.get('DIAM', ''),
'DRILL_COMMENTS': record.get('COMMENTS', ''),
'EVENT_TYPE': record.get('EVENT_TYPE', ''),
'COMPL_TOTAL_DEPTH': record.get('COMPL_TOTAL_DEPTH', ''),
'COMPL_FINAL_DEPTH': record.get('COMPL_FINAL_DEPTH', ''),
'COMPL_CURRENT_DEPTH': record.get('COMPL_CURRENT_DEPTH', ''),
'COMPLETION_DATE': record.get('COMPLETION_DATE', ''),
'COMPLETION_COMMENTS': record.get('COMPLETION_COMMENTS', ''),
'PERMIT_NO': record.get('PERMIT_NO', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def casing_record(well):
    '''Get a table of casing/construction information from a single well's JSON data.
    Args:
        well (dict): data for one well, e.g. loaded from a JSON file.
    '''
columns = [
'UnitNumber', 'OBSNUMBER', 'DH_NAME', 'DRILLHOLE_NO',
'EVENT_TYPE',
'CASE_FROM',
'CASE_TO',
'CASE_MATERIAL',
'CASE_DIAM',
'APERTURE',
'PZONE_TYPE_CODE',
'CEMENT_FROM',
'CEMENT_TO',
'CEMENT_TYPE',
'COMMENTS',
'COMPL_TOTAL_DEPTH',
'COMPL_FINAL_DEPTH',
'COMPL_CURRENT_DEPTH',
'COMPLETION_DATE',
'COMPLETION_COMMENTS',
'PERMIT_NO',
]
ldicts = []
summary = well['summary'][0]
completions = well['construction']
completions.sort(key=completion_datesort)
if len(completions):
records = []
compl_date_found = False
for compl in completions:
for record in compl['casing']:
record['EVENT_TYPE'] = compl.get('CONSTRN_FLAG', '')
if record['EVENT_TYPE'] == 'C':
record['EVENT_TYPE'] = 'Construction'
elif record['EVENT_TYPE'] == 'S':
record['EVENT_TYPE'] = 'Survey'
record['COMPL_FINAL_DEPTH'] = compl.get('FINAL_DPTH', '')
record['COMPL_TOTAL_DEPTH'] = compl.get('TOTAL_DPTH', '')
record['COMPL_CURRENT_DEPTH'] = compl.get('CURRENT_DPTH', '')
record['COMPLETION_DATE'] = compl.get('COMPLETION_DATE', '')
record['COMPLETION_COMMENTS'] = compl.get('COMMENTS', '')
record['PERMIT_NO'] = compl.get('PERMIT_NO', '')
records.append(record)
for record in compl['production_zone']:
record['EVENT_TYPE'] = compl.get('CONSTRN_FLAG', '')
if record['EVENT_TYPE'] == 'C':
record['EVENT_TYPE'] = 'Construction'
elif record['EVENT_TYPE'] == 'S':
record['EVENT_TYPE'] = 'Survey'
record['COMPL_FINAL_DEPTH'] = compl.get('FINAL_DPTH', '')
record['COMPL_TOTAL_DEPTH'] = compl.get('TOTAL_DPTH', '')
record['COMPL_CURRENT_DEPTH'] = compl.get('CURRENT_DPTH', '')
record['COMPLETION_DATE'] = compl.get('COMPLETION_DATE', '')
record['COMPLETION_COMMENTS'] = compl.get('COMMENTS', '')
record['PERMIT_NO'] = compl.get('PERMIT_NO', '')
record['CASE_DIAM'] = record.get('PZONE_DIAM', '')
record['CASE_FR'] = record.get('PZONE_FR', '')
record['CASE_TO'] = record.get('PZONE_TO', '')
record['CASE_MTRL'] = record.get('PZONE_MTRL', '')
records.append(record)
if len(records) > 0:
for record in records:
row = {
'UnitNumber': summary['UnitNumber'],
'OBSNUMBER': summary.get('OBSWELL_NO', ''),
'DH_NAME': summary.get('DH_NAME', ''),
'DRILLHOLE_NO': summary.get('DRILLHOLE_NO', ''),
'CASE_DIAM': record.get('CASE_DIAM', ''),
'CASE_FROM': record.get('CASE_FR', ''),
'CASE_MATERIAL': record.get('CASE_MTRL', ''),
'CASE_TO': record.get('CASE_TO', ''),
'CEMENT_FROM': record.get('CEM_FR', ''),
'CEMENT_TO': record.get('CEM_TO', ''),
'CEMENT_TYPE': record.get('CEM_TYPE', ''),
'COMMENTS': record.get('COMMENTS', ''),
'APERTURE': record.get('APERTURE', ''),
'PZONE_TYPE_CODE': record.get('PZONE_TYPE_CODE', ''),
'EVENT_TYPE': record.get('EVENT_TYPE', ''),
'COMPL_TOTAL_DEPTH': record.get('COMPL_TOTAL_DEPTH', ''),
'COMPL_FINAL_DEPTH': record.get('COMPL_FINAL_DEPTH', ''),
'COMPL_CURRENT_DEPTH': record.get('COMPL_CURRENT_DEPTH', ''),
'COMPLETION_DATE': record.get('COMPLETION_DATE', ''),
'COMPLETION_COMMENTS': record.get('COMPLETION_COMMENTS', ''),
'PERMIT_NO': record.get('PERMIT_NO', ''),
}
ldicts.append(row)
return ldicts2table(ldicts, columns)
def append_tables(*tables):
'''Append tables together.
Args:
table (dict): a dict of lists
Returns: table (all tables passed, with values appended together)
'''
if len(tables) > 1:
table = tables[0]
ldict = table2ldicts(table)
for t in tables[1:]:
l = table2ldicts(t)
ldict += l
return ldicts2table(ldict)
else:
return tables[0]
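# Example (illustrative only): stacking per-well tables into one combined table.
#
#     >>> t1 = {'a': [1], 'b': [2]}
#     >>> t2 = {'a': [3], 'b': [4]}
#     >>> dict(append_tables(t1, t2))
#     {'a': [1, 3], 'b': [2, 4]}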
def json2csv_script_entry():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--dhno-column", default="DHNO", help="column with drillhole numbers")
parser.add_argument("-i", "--input-table", help="Input CSV or Excel file")
parser.add_argument("-j", "--json-path", help="Location of JSON files")
parser.add_argument("-o", "--output-path", help="Output path for CSV files")
args = parser.parse_args(sys.argv[1:])
assert os.path.isfile(args.input_table)
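    # NOTE: `read` below is assumed to be provided elsewhere in this package
    # (a CSV/Excel reader returning a DataFrame-like object whose columns have
    # a `.values` attribute); it is not defined or imported in this module.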
input_data = read(args.input_table)
output_path = os.path.abspath(args.output_path)
if not os.path.isdir(output_path):
os.makedirs(output_path)
assert os.path.isdir(output_path)
dhnos = {int(d) for d in input_data[args.dhno_column].values}
csv_types = {
'casing': casing_record,
'drillers_log': drillers_log,
'drilling': drilling_record,
'hydrostrat_log': hydrostrat_log,
'lith_log': lith_log,
'strat_log': strat_log,
'water_cut': water_cuts,
}
data = {}
for csv_type in csv_types.keys():
data[csv_type] = []
well_list = []
for jsonfn in glob.glob(os.path.join(args.json_path, '*.json')):
with open(jsonfn, 'r') as f:
well = json.load(f)
# print(sorted(well['summary'][0].keys()))
if int(well['summary'][0]['DRILLHOLE_NO']) in dhnos:
for i, (csv_type, func) in enumerate(csv_types.items()):
try:
t = func(well)
l = table2ldicts(t)
data[csv_type] += l
                    except:
                        print('%s failed for drillhole %s' % (
                            csv_type, well['summary'][0].get('DRILLHOLE_NO', '?')))
raise
well_list.append(well)
for typ, l in data.items():
with open(os.path.join(args.output_path, '%s.csv' % typ), 'w') as f:
to_csv(f, ldicts=data[typ])
table = summary_table(well_list)
with open(os.path.join(args.output_path, 'summary.csv'), 'w') as f:
        to_csv(f, table=table)
| mit
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backend_bases.py | 69 | 69740 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc = self.new_gc()
gc.set_clip_rectangle(cliprect)
if clippath is not None:
clippath = transforms.TransformedPath(clippath, clippath_trans)
gc.set_clip_path(clippath)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
gc.set_foreground(edgecolors[i % Nedgecolors])
if Nlinewidths:
gc.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc.set_dashes(*linestyles[i % Nlinestyles])
if rgbFace is not None and len(rgbFace)==4:
gc.set_alpha(rgbFace[-1])
rgbFace = rgbFace[:3]
gc.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc, rgbFace
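    # Sketch of how a backend would typically combine the two generators above
    # (illustrative only; `prepare(path, transform)` and `draw(...)` stand in
    # for backend-specific calls):
    #
    #     path_ids = [prepare(path, transform) for path, transform in
    #                 self._iter_collection_raw_paths(master_transform, paths,
    #                                                 all_transforms)]
    #     for xo, yo, path_id, gc, rgbFace in self._iter_collection(
    #             path_ids, cliprect, clippath, clippath_trans, offsets,
    #             offsetTrans, facecolors, edgecolors, linewidths, linestyles,
    #             antialiaseds, urls):
    #         draw(path_id, xo, yo, gc, rgbFace)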
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
*bbox*
a :class:`matplotlib.transforms.Bbox` instance for clipping, or
None
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
overwrite this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
raise NotImplementedError
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text in display coords
*s*
a :class:`matplotlib.text.Text` instance
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
raise NotImplementedError
def flipy(self):
"""
        Return true if y small numbers are top for renderer. Is used
for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
raise NotImplementedError
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
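    # Worked example (illustrative only): a hypothetical backend with
    # pixels_per_inch == dpi == 100 would map a 12 point width to
    # 12 * 100/72.0 * 100/72.0 ~= 23.1 pixels, whereas this base class simply
    # returns the value unchanged.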
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
pass
def stop_rasterizing(self):
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid' : (None, None),
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'miter'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0)
self._hatch = None
self._url = None
self._snap = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._hatch = gc._hatch
self._url = gc._url
self._snap = gc._snap
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox` instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even size list that gives the ink on, ink
off in pixels.
        See p107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three floats from 0-1. color can be a
        matlab format string, an html hex color string, or an rgb tuple
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
"""
self._alpha = alpha
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b: self._antialiased = 1
else: self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points. ``(None, None)`` specifies a solid line
"""
self._dashes = dash_offset, dash_list
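    # Usage sketch (illustrative only): a dashed line with 6 points on, 6 points
    # off, starting at the beginning of the pattern:
    #
    #     gc.set_dashes(0, (6.0, 6.0))    # cf. GraphicsContextBase.dashd['dashed']
    #
    # and gc.set_dashes(None, None) restores a solid line.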
def set_foreground(self, fg, isRGB=False):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
The :class:`GraphicsContextBase` converts colors to rgb
internally. If you know the color is rgb already, you can set
        ``isRGB=True`` to avoid the performance hit of the conversion
"""
if isRGB:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._rgb = (frac, frac, frac)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
try:
offset, dashes = self.dashd[style]
except:
raise ValueError('Unrecognized linestyle: %s' % style)
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas,guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class LocationEvent(Event):
"""
    An event that has a screen location
The following additional attributes are defined and shown with
their default values
In addition to the :class:`Event` attributes, the following event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None # y position - pixels from bottom of canvas
    inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y,guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas,guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
axes_list = [a for a in self.canvas.figure.get_axes() if a.in_axes(self)]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axCmp = lambda _x,_y: cmp(_x.zorder, _y.zorder)
axes_list.sort(axCmp)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
xdata, ydata = self.inaxes.transData.inverted().transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes!=self.inaxes:
# process axes enter/leave events
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event', 'button_release_event', 'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used for scroll events)
*key*
        the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None # y position - pixels from bottom of canvas
    button = None # button pressed None, 1, 2, 3
    inaxes = None # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- eg a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print 'on pick line:', zip(xdata[ind], ydata[ind])
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist, guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
        the key pressed: None, chr(range(255)), 'shift', 'win', or 'control'
This interface may change slightly when better support for
modifier keys is included.
Example usage::
def on_key(event):
print 'you pressed', event.key, event.xdata, event.ydata
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase:
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event'
]
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry(self.events)
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event',self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event',self.pick)
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event',self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor.  Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [ (h.zorder, h) for h in artists ]
L.sort()
return [ h for zorder, h in L ]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under: h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
print "Removing",h
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self,'_active'): self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a,'get_color'):
a.set_color(self._active[a])
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a,'get_color'):
self._active[a] = a.get_color()
elif hasattr(a,'get_edgecolor'):
self._active[a] = (a.get_edgecolor(),a.get_facecolor())
else: self._active[a] = None
for a in enter:
if hasattr(a,'get_color'):
a.set_color('red')
elif hasattr(a,'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else: self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
def enter_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
'call when GUI is idle'
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'emf': 'Enhanced Metafile',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'png': 'Portable Network Graphics',
'ps' : 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
def print_emf(self, *args, **kwargs):
from backends.backend_emf import FigureCanvasEMF # lazy import
emf = self.switch_backends(FigureCanvasEMF)
return emf.print_emf(*args, **kwargs)
def print_eps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgb = print_raw
def print_svg(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
def get_supported_filetypes(self):
return self.filetypes
def get_supported_filetypes_grouped(self):
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
        *orientation*
            'landscape' | 'portrait'; currently only applies to PostScript
            printing (not supported on all backends)
        *dpi*
            the dots per inch to save the figure in; if None, use savefig.dpi
        *facecolor*
            the facecolor of the figure
        *edgecolor*
            the edgecolor of the figure
*format*
when set, forcibly set the file format to save to
"""
if format is None:
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
method_name = 'print_%s' % format
if (format not in self.filetypes or
not hasattr(self, method_name)):
            formats = sorted(self.filetypes)
raise ValueError(
'Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
try:
result = getattr(self, method_name)(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
**kwargs)
finally:
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
#self.figure.canvas.draw() ## seems superfluous
return result
def get_default_filetype(self):
raise NotImplementedError
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def switch_backends(self, FigureCanvasClass):
"""
instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
return newCanvas
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` instance the event occurred
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print 'you pressed', event.button, event.xdata, event.ydata
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self,timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self,timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
        such, it issues a DeprecationWarning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
        msg = ("Using default event loop until function specific"
               " to this GUI is implemented")
        warnings.warn(msg, DeprecationWarning)
if timeout <= 0: timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter*timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
class FigureManagerBase:
"""
Helper class for matlab mode, wraps everything up into a neat bundle
    Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
        The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.canvas.mpl_connect('key_press_event', self.key_press)
def destroy(self):
pass
def full_screen_toggle (self):
pass
def resize(self, w, h):
'For gui backends: resize window in pixels'
pass
def key_press(self, event):
# these bindings happen whether you are over an axes or not
#if event.key == 'q':
# self.destroy() # how cruel to have to destroy oneself!
# return
if event.key == 'f':
self.full_screen_toggle()
# *h*ome or *r*eset mnemonic
elif event.key == 'h' or event.key == 'r' or event.key == "home":
self.canvas.toolbar.home()
# c and v to enable left handed quick navigation
elif event.key == 'left' or event.key == 'c' or event.key == 'backspace':
self.canvas.toolbar.back()
elif event.key == 'right' or event.key == 'v':
self.canvas.toolbar.forward()
# *p*an mnemonic
elif event.key == 'p':
self.canvas.toolbar.pan()
# z*o*om mnemonic
elif event.key == 'o':
self.canvas.toolbar.zoom()
elif event.key == 's':
self.canvas.toolbar.save_figure(self.canvas.toolbar)
if event.inaxes is None:
return
# the mouse has to be over an axes to trigger these
if event.key == 'g':
event.inaxes.grid()
self.canvas.draw()
elif event.key == 'l':
ax = event.inaxes
scale = ax.get_yscale()
if scale=='log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale=='linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif event.key is not None and (event.key.isdigit() and event.key!='0') or event.key=='a':
# 'a' enables all axes
if event.key!='a':
n=int(event.key)-1
for i, a in enumerate(self.canvas.figure.get_axes()):
if event.x is not None and event.y is not None and a.in_axes(event):
if event.key=='a':
a.set_navigate(True)
else:
a.set_navigate(i==n)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
pass
# cursors
class Cursors: #namespace
HAND, POINTER, SELECT_REGION, MOVE = range(4)
cursors = Cursors()
class NavigationToolbar2:
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self._button_pressed = None # determined by the button pressed at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
'display a message on toolbar or in status bar'
pass
def back(self, *args):
'move back up the view lim stack'
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'draw a rectangle rubberband to indicate zoom limits'
pass
def forward(self, *args):
'move forward in the view lim stack'
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
'restore the original view'
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
#print 'mouse_move', event.button
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active=='ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
self.draw_rubberband(event, x, y, lastx, lasty)
elif (self._active=='PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try: s = event.inaxes.format_coord(event.xdata, event.ydata)
except ValueError: pass
except OverflowError: pass
else:
if len(self.mode):
self.set_message('%s : %s' % (self.mode, s))
else:
self.set_message(s)
else: self.set_message(self.mode)
def pan(self,*args):
'Activate the pan/zoom tool. pan with left button, zoom with right'
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
        'this will be called whenever a mouse button is pressed'
pass
def press_pan(self, event):
        'the mouse button press callback in pan/zoom mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) and a.get_navigate():
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.drag_pan)
self.press(event)
def press_zoom(self, event):
        'the mouse button press callback in zoom-to-rect mode'
if event.button == 1:
self._button_pressed=1
elif event.button == 3:
self._button_pressed=3
else:
self._button_pressed=None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty(): self.push_current()
self._xypress=[]
for i, a in enumerate(self.canvas.figure.get_axes()):
if x is not None and y is not None and a.in_axes(event) \
and a.get_navigate() and a.can_zoom():
self._xypress.append(( x, y, a, i, a.viewLim.frozen(), a.transData.frozen()))
self.press(event)
def push_current(self):
'push the current view limits and position onto the stack'
lims = []; pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append( (xmin, xmax, ymin, ymax) )
# Store both the original and modified positions
pos.append( (
a.get_position(True).frozen(),
a.get_position().frozen() ) )
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
        'this will be called whenever a mouse button is released'
pass
def release_pan(self, event):
        'the mouse button release callback in pan/zoom mode'
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag=self.canvas.mpl_connect('motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress: return
self._xypress = []
self._button_pressed=None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
'the drag callback in pan/zoom mode'
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
            #multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def release_zoom(self, event):
        'the mouse button release callback in zoom-to-rect mode'
if not self._xypress: return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x-lastx)<5 or abs(y-lasty)<5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point( (lastx, lasty) )
x, y = inverse.transform_point( (x, y) )
Xmin,Xmax=a.get_xlim()
Ymin,Ymax=a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a,la): twinx=True
if a.get_shared_y_axes().joined(a,la): twiny=True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x<lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 < Xmin: x0=Xmin
if x1 > Xmax: x1=Xmax
else:
if x>lastx: x0, x1 = x, lastx
else: x0, x1 = lastx, x
if x0 > Xmin: x0=Xmin
if x1 < Xmax: x1=Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y<lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 < Ymin: y0=Ymin
if y1 > Ymax: y1=Ymax
else:
if y>lasty: y0, y1 = y, lasty
else: y0, y1 = lasty, y
if y0 > Ymin: y0=Ymin
if y1 < Ymax: y1=Ymax
if self._button_pressed == 1:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale()=='log':
alpha=np.log(Xmax/Xmin)/np.log(x1/x0)
rx1=pow(Xmin/x0,alpha)*Xmin
rx2=pow(Xmax/x0,alpha)*Xmin
else:
alpha=(Xmax-Xmin)/(x1-x0)
rx1=alpha*(Xmin-x0)+Xmin
rx2=alpha*(Xmax-x0)+Xmin
if a.get_yscale()=='log':
alpha=np.log(Ymax/Ymin)/np.log(y1/y0)
ry1=pow(Ymin/y0,alpha)*Ymin
ry2=pow(Ymax/y0,alpha)*Ymin
else:
alpha=(Ymax-Ymin)/(y1-y0)
ry1=alpha*(Ymin-y0)+Ymin
ry2=alpha*(Ymax-y0)+Ymin
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self.push_current()
self.release(event)
def draw(self):
'redraw the canvases, update the locators'
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw()
def _update_view(self):
'''update the viewlim and position from the view and
position stack for each axes
'''
lims = self._views()
if lims is None: return
pos = self._positions()
if pos is None: return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position( pos[i][0], 'original' )
a.set_position( pos[i][1], 'active' )
self.draw()
def save_figure(self, *args):
'save the current figure'
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
        enum values
"""
pass
def update(self):
'reset the axes stack'
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
'activate zoom to rect mode'
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress=self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease=self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event', self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event', self.release_zoom)
self.mode = 'Zoom to rect mode'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
'enable or disable back/forward button'
pass
| agpl-3.0 |
ghost9023/DeepLearningPythonStudy | DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/2_3) ReLU Function.py | 1 | 3897 | ################### ReLU function
######### KEYWORDS ##########
# saturated : the output of a function is bounded.  The sigmoid and step
#   functions always produce outputs between 0 and 1, so they are said to
#   saturate; the ReLU function's output keeps growing as x grows, so it
#   does not saturate.
# ReLU
# PReLU
# ELU
#############################
# The "Re" in ReLU stands for Rectified.
# "Rectified" is an electronics term: below a certain input no current flows,
# and above that value the response grows as the input grows.
# The graph of ReLU shows why it is called a rectified linear unit.  Before
# drawing the graph, look at the ReLU equation:
#
#        |-- x   (x > 0)
# h(x) = |
#        |-- 0   (x <= 0)
#
# ReLU returns 0 for inputs below zero, and once the input becomes positive
# the output grows linearly with the input.
# Graph
import numpy as np
import matplotlib.pylab as plt
def ReLU(x):
return np.maximum(0,x)
x=np.arange(-5.0,5.0,0.1)
y=ReLU(x)
plt.plot(x,y)
plt.ylim(-1,5)
plt.show()
# ReLU has been the most popular activation function in recent years.  It can
# be written as f(x) = max(0, x): for x > 0 it is a line with slope 1, and for
# x < 0 the output is always 0.  Compared with the sigmoid and tanh functions
# it converges much faster, because it does not saturate and is linear.
# Sigmoid and tanh need exp() to compute their derivatives, which is costly,
# while ReLU costs almost nothing (its derivative is either 0 or 1).
# The big drawback of ReLU is that neurons can die while the network is being
# trained: for x < 0 the gradient is 0, so if a neuron's input stays below 0
# the neuron can die and its weights are no longer updated.
# This lack of response for inputs <= 0 is the weak point of ReLU.
# To compensate for it, the Parametric ReLU (PReLU) function is sometimes used.
# The PReLU equation and graph are as follows:
#
#        |-- x     (x > 0)
# h(x) = |
#        |-- a*x   (x <= 0, 0 < a)
#
# Advantages of PReLU
# It keeps all the advantages of the ReLU function.
# It fixes ReLU's weakness for inputs below 0 (drawback 4 above).
# Drawbacks
# Since exp() has to be computed in some variants, there is a cost similar to
# drawback 3 of the sigmoid function.
# dying ReLU: plain ReLU is always 0 for x < 0, but PReLU gives that region a
# small slope so the neuron does not die.
# It is written f(x) = max(a*x, x), where a is a very small value.
# A few reports show performance gains with this function, but not in every
# case, so try it yourself and judge.
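# A minimal sketch of the PReLU idea described above, reusing the np/plt
# imports from the top of this file; the slope a=0.01 is only an illustrative
# choice, not a recommended setting.
def PReLU(x, a=0.01):
    return np.where(x > 0, x, a * x)

x2 = np.arange(-5.0, 5.0, 0.1)
plt.plot(x2, PReLU(x2))
plt.ylim(-1, 5)
plt.show()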
# ELU function
# 1) It shares the pros and cons of PReLU.
#
#        |-- x                 if x > 0
# f(x) = |
#        |-- alpha*(e^x - 1)   if x <= 0
#
# As the comparison plots and formulas of the ReLU family show, ELU uses
# exp() to approximate a ReLU whose lower bound is pushed down to -1.
# ELU keeps all the advantages of ReLU, also solves the dying ReLU problem,
# and its outputs are close to zero-centered.  However, unlike ReLU and
# PReLU, it has to evaluate exp(), which costs more.
# To sum up: sigmoid and ReLU are fine while studying, but when designing a
# real network, ReLU and PReLU are good choices for the activation function.
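# A minimal sketch of the ELU formula above, f(x) = x for x > 0 and
# alpha*(exp(x) - 1) otherwise; alpha=1.0 is an illustrative choice.
def ELU(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

x3 = np.arange(-5.0, 5.0, 0.1)
plt.plot(x3, ELU(x3))
plt.ylim(-2, 5)
plt.show()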
| mit |
Jimmy-Morzaria/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaรซl Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
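# A quick check of the compression performed above: the 8x8 images give 64
# pixel features, and agglomeration groups them into the 32 requested clusters.
print("n_features before: %d, after: %d" % (X.shape[1], X_reduced.shape[1]))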
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
    # number of permutations. However, it works with sampling algorithms
    # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
mhdella/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
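# A small, hedged addition: after the loop, clf is the distance-weighted
# classifier fitted above, so it can also be queried for point predictions.
# The query point below is an arbitrary illustrative value.
print(clf.predict([[5.0, 3.5]]))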
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/testing/jpl_units/__init__.py | 21 | 3240 | #=======================================================================
"""
This is a sample set of units for use with testing unit conversion
of matplotlib routines. These are used because they use very strict
enforcement of unitized data which will test the entire spectrum of how
unitized data might be used (it is not always meaningful to convert to
a float without specific units given).
UnitDbl is essentially a unitized floating point number. It has a
minimal set of supported units (enough for testing purposes). All
of the mathematical operations are provided to fully test any behaviour
that might occur with unitized data.  Remember that unitized data has
rules as to how it can be applied to one another (a value of distance
cannot be added to a value of time). Thus we need to guard against any
accidental "default" conversion that will strip away the meaning of the
data and render it neutered.
Epoch is different than a UnitDbl of time. Time is something that can be
measured where an Epoch is a specific moment in time. Epochs are typically
referenced as an offset from some predetermined epoch.
A difference of two epochs is a Duration. The distinction between a
Duration and a UnitDbl of time is made because an Epoch can have different
frames (or units). In the case of our test Epoch class the two allowed
frames are 'UTC' and 'ET' (Note that these are rough estimates provided for
testing purposes and should not be used in production code where accuracy
of time frames is desired). As such a Duration also has a frame of
reference and therefore needs to be called out as different than a simple
measurement of time since a delta-t in one frame may not be the same in another.
"""
#=======================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .Duration import Duration
from .Epoch import Epoch
from .UnitDbl import UnitDbl
from .StrConverter import StrConverter
from .EpochConverter import EpochConverter
from .UnitDblConverter import UnitDblConverter
from .UnitDblFormatter import UnitDblFormatter
#=======================================================================
__version__ = "1.0"
__all__ = [
'register',
'Duration',
'Epoch',
'UnitDbl',
'UnitDblFormatter',
]
#=======================================================================
def register():
"""Register the unit conversion classes with matplotlib."""
import matplotlib.units as mplU
mplU.registry[ str ] = StrConverter()
mplU.registry[ Epoch ] = EpochConverter()
mplU.registry[ UnitDbl ] = UnitDblConverter()
#=======================================================================
# Some default unit instances
# Distances
m = UnitDbl( 1.0, "m" )
km = UnitDbl( 1.0, "km" )
mile = UnitDbl( 1.0, "mile" )
# Angles
deg = UnitDbl( 1.0, "deg" )
rad = UnitDbl( 1.0, "rad" )
# Time
sec = UnitDbl( 1.0, "sec" )
min = UnitDbl( 1.0, "min" )
hr = UnitDbl( 1.0, "hour" )
day = UnitDbl( 24.0, "hour" )
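# A short, hedged usage sketch of the helpers defined above, guarded so it
# only runs when the module is executed directly; it only exercises the
# UnitDbl constructor and register(), both of which are defined in this file.
if __name__ == "__main__":
    register()  # install the str/Epoch/UnitDbl converters with matplotlib
    distance = UnitDbl( 5.0, "km" )
    tenMinutes = UnitDbl( 10.0, "min" )
    print( distance )
    print( tenMinutes )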
| mit |