repo_name | path | copies | size | content | license
---|---|---|---|---|---|
gdiminin/HaSAPPy | HaSAPPy/GeneReference_built.py | 1 | 5400 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 18:49:14 2015
@author: GDM
"""
import pandas as pd
import HTSeq
import argparse
import cPickle as pickle
from HaSAPPy.HaSAPPY_time import *
def library_preparation(info):
################
def exonInterval_define(transcript,ref_gene):
exonInterval = []
starts = ref_gene.ix[transcript,"exonStarts"].split(",")
ends = ref_gene.ix[transcript,"exonEnds"].split(",")
for n in range(ref_gene.ix[transcript,"exonCount"]):
interval = HTSeq.GenomicInterval(ref_gene.ix[transcript,"chrom"],int(starts[n]),int(ends[n]),ref_gene.ix[transcript,"strand"])
exonInterval.append(interval)
return exonInterval
####
def define_introns(index,library):
first = True
introns = []
for exon_interval in library.ix[index,"exon_specific"]:
if first:
_5 = exon_interval.end
else:
_3 = exon_interval.start
intron_interval = HTSeq.GenomicInterval(exon_interval.chrom,_5,_3,exon_interval.strand)
introns.append(intron_interval)
_5 = exon_interval.end
first = False
return introns
##################
print '\t- Loading .txt file'
ref_gene = pd.read_table(info.input,sep = "\t")
for index in ref_gene.index:
ref_gene.ix[index,"interval"] = HTSeq.GenomicInterval(ref_gene.ix[index,"chrom"],ref_gene.ix[index,"txStart"],ref_gene.ix[index,"txEnd"],ref_gene.ix[index,"strand"])
gene_collection = ref_gene.groupby(ref_gene["name2"])
gene_collection = pd.Series(gene_collection.groups)
columns = ["reference","genomic_interval","variants","exons","exon_specific","introns_all"]
gene_data = pd.DataFrame(columns= columns)
gene_data.index.name = "genes"
print '\t- Redundant gene intervals management'
for index in gene_collection.index:
list_interval = []
first = True
for n in gene_collection[index]:
if first:
list_interval.append((ref_gene.ix[n,"interval"],[n]))
first = False
else:
count = 0
finded = False
for interval in list_interval:
if ref_gene.ix[n,"interval"].overlaps(interval[0]):
interval[0].extend_to_include(ref_gene.ix[n,"interval"])
list_interval[count][1].append(n)
finded = True
break
count +=1
if not finded:
list_interval.append((ref_gene.ix[n,"interval"],[n]))
if len(list_interval)==1:
gene_data.ix[index,"reference"] = list_interval[0][1]
gene_data.ix[index,"genomic_interval"] = list_interval[0][0]
else:
num = len(list_interval)
for gene in list_interval:
index_1 = index + "_" + str(num)
num-=1
gene_data.ix[index_1,"reference"] = gene[1]
gene_data.ix[index_1,"genomic_interval"] = gene[0]
print '\t- Define exonic regions'
gene_data["variants"] = [{} for gene in range(len(gene_data))]
for index in gene_data.index:
for num in gene_data.ix[index,"reference"]:
exonInterval = exonInterval_define(num,ref_gene)
transcript = ref_gene.ix[num,"name"]
gene_data.ix[index,"variants"][transcript] = exonInterval
print '\t- Define intronic regions'
for index in gene_data.index:
transcripts = gene_data.ix[index,"variants"]
gene_exon = HTSeq.GenomicArray("auto",stranded = True)
gene_all_exon = []
gene_specific_exon = []
for transcript in transcripts:
for interval in transcripts[transcript]:
gene_exon[interval]+=1
for interval in gene_exon.steps():
if interval[1] > 0:
gene_all_exon.append(interval[0])
if interval[1] == len(transcripts):
gene_specific_exon.append(interval[0])
gene_data.set_value(index,"exons",gene_all_exon)
gene_data.set_value(index,"exon_specific",gene_specific_exon)
for index in gene_data.index:
introns = define_introns(index,gene_data)
gene_data.ix[index,"introns_all"] = introns
print '\t- Saving file in: %s' %(info.output)
with open (info.output,'wb') as write:
pickle.dump(gene_data,write)
print 'Gene models library generated\n\tRunTime: %s' %(computeRunTime(startTime, getCurrTime()))
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', help = 'Provide the .txt file containing information on gene models',action = 'store')
parser.add_argument('-o','--output', help = 'Provide the .pkl file path where the library will be stored',action = 'store')
args = parser.parse_args()
print '\n***Generation of gene models library for HaSAPPy program***'
print '\tInput file: %s' % args.input
if ((args.input==None) or (args.output == None)):
print '\nWARNING: the information provided is not sufficient.\nCheck the -h option for more details on the required parameters'
else:
startTime = getCurrTime()
print 'Started: %s' %(startTime)
library_preparation(args)
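# Illustrative invocation sketch (file names below are placeholders, not from the original repo):
# python GeneReference_built.py -i refGene.txt -o gene_models.pkl
# The -i table must contain the columns used above (name, name2, chrom, strand, txStart, txEnd,
# exonCount, exonStarts, exonEnds); the gene-model library is then pickled to the -o path.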
| mit |
pprett/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# unlike [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
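# Illustrative usage sketch (toy data, not part of the scikit-learn source). A single extra tree
# is fitted directly here for demonstration; as noted in the class docstring, extra-trees are
# normally used within ensemble methods.
# import numpy as np
# rng = np.random.RandomState(0)
# X = rng.rand(50, 3)
# y = X[:, 0] + 0.1 * rng.rand(50)
# reg = ExtraTreeRegressor(random_state=0).fit(X, y)
# reg.predict(X[:2])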
| bsd-3-clause |
RealPolitiX/mpes | mpes/fprocessing.py | 1 | 123935 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: R. Patrick Xian
"""
# =========================
# Sections:
# 1. Utility functions
# 2. File I/O and parsing
# 3. Data transformation
# =========================
from __future__ import print_function, division
from .igoribw import loadibw
from .base import FileCollection, MapParser, saveClassAttributes
from .visualization import grid_histogram
from . import utils as u, bandstructure as bs, base as b
from . import dask_tps as tpsd
import igor.igorpy as igor
import pandas as pd
import os
import re
import gc
import glob as g
import numpy as np
import numpy.fft as nft
import numba
import scipy.io as sio
import scipy.interpolate as sint
from scipy.signal import savgol_filter
import skimage.io as skio
from PIL import Image as pim
import warnings as wn
from h5py import File
import psutil as ps
import dask as d, dask.array as da, dask.dataframe as ddf
from dask.diagnostics import ProgressBar
import natsort as nts
from functools import reduce
from funcy import project
from threadpoolctl import threadpool_limits
N_CPU = ps.cpu_count()
# ================= #
# Utility functions #
# ================= #
def sgfltr2d(datamat, span, order, axis=0):
"""
Savitzky-Golay filter for two-dimensional data.
Operates in a line-by-line fashion along one axis.
Returns the filtered data.
"""
dmat = np.rollaxis(datamat, axis)
r, c = np.shape(datamat)
dmatfltr = np.copy(datamat)
for rnum in range(r):
dmatfltr[rnum, :] = savgol_filter(datamat[rnum, :], span, order)
return np.rollaxis(dmatfltr, axis)
def sortNamesBy(namelist, pattern, gp=0, slicerange=(None, None)):
"""
Sort a list of names according to a particular sequence of numbers
(specified by a regular expression search pattern)
Parameters
namelist : list
List of name strings
pattern : str
Regular expression of the search pattern
gp : int
Grouping number in the regular expression
slicerange : tuple | (None, None)
Range of characters in each name within which to search
Returns
orderedseq : array
Ordered sequence from sorting
sortednamelist : str
Sorted list of name strings
"""
gp = int(gp)
sa, sb = slicerange
# Extract a sequence of numbers from the names in the list
seqnum = np.array([re.search(pattern, namelist[i][sa:sb]).group(gp)
for i in range(len(namelist))])
seqnum = seqnum.astype(np.float)
# Sorted index
idx_sorted = np.argsort(seqnum)
# Sort the name list according to the specific number of interest
sortednamelist = [namelist[i] for i in idx_sorted]
# Return the sorted number sequence and name list
return seqnum[idx_sorted], sortednamelist
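# Illustrative example (file names are made up): sort scan files by their embedded number.
# With pattern r'scan_(\d+)' and gp=1, the captured digits drive the ordering:
# seq, sorted_names = sortNamesBy(['scan_10.txt', 'scan_2.txt', 'scan_1.txt'], r'scan_(\d+)', gp=1)
# # seq -> array([ 1., 2., 10.]); sorted_names -> ['scan_1.txt', 'scan_2.txt', 'scan_10.txt']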
def rot2d(th, angle_unit):
"""
Construct a 2D rotation matrix.
"""
if angle_unit == 'deg':
thr = np.deg2rad(th)
return np.array([[np.cos(thr), -np.sin(thr)],
[np.sin(thr), np.cos(thr)]])
elif angle_unit == 'rad':
return np.array([[np.cos(th), -np.sin(th)], [np.sin(th), np.cos(th)]])
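# Quick check of the convention used above (counter-clockwise rotation):
# rot2d(90, 'deg') @ np.array([1.0, 0.0]) -> approximately array([0., 1.])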
# ====================== #
# File I/O and parsing #
# ====================== #
def readimg(f_addr):
"""
Read images (jpg, png, 2D/3D tiff)
"""
return skio.imread(f_addr)
def readtsv(fdir, header=None, dtype='float', **kwds):
"""
Read tsv file from hemispherical detector
**Parameters**
fdir : str
file directory
header : int | None
number of header lines
dtype : str | 'float'
data type of the return numpy.ndarray
**kwds : keyword arguments
other keyword arguments for pandas.read_table()
**Return**
data : numpy ndarray
read and type-converted data
"""
data = np.asarray(pd.read_table(fdir, delim_whitespace=True,
header=None, **kwds), dtype=dtype)
return data
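# Illustrative call (the file name is a placeholder): load a whitespace-delimited detector
# file as a float64 array.
# data = readtsv('hemispherical_scan.tsv', dtype='float64')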
def readIgorBinFile(fdir, **kwds):
"""
Read Igor binary formats (pxp and ibw)
"""
ftype = kwds.pop('ftype', fdir[-3:])
errmsg = "Error in file loading, please check the file format."
if ftype == 'pxp':
try:
igfile = igor.load(fdir)
except IOError:
print(errmsg)
elif ftype == 'ibw':
try:
igfile = loadibw(fdir)
except IOError:
print(errmsg)
else:
raise IOError(errmsg)
return igfile
def readARPEStxt(fdir, withCoords=True):
"""
Read and convert Igor-generated ARPES .txt files into numpy arrays
The withCoords option specifies whether the energy and angle information is given
"""
if withCoords:
# Retrieve the number of columns in the txt file
dataidx = pd.read_table(fdir, skiprows=1, header=None).columns
# Read all data with the specified columns
datamat = pd.read_table(fdir, skiprows=0, header=None, names=dataidx)
# Shift the first row by one value (align the angle axis)
#datamat.iloc[0] = datamat.iloc[0].shift(1)
ARPESData = datamat.loc[1::, 1::].values
EnergyData = datamat.loc[1::, 0].values
AngleData = datamat.loc[0, 1::].values
return ARPESData, EnergyData, AngleData
else:
ARPESData = np.asarray(pd.read_table(fdir, skiprows=1, header=None))
return ARPESData
def txtlocate(ffolder, keytext):
"""
Locate specific txt files containing experimental parameters
"""
txtfiles = g.glob(ffolder + r'\*.txt')
for ind, fname in enumerate(txtfiles):
if keytext in fname:
txtfile = txtfiles[ind]
return txtfile
def mat2im(datamat, dtype='uint8', scaling=['normal'], savename=None):
"""
Convert data matrix to image
"""
dataconv = np.abs(np.asarray(datamat))
for scstr in scaling:
if 'gamma' in scstr:
gfactors = re.split('gamma|-', scstr)[1:]
gfactors = u.numFormatConversion(gfactors, form='float', length=2)
dataconv = gfactors[0]*(dataconv**gfactors[1])
if 'normal' in scaling:
dataconv = (255 / dataconv.max()) * (dataconv - dataconv.min())
elif 'inv' in scaling and 'normal' not in scaling:
dataconv = 255 - (255 / dataconv.max()) * (dataconv - dataconv.min())
if dtype == 'uint8':
imrsc = dataconv.astype(np.uint8)
im = pim.fromarray(imrsc)
if savename:
im.save(savename)
return im
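# Illustrative call (array and file name are placeholders): apply a gamma scaling with
# prefactor 1 and exponent 0.5 (encoded as 'gamma1-0.5'), normalize to 0-255, and save the
# 8-bit image.
# im = mat2im(datamat, scaling=['gamma1-0.5', 'normal'], savename='frame.png')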
def im2mat(fdir):
"""
Convert image to numpy ndarray.
"""
mat = np.asarray(pim.open(fdir))
return mat
def metaReadHDF5(hfile, attributes=[], groups=[]):
"""
Parse the attribute (i.e. metadata) tree in the input HDF5 file and construct a dictionary of attributes
:Parameters:
hfile : HDF5 file instance
Instance of the ``h5py.File`` class.
attributes, groups : list, list | [], []
List of strings representing the names of the specified attribute/group names.
When specified as None, the components (all attributes or all groups) are ignored.
When specified as [], all components (attributes/groups) are included.
When specified as a list of strings, only the attribute/group names matching the strings are retrieved.
"""
out = {}
# Extract the file attributes
if attributes is not None:
attrdict = dict(hfile.attrs.items()) # Contains all file attributes
if len(attributes) > 0:
attrdict = project(attrdict, attributes)
out = u.dictmerge(out, attrdict)
# Extract the group information
if groups is not None:
# groups = None will not include any group.
if len(groups) == 0:
# group = [] will include all groups.
groups = list(hfile)
for g in groups:
gdata = hfile.get(g)
out[g] = dict(gdata.attrs)
out[g]['shape'] = gdata.shape
return out
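# Illustrative sketch (the file name is a placeholder): collect all file attributes plus the
# metadata of a single group; 'Stream_0' follows the default event-estimator group used below.
# with File('measurement.h5', 'r') as hfile:
#     meta = metaReadHDF5(hfile, attributes=[], groups=['Stream_0'])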
class hdf5Reader(File):
""" HDF5 reader class
"""
def __init__(self, f_addr, ncores=None, **kwds):
self.faddress = f_addr
eventEstimator = kwds.pop('estimator', 'Stream_0') # Dataset representing event length
self.CHUNK_SIZE = int(kwds.pop('chunksz', 1e6))
super().__init__(name=self.faddress, mode='r', **kwds)
self.nEvents = self[eventEstimator].size
self.groupNames = list(self)
self.groupAliases = [self.readAttribute(self[gn], 'Name', nullval=gn) for gn in self.groupNames]
# Initialize the look-up dictionary between group aliases and group names
self.nameLookupDict = dict(zip(self.groupAliases, self.groupNames))
self.attributeNames = list(self.attrs)
if (ncores is None) or (ncores > N_CPU) or (ncores < 0):
self.ncores = N_CPU
else:
self.ncores = int(ncores)
def getGroupNames(self, wexpr=None, woexpr=None, use_alias=False):
""" Retrieve group names from the loaded hdf5 file with string filtering.
:Parameters:
wexpr : str | None
Expression in a name to leave in the group name list (w = with).
woexpr : str | None
Expression in a name to leave out of the group name list (wo = without).
use_alias : bool | False
Specification on the use of alias to replace the variable name.
:Return:
filteredGroupNames : list
List of filtered group names.
"""
# Gather group aliases, if specified
if use_alias == True:
groupNames = self.name2alias(self.groupNames)
else:
groupNames = self.groupNames
# Filter the group names
if (wexpr is None) and (woexpr is None):
filteredGroupNames = groupNames
if wexpr:
filteredGroupNames = [i for i in groupNames if wexpr in i]
elif woexpr:
filteredGroupNames = [i for i in groupNames if woexpr not in i]
return filteredGroupNames
def getAttributeNames(self, wexpr=None, woexpr=None):
""" Retrieve attribute names from the loaded hdf5 file with string filtering.
:Parameters:
wexpr : str | None
Expression in a name to leave in the attribute name list (w = with).
woexpr : str | None
Expression in a name to leave out of the attribute name list (wo = without).
:Return:
filteredAttrbuteNames : list
List of filtered attribute names.
"""
if (wexpr is None) and (woexpr is None):
filteredAttributeNames = self.attributeNames
elif wexpr:
filteredAttributeNames = [i for i in self.attributeNames if wexpr in i]
elif woexpr:
filteredAttributeNames = [i for i in self.attributeNames if woexpr not in i]
return filteredAttributeNames
@staticmethod
def readGroup(element, *group, amin=None, amax=None, sliced=True):
""" Retrieve the content of the group(s) in the loaded hdf5 file.
:Parameter:
group : list/tuple
Collection of group names.
amin, amax : numeric, numeric | None, None
Minimum and maximum indice to select from the group (dataset).
sliced : bool | True
Perform slicing on the group (dataset), if ``True``.
:Return:
groupContent : list/tuple
Collection of values of the corresponding groups.
"""
ngroup = len(group)
amin, amax = u.intify(amin, amax)
groupContent = []
for g in group:
try:
if sliced:
groupContent.append(element.get(g)[slice(amin, amax)])
else:
groupContent.append(element.get(g))
except:
raise ValueError("Group '"+g+"' doesn't have sufficient length for slicing!")
if ngroup == 1: # Singleton case
groupContent = groupContent[0]
return groupContent
@staticmethod
def readAttribute(element, *attribute, nullval='None'):
""" Retrieve the content of the attribute(s) in the loaded hdf5 file.
:Parameter:
attribute : list/tuple
Collection of attribute names.
nullval : str | 'None'
Null value to retrieve as a replacement of NoneType.
:Return:
attributeContent : list/tuple
Collection of values of the corresponding attributes.
"""
nattr = len(attribute)
attributeContent = []
for ab in attribute:
try:
attributeContent.append(element.attrs[ab].decode('utf-8'))
except AttributeError: # No need to decode
attributeContent.append(element.attrs[ab])
except KeyError: # No such an attribute
attributeContent.append(nullval)
if nattr == 1:
attributeContent = attributeContent[0]
return attributeContent
def name2alias(self, names_to_convert):
""" Find corresponding aliases of the named groups.
:Parameter:
names_to_convert : list/tuple
Names to convert to aliases.
:Return:
aliases : list/tuple
Aliases corresponding to the names.
"""
aliases = [self.readAttribute(self[ntc], 'Name', nullval=ntc) for ntc in names_to_convert]
return aliases
def _assembleGroups(self, gnames, amin=None, amax=None, use_alias=True, dtyp='float32', timeStamps = False, ret='array'):
""" Assemble the content values of the selected groups.
:Parameters:
gnames : list
List of group names.
amin, amax : numeric, numeric | None, None
Index selection range for all groups.
use_alias : bool | True
See ``hdf5Reader.getGroupNames()``.
dtype : str | 'float32'
Data type string.
ret : str | 'array'
Return type specification ('array' or 'dict').
"""
gdict = {}
# Add groups to dictionary
for ign, gn in enumerate(gnames):
g_dataset = self.readGroup(self, gn, sliced=False)
g_values = g_dataset[slice(amin, amax)]
if bool(dtyp):
g_values = g_values.astype(dtyp)
# Use the group alias as the dictionary key
if use_alias == True:
g_name = self.readAttribute(g_dataset, 'Name', nullval=gn)
gdict[g_name] = g_values
# Use the group name as the dictionary key
else:
gdict[gn] = g_values
# print('{}: {}'.format(g_name, g_values.dtype))
# calculate time Stamps
if timeStamps == True:
# create target array for time stamps
ts = np.zeros(len(gdict[self.readAttribute(g_dataset, 'Name', nullval=gnames[0])]))
# get the start time of the file from its modification date for now
startTime = os.path.getmtime(self.filename) * 1000 #convert to ms
# the ms marker contains a list of events that occurred at full ms intervals. It's monotonically increasing, and can contain duplicates
msMarker_ds = self.readGroup(self, 'msMarkers', sliced=False)
# convert into numpy array
msMarker = msMarker_ds[slice(None, None)]
# the modification time points to the time when the file was finished, so we need to correct for the length it took to write the file
startTime -= len(msMarker_ds)
for n in range(len(msMarker)-1):
# linear interpolation between ms: Disabled, because it takes a lot of time, and external signals are anyway not better synchronized than 1 ms
# ts[msMarker[n]:msMarker[n+1]] = np.linspace(startTime+n, startTime+n+1, msMarker[n+1]-msMarker[n])
ts[msMarker[n]:msMarker[n+1]] = startTime+n
# fill any remaining points
ts[msMarker[len(msMarker)-1]:len(ts)] = startTime + len(msMarker)
gdict['timeStamps'] = ts
if ret == 'array':
return np.asarray(list(gdict.values()))
elif ret == 'dict':
return gdict
def summarize(self, form='text', use_alias=True, timeStamps=False, ret=False, **kwds):
"""
Summarize the content of the hdf5 file (names of the groups,
attributes and the selected contents). Output in various user-specified formats.
:Parameters:
form : str | 'text'
:'dataframe': HDF5 content summarized into a dask dataframe.
:'dict': HDF5 content (both data and metadata) summarized into a dictionary.
:'metadict': HDF5 metadata summarized into a dictionary.
:'text': descriptive text summarizing the HDF5 content.
Format to summarize the content of the file into.
use_alias : bool | True
Specify whether to use the alias to rename the groups.
ret : bool | False
Specify whether function return is sought.
**kwds : keyword arguments
:Return:
hdfdict : dict
Dictionary including both the attributes and the groups,
using their names as the keys.
edf : dataframe
Dataframe (edf = electron dataframe) constructed using only the group values,
and the column names are the corresponding group names (or aliases).
"""
# Summarize file information as printed text
if form == 'text':
# Print-out header
print('*** HDF5 file info ***\n',
'File address = ' + self.faddress + '\n')
# Output info on attributes
print('\n>>> Attributes <<<\n')
for an in self.attributeNames:
print(an + ' = {}'.format(self.readAttribute(self, an)))
# Output info on groups
print('\n>>> Groups <<<\n')
for gn in self.groupNames:
g_dataset = self.readGroup(self, gn, sliced=False)
g_shape = g_dataset.shape
g_alias = self.readAttribute(g_dataset, 'Name')
print(gn + ', Shape = {}, Alias = {}'.format(g_shape, g_alias))
# Summarize all metadata into a nested dictionary
elif form == 'metadict':
# Empty list specifies retrieving all entries, see mpes.metaReadHDF5()
attributes = kwds.pop('attributes', [])
groups = kwds.pop('groups', [])
return metaReadHDF5(self, attributes, groups)
# Summarize attributes and groups into a dictionary
elif form == 'dict':
groups = kwds.pop('groups', self.groupNames)
attributes = kwds.pop('attributes', None)
# Retrieve the range of acquired events
amin = kwds.pop('amin', None)
amax = kwds.pop('amax', None)
amin, amax = u.intify(amin, amax)
# Output as a dictionary
# Attribute names stay as-is; Stream_x groups are renamed to their corresponding aliases
# Add groups to dictionary
hdfdict = self._assembleGroups(groups, amin=amin, amax=amax,
use_alias=use_alias, ret='dict')
# Add attributes to dictionary
if attributes is not None:
for attr in attributes:
hdfdict[attr] = self.readAttribute(self, attr)
if ret == True:
return hdfdict
# Load a very large (e.g. > 1GB), single (monolithic) HDF5 file into a dataframe
elif form == 'dataframe':
self.CHUNK_SIZE = int(kwds.pop('chunksz', 1e6))
dfParts = []
chunkSize = min(self.CHUNK_SIZE, self.nEvents / self.ncores)
nPartitions = int(self.nEvents // chunkSize) + 1
# Determine the column names
gNames = kwds.pop('groupnames', self.getGroupNames(wexpr='Stream'))
colNames = self.name2alias(gNames)
for p in range(nPartitions): # Generate partitioned dataframe
# Calculate the starting and ending index of every chunk of events
eventIDStart = int(p * chunkSize)
eventIDEnd = int(min(eventIDStart + chunkSize, self.nEvents))
dfParts.append(d.delayed(self._assembleGroups)(gNames, amin=eventIDStart, amax=eventIDEnd, **kwds))
# Construct eda (event dask array) and edf (event dask dataframe)
eda = da.from_array(np.concatenate(d.compute(*dfParts), axis=1).T, chunks=self.CHUNK_SIZE)
self.edf = ddf.from_dask_array(eda, columns=colNames)
if ret == True:
return self.edf
# Delayed array for loading an HDF5 file of reasonable size (e.g. < 1GB)
elif form == 'darray':
gNames = kwds.pop('groupnames', self.getGroupNames(wexpr='Stream'))
darray = d.delayed(self._assembleGroups)(gNames, amin=None, amax=None, timeStamps=timeStamps, ret='array', **kwds)
if ret == True:
return darray
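# Illustrative sketch (the file path is a placeholder): build the event dataframe from a
# single HDF5 file using the reader defined above.
# hr = hdf5Reader('run_0001.h5')
# edf = hr.summarize(form='dataframe', ret=True)  # dask dataframe, one column per Stream group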
def convert(self, form, save_addr='./summary', pq_append=False, **kwds):
""" Format conversion from hdf5 to mat (for Matlab/Python) or ibw (for Igor).
:Parameters:
form : str
The format of the data to convert into.
save_addr : str | './summary'
File address to save to.
pq_append : bool | False
Option to append to parquet files.
:True: Append to existing parquet files.
:False: The existing parquet files will be deleted before new file creation.
"""
save_fname = u.appendformat(save_addr, form)
if form == 'mat': # Save dictionary as mat file
hdfdict = self.summarize(form='dict', ret=True, **kwds)
sio.savemat(save_fname, hdfdict)
elif form == 'parquet': # Save dataframe as parquet file
compression = kwds.pop('compression', 'UNCOMPRESSED')
engine = kwds.pop('engine', 'fastparquet')
self.summarize(form='dataframe', **kwds)
self.edf.to_parquet(save_addr, engine=engine, compression=compression,
append=pq_append, ignore_divisions=True)
elif form == 'ibw':
# TODO: Save in igor ibw format
raise NotImplementedError
else:
raise NotImplementedError
def saveDict(dct={}, processor=None, dictname='', form='h5', save_addr='./histogram', **kwds):
""" Save the binning result dictionary, including the histogram and the
axes values (edges or midpoints).
:Parameters:
dct : dict | {}
A dictionary containing the binned data and axes values to be exported.
processor : class | None
Class including all attributes.
dictname : str | ''
Namestring of the dictionary to save (such as the attribute name in a class).
form : str | 'h5'
Save format, supporting 'mat', 'h5'/'hdf5', 'tiff' (need tifffile) or 'png' (need imageio).
save_addr : str | './histogram'
File path to save the binning result.
**kwds : keyword arguments
================ =========== =========== ========================================
keyword data type default meaning
================ =========== =========== ========================================
dtyp string 'float32' Data type of the histogram
cutaxis int 3 The index of axis to cut the 4D data
slicename string 'V' The shared namestring for the 3D slice
binned_data_name string 'binned' Namestring of the binned data
otheraxes dict None Values along other or converted axes
mat_compression bool False Matlab file compression
================ =========== =========== ========================================
"""
dtyp = kwds.pop('dtyp', 'float32')
sln = kwds.pop('slicename', 'V') # sln = slicename
bdn = kwds.pop('binned_data_name', 'binned') # bdn = binned data name
save_addr = u.appendformat(save_addr, form)
otheraxes = kwds.pop('otheraxes', None)
# Extract the dictionary containing data from the class instance attributes or given arguments
if processor is not None:
dct = getattr(processor, dictname)
binaxes = processor.binaxes
else:
binaxes = list(dct.keys())
binaxes.remove(bdn)
# Include other axes values in the binning dictionary
if otheraxes:
dct = u.dictmerge(dct, otheraxes)
if form == 'mat': # Save as mat file (for Matlab)
compression = kwds.pop('mat_compression', False)
sio.savemat(save_addr, dct, do_compression=compression, **kwds)
elif form in ('h5', 'hdf5'): # Save as hdf5 file
cutaxis = kwds.pop('cutaxis', 3)
# Change the bit length of data
if dtyp not in ('float64', 'float'):
for dk, dv in dct.items():
try:
dct[dk] = dv.astype(dtyp)
except:
pass
# Save the binned data
# Save 1-3D data as single datasets
# Open the file before the try block so the finally clause always sees a valid handle
hdf = File(save_addr, 'w')
try:
nbinaxes = len(binaxes)
if nbinaxes < 4:
hdf.create_dataset('binned/'+sln, data=dct[bdn])
# Save 4D data as a list of separated 3D datasets
elif nbinaxes == 4:
nddata = np.rollaxis(dct[bdn], cutaxis)
n = nddata.shape[0]
for i in range(n):
hdf.create_dataset('binned/'+sln+str(i), data=nddata[i,...])
else:
raise NotImplementedError('The output format is undefined for data '
'with higher than four dimensions!')
# Save the axes in the same group
for bax in binaxes:
hdf.create_dataset('axes/'+bax, data=dct[bax])
finally:
hdf.close()
elif form == 'tiff': # Save as tiff stack
try:
import tifffile as ti
ti.imsave(save_addr, data=dct[bdn].astype(dtyp))
except ImportError:
raise ImportError('tifffile package is not installed locally!')
elif form == 'png': # Save as png for slices
import imageio as imio
cutaxis = kwds.pop('cutaxis', 2)
nbinaxes = len(binaxes)
if nbinaxes == 2:
imio.imwrite(save_addr[:-3]+'.png', dct[bdn], format='png')
if nbinaxes == 3:
nddata = np.rollaxis(dct[bdn], cutaxis)
n = nddata.shape[0]
for i in range(n):
wn.simplefilter('ignore', UserWarning)
imio.imwrite(save_addr[:-3]+'_'+str(i)+'.png', nddata[i,...], format='png')
elif nbinaxes >= 4:
raise NotImplementedError('The output format is undefined for data '
'with higher than three dimensions!')
elif form == 'ibw': # Save as Igor wave
from igorwriter import IgorWave
wave = IgorWave(dct[bdn], name=bdn)
wave.save(save_addr)
else:
raise NotImplementedError('Not implemented output format!')
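# Illustrative usage sketch for saveDict(). The dictionary keys and the file path
# below are hypothetical; only the 'binned' key needs to match binned_data_name,
# and the remaining keys are interpreted as the axes values.
#
#   result = {'binned': np.random.rand(64, 64).astype('float32'),
#             'kx': np.linspace(-2, 2, 64),
#             'ky': np.linspace(-2, 2, 64)}
#   saveDict(dct=result, form='h5', save_addr='./example_histogram')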
class hdf5Processor(hdf5Reader):
""" Class for generating multidimensional histogram from hdf5 files.
"""
def __init__(self, f_addr, **kwds):
self.faddress = f_addr
self.ua = kwds.pop('use_alias', True)  # keyword name assumed from the later use_alias=self.ua calls; the original popped an empty-string key
self.hdfdict = {}
self.histdict = {}
self.axesdict = {}
super().__init__(f_addr=self.faddress, **kwds)
def _addBinners(self, axes=None, nbins=None, ranges=None, binDict=None, irregular_bins=False):
"""
Construct the binning parameters within an instance.
"""
# Use information specified in binDict, ignore others
if binDict is not None:
try:
self.binaxes = list(binDict['axes'])
self.nbinaxes = len(self.binaxes)
self.bincounts = binDict['nbins']
self.binranges = binDict['ranges']
except:
pass # No action when binDict lacks the expected keys
# Use information from other specified parameters if binDict is not given
else:
self.binaxes = list(axes)
self.nbinaxes = len(self.binaxes)
# Collect the number of bins
if irregular_bins == False:
try: # To have the same number of bins on all axes
self.bincounts = int(nbins)
except: # To have different number of bins on each axis
self.bincounts = list(map(int, nbins))
self.binranges = ranges
# Construct binning steps
self.binsteps = []
for bc, (lrange, rrange) in zip(self.bincounts, self.binranges):
self.binsteps.append((rrange - lrange) / bc)
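# A minimal sketch of the binDict argument read by _addBinners(). The axis names
# and numbers are hypothetical; the three keys are the ones accessed above.
#
#   example_binDict = {'axes': ['X', 'Y', 't'],
#                      'nbins': [128, 128, 300],
#                      'ranges': [(0, 1800), (0, 1800), (68000, 74000)]}
#   # proc._addBinners(binDict=example_binDict)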
def loadMapping(self, energy, momentum):
"""
Load the mapping parameters
"""
# TODO: add transform functions to for axes conversion.
pass
def viewEventHistogram(self, ncol, axes=['X', 'Y', 't', 'ADC'], bins=[80, 80, 80, 80],
ranges=[(0, 1800), (0, 1800), (68000, 74000), (0, 500)], axes_name_type='alias',
backend='bokeh', legend=True, histkwds={}, legkwds={}, **kwds):
"""
Plot individual histograms of specified dimensions (axes).
:Parameters:
ncol : int
Number of columns in the plot grid.
axes : list/tuple
Name of the axes to view.
bins : list/tuple
Bin values of all specified axes.
ranges : list
Value ranges of all specified axes.
axes_name_type : str | 'alias'
:'alias': human-comprehensible aliases of the datasets from the hdf5 file (e.g. 'X', 'ADC', etc)
:'original': original names of the datasets from the hdf5 file (e.g. 'Stream0', etc).
Type of specified axes names.
backend : str | 'bokeh'
Backend of the plotting library ('matplotlib' or 'bokeh').
legend : bool | True
Option to include a legend in the histogram plots.
histkwds, legkwds, **kwds : dict, dict, keyword arguments
Extra keyword arguments passed to ``mpes.visualization.grid_histogram()``.
"""
input_types = map(type, [axes, bins, ranges])
allowed_types = [list, tuple]
if set(input_types).issubset(allowed_types):
# Convert axes names
if axes_name_type == 'alias':
gnames = [self.nameLookupDict[ax] for ax in axes]
elif axes_name_type == 'original':
gnames = axes
# Read out the values for the specified groups
group_dict = self.summarize(form='dict', groups=gnames, attributes=None,
use_alias=True, ret=True)
# Plot multiple histograms in a grid
grid_histogram(group_dict, ncol=ncol, rvs=axes, rvbins=bins, rvranges=ranges,
backend=backend, legend=legend, histkwds=histkwds, legkwds=legkwds, **kwds)
else:
raise TypeError('Inputs of axes, bins, ranges need to be list or tuple!')
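# Usage sketch (hypothetical file path): quick look at the raw event distributions.
#
#   proc = hdf5Processor('scan_0001.h5')
#   proc.viewEventHistogram(ncol=2, axes=['X', 'Y'], bins=[80, 80],
#                           ranges=[(0, 1800), (0, 1800)], backend='matplotlib')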
def getCountRate(self, plot=False):
"""
Create count rate trace from the msMarker field in the hdf5 file.
:Parameters:
plot : bool | False
Not yet functional.
:Return:
countRate, secs : numpy array, numpy array
The count rate in Hz and the elapsed seconds into the scan.
"""
msMarkers=self.readGroup(self, 'msMarkers', sliced=True)
secs = np.asarray(range(0,len(msMarkers)))/1000
f = sint.InterpolatedUnivariateSpline(secs, msMarkers, k=1)
fprime = f.derivative()
countRate = fprime(secs)
return countRate, secs
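# Usage sketch (hypothetical file path): the count rate trace can be plotted
# directly from the returned arrays.
#
#   proc = hdf5Processor('scan_0001.h5')
#   countRate, secs = proc.getCountRate()
#   # plt.plot(secs, countRate)  # assuming matplotlib.pyplot imported as plt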
def getElapsedTime(self):
"""
Return the elapsed time in the file from the msMarkers wave
:Return:
secs : float
The length of the file in seconds.
"""
secs = self.get('msMarkers').len()/1000
return secs
def localBinning(self, axes=None, nbins=None, ranges=None, binDict=None,
jittered=False, histcoord='midpoint', ret='dict', **kwds):
"""
Compute the photoelectron intensity histogram locally after loading all data into RAM.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
jittered : bool | False
Determines whether to add jitter to the data to avoid rebinning artefact.
histcoord : string | 'midpoint'
The coordinates of the histogram. Specify 'edge' to get the bar edges (every
dimension has one value more), specify 'midpoint' to get the midpoint of the
bars (same length as the histogram dimensions).
ret : str/bool | 'dict'
:'dict': returns the dictionary containing the binned data and the axes values.
:'histogram': returns only the binned histogram array.
:False: no explicit return of the binned data, the dictionary
generated in the binning is still retained as an instance attribute.
**kwds : keyword argument
================ ============== =========== ==========================================
keyword data type default meaning
================ ============== =========== ==========================================
amin numeric/None None minimum value of electron sequence
amax numeric/None None maximum value of electron sequence
jitter_axes list axes list of axes to jitter
jitter_bins list nbins list of the number of bins
jitter_amplitude numeric/array 0.5 jitter amplitude (single number for all)
jitter_ranges list ranges list of the binning ranges
================ ============== =========== ==========================================
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
# Retrieve the range of acquired events
amin = kwds.pop('amin', None)
amax = kwds.pop('amax', None)
amin, amax = u.intify(amin, amax)
# Assemble the data for binning, assuming they can be completely loaded into RAM
self.hdfdict = self.summarize(form='dict', use_alias=self.ua, amin=amin, amax=amax, ret=True)
# Set up binning parameters
self._addBinners(axes, nbins, ranges, binDict)
# Add jitter to the data streams before binning
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jitter_axes = kwds.pop('jitter_axes', axes)
jitter_bins = kwds.pop('jitter_bins', nbins)
jitter_amplitude = kwds.pop('jitter_amplitude', 0.5*np.ones(len(jitter_axes)))
jitter_ranges = kwds.pop('jitter_ranges', ranges)
# Add jitter to the specified dimensions of the data
for jb, jax, jamp, jr in zip(jitter_bins, jitter_axes, jitter_amplitude, jitter_ranges):
sz = self.hdfdict[jax].size
# Calculate the bar size of the histogram in every dimension
binsize = abs(jr[0] - jr[1])/jb
self.hdfdict[jax] = self.hdfdict[jax].astype('float32')
# Jitter as random uniformly distributed noise (W. S. Cleveland)
self.hdfdict[jax] += jamp * binsize * np.random.uniform(low=-1,
high=1, size=sz).astype('float32')
# Stack up data from unbinned axes
data_unbinned = np.stack([self.hdfdict[ax] for ax in axes], axis=1)  # np.stack requires a sequence, not a generator
self.hdfdict = {}
# Compute binned data locally
self.histdict['binned'], ax_vals = np.histogramdd(data_unbinned,
bins=self.bincounts, range=self.binranges)
self.histdict['binned'] = self.histdict['binned'].astype('float32')
del data_unbinned
for iax, ax in enumerate(axes):
if histcoord == 'midpoint':
ax_edge = ax_vals[iax]
ax_midpoint = (ax_edge[1:] + ax_edge[:-1])/2
self.histdict[ax] = ax_midpoint
elif histcoord == 'edge':
self.histdict[ax] = ax_vals[iax]
if ret == 'dict':
return self.histdict
elif ret == 'histogram':
histogram = self.histdict.pop('binned')
self.axesdict = self.histdict.copy()
self.histdict = {}
return histogram
elif ret == False:
return
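# Usage sketch for localBinning() (hypothetical file path and binning values).
# The jittering parameters default to the binning specifications when not given.
#
#   proc = hdf5Processor('scan_0001.h5')
#   res = proc.localBinning(axes=['X', 'Y', 't'], nbins=[128, 128, 256],
#                           ranges=[(0, 1800), (0, 1800), (68000, 74000)],
#                           jittered=True, ret='dict')
#   # res['binned'].shape -> (128, 128, 256); res['X'] holds the bin midpoints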
def localBinning_numba(self, axes=None, nbins=None, ranges=None, binDict=None,
jittered=False, histcoord='midpoint', ret='dict', **kwds):
"""
Compute the photoelectron intensity histogram locally after loading all data into RAM.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
jittered : bool | False
Determines whether to add jitter to the data to avoid rebinning artefact.
histcoord : string | 'midpoint'
The coordinates of the histogram. Specify 'edge' to get the bar edges (every
dimension has one value more), specify 'midpoint' to get the midpoint of the
bars (same length as the histogram dimensions).
ret : str/bool | 'dict'
:'dict': returns the dictionary containing the binned data and the axes values.
:'histogram': returns only the binned histogram array.
:False: no explicit return of the binned data, the dictionary
generated in the binning is still retained as an instance attribute.
**kwds : keyword argument
================ ============== =========== ==========================================
keyword data type default meaning
================ ============== =========== ==========================================
amin numeric/None None minimum value of electron sequence
amax numeric/None None maximum value of electron sequence
jitter_axes list axes list of axes to jitter
jitter_bins list nbins list of the number of bins
jitter_amplitude numeric/array 0.5 jitter amplitude (single number for all)
jitter_ranges list ranges list of the binning ranges
================ ============== =========== ==========================================
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
# Retrieve the range of acquired events
amin = kwds.pop('amin', None)
amax = kwds.pop('amax', None)
amin, amax = u.intify(amin, amax)
# Assemble the data for binning, assuming they can be completely loaded into RAM
self.hdfdict = self.summarize(form='dict', use_alias=self.ua, amin=amin, amax=amax, ret=True)
# Set up binning parameters
self._addBinners(axes, nbins, ranges, binDict)
# Add jitter to the data streams before binning
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jitter_axes = kwds.pop('jitter_axes', axes)
jitter_bins = kwds.pop('jitter_bins', nbins)
jitter_amplitude = kwds.pop('jitter_amplitude', 0.5*np.ones(len(jitter_axes)))
jitter_ranges = kwds.pop('jitter_ranges', ranges)
# Add jitter to the specified dimensions of the data
for jb, jax, jamp, jr in zip(jitter_bins, jitter_axes, jitter_amplitude, jitter_ranges):
sz = self.hdfdict[jax].size
# Calculate the bar size of the histogram in every dimension
binsize = abs(jr[0] - jr[1])/jb
self.hdfdict[jax] = self.hdfdict[jax].astype('float32')
# Jitter as random uniformly distributed noise (W. S. Cleveland)
self.hdfdict[jax] += jamp * binsize * np.random.uniform(low=-1,
high=1, size=sz).astype('float32')
# Stack up data from unbinned axes
data_unbinned = np.stack([self.hdfdict[ax] for ax in axes], axis=1)  # np.stack requires a sequence, not a generator
self.hdfdict = {}
# Compute binned data locally
self.histdict['binned'], ax_vals = numba_histogramdd(data_unbinned,
bins=self.bincounts, ranges=self.binranges)
self.histdict['binned'] = self.histdict['binned'].astype('float32')
del data_unbinned
for iax, ax in enumerate(axes):
if histcoord == 'midpoint':
ax_edge = ax_vals[iax]
ax_midpoint = (ax_edge[1:] + ax_edge[:-1])/2
self.histdict[ax] = ax_midpoint
elif histcoord == 'edge':
self.histdict[ax] = ax_vals[iax]
if ret == 'dict':
return self.histdict
elif ret == 'histogram':
histogram = self.histdict.pop('binned')
self.axesdict = self.histdict.copy()
self.histdict = {}
return histogram
elif ret == False:
return
def updateHistogram(self, axes=None, sliceranges=None, ret=False):
"""
Update the dimensional sizes of the binning results.
"""
# Input axis order to binning axes order
binaxes = np.asarray(self.binaxes)
seqs = [np.where(ax == binaxes)[0][0] for ax in axes]
for seq, ax, rg in zip(seqs, axes, sliceranges):
# Update the lengths of binning axes (seq is already computed above)
self.histdict[ax] = self.histdict[ax][rg[0]:rg[1]]
# Update the binned histogram
tempmat = np.moveaxis(self.histdict['binned'], seq, 0)[rg[0]:rg[1],...]
self.histdict['binned'] = np.moveaxis(tempmat, 0, seq)
if ret:
return self.histdict
def saveHistogram(self, dictname='histdict', form='h5', save_addr='./histogram', **kwds):
"""
Save binned histogram and the axes. See ``mpes.fprocessing.saveDict()``.
"""
try:
saveDict(processor=self, dictname=dictname, form=form, save_addr=save_addr, **kwds)
except:
raise Exception('Saving histogram was unsuccessful!')
def saveParameters(self, form='h5', save_addr='./binning'):
"""
Save all the attributes of the binning instance for later use
(e.g. binning axes, ranges, etc).
:Parameters:
form : str | 'h5'
File format for saving the parameters ('h5'/'hdf5', 'mat')
save_addr : str | './binning'
The address for the to be saved file.
"""
saveClassAttributes(self, form, save_addr)
def toSplitter(self):
"""
Convert to an instance of hdf5Splitter.
"""
return hdf5Splitter(f_addr=self.faddress)
def toBandStructure(self):
"""
Convert to an instance of BandStructure.
"""
pass
def binPartition(partition, binaxes, nbins, binranges, jittered=False, jitter_params={}):
""" Bin the data within a file partition (e.g. dask dataframe).
:Parameters:
partition : dataframe partition
Partition of a dataframe.
binaxes : list
List of axes to bin.
nbins : list
Number of bins for each binning axis.
binranges : list
The range of each axis to bin.
jittered : bool | False
Option to include jittering in binning.
jitter_params : dict | {}
Parameters used to set jittering.
:Return:
hist_partition : ndarray
Histogram from the binning process.
"""
if jittered:
# Add jittering to values
jitter_bins = jitter_params['jitter_bins']
jitter_axes = jitter_params['jitter_axes']
jitter_amplitude = jitter_params['jitter_amplitude']
jitter_ranges = jitter_params['jitter_ranges']
jitter_type = jitter_params['jitter_type']
for jb, jax, jamp, jr in zip(jitter_bins, jitter_axes, jitter_amplitude, jitter_ranges):
# Calculate the bar size of the histogram in every dimension
binsize = abs(jr[0] - jr[1])/jb
# Jitter as random uniformly distributed noise (W. S. Cleveland)
applyJitter(partition, amp=jamp*binsize, col=jax, type=jitter_type)
cols = partition.columns
# Locate columns for binning operation
binColumns = [cols.get_loc(binax) for binax in binaxes]
vals = partition.values[:, binColumns]
hist_partition, _ = np.histogramdd(vals, bins=nbins, range=binranges)
return hist_partition
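# Sketch of the jitter_params dictionary expected by binPartition() and
# binPartition_numba() when jittered=True. The values are hypothetical; the keys
# mirror those assembled in binDataframe_lean()/_fast()/_numba().
#
#   example_jitter_params = {'jitter_axes': ['X', 'Y', 't'],
#                            'jitter_bins': [128, 128, 256],
#                            'jitter_amplitude': 0.5*np.ones(3),
#                            'jitter_ranges': [(0, 1800), (0, 1800), (68000, 74000)],
#                            'jitter_type': 'normal'}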
def binPartition_numba(partition, binaxes, nbins, binranges, jittered=False, jitter_params={}):
""" Bin the data within a file partition (e.g. dask dataframe).
:Parameters:
partition : dataframe partition
Partition of a dataframe.
binaxes : list
List of axes to bin.
nbins : list
Number of bins for each binning axis.
binranges : list
The range of each axis to bin.
jittered : bool | False
Option to include jittering in binning.
jitter_params : dict | {}
Parameters used to set jittering.
:Return:
hist_partition : ndarray
Histogram from the binning process.
"""
if jittered:
# Add jittering to values
jitter_bins = jitter_params['jitter_bins']
jitter_axes = jitter_params['jitter_axes']
jitter_amplitude = jitter_params['jitter_amplitude']
jitter_ranges = jitter_params['jitter_ranges']
jitter_type = jitter_params['jitter_type']
colsize = partition[jitter_axes[0]].size
if (jitter_type == 'uniform'):
jitter = np.random.uniform(low=-1, high=1, size=colsize)
elif (jitter_type == 'normal'):
jitter = np.random.standard_normal(size=colsize)
else:
jitter = 0
for jb, jax, jamp, jr in zip(jitter_bins, jitter_axes, jitter_amplitude, jitter_ranges):
# Calculate the bar size of the histogram in every dimension
binsize = abs(jr[0] - jr[1])/jb
# Apply same jitter to all columns to save time
partition[jax] += jamp*binsize*jitter
cols = partition.columns
# Locate columns for binning operation
binColumns = [cols.get_loc(binax) for binax in binaxes]
vals = partition.values[:, binColumns]
hist_partition, _ = numba_histogramdd(vals, bins=nbins, ranges=binranges)
return hist_partition
def binDataframe(df, ncores=N_CPU, axes=None, nbins=None, ranges=None,
binDict=None, pbar=True, jittered=True, pbenv='classic', **kwds):
"""
Calculate multidimensional histogram from columns of a dask dataframe.
Prof. Yves Acremann's method.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
pbar : bool | True
Option to display a progress bar.
pbenv : str | 'classic'
Progress bar environment ('classic' for generic version and
'notebook' for notebook compatible version).
jittered : bool | True
Option to add histogram jittering during binning.
**kwds : keyword arguments
See keyword arguments in ``mpes.fprocessing.hdf5Processor.localBinning()``.
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
histdict = {}
partitionResults = [] # Partition-level results
tqdm = u.tqdmenv(pbenv)
# Add jitter to all the partitions before binning
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jitter_axes = kwds.pop('jitter_axes', axes)
jitter_bins = kwds.pop('jitter_bins', nbins)
jitter_amplitude = kwds.pop('jitter_amplitude', 0.5*np.ones(len(jitter_axes)))
jitter_ranges = kwds.pop('jitter_ranges', ranges)
jitter_type = kwds.pop('jitter_type', 'normal')  # jitter_params is not defined in this function; read the type directly from kwds
# Add jitter to the specified dimensions of the data
for jb, jax, jamp, jr in zip(jitter_bins, jitter_axes, jitter_amplitude, jitter_ranges):
# Calculate the bar size of the histogram in every dimension
binsize = abs(jr[0] - jr[1])/jb
# Jitter as random noise (W. S. Cleveland); map_partitions is lazy, so the result must be reassigned
df = df.map_partitions(applyJitter, amp=jamp*binsize, col=jax, type=jitter_type)
# Main loop for binning
for i in tqdm(range(0, df.npartitions, ncores), disable=not(pbar)):
coreTasks = [] # Core-level jobs
for j in range(0, ncores):
ij = i + j
if ij >= df.npartitions:
break
dfPartition = df.get_partition(ij) # Obtain dataframe partition
coreTasks.append(d.delayed(binPartition)(dfPartition, axes, nbins, ranges))
if len(coreTasks) > 0:
coreResults = d.compute(*coreTasks, **kwds)
# Combine all core results for a dataframe partition
partitionResult = np.zeros_like(coreResults[0])
for coreResult in coreResults:
partitionResult += coreResult
partitionResults.append(partitionResult)
# del partitionResult
del coreTasks
# Combine all partition results
fullResult = np.zeros_like(partitionResults[0])
for pr in partitionResults:
fullResult += np.nan_to_num(pr)
# Load into dictionary
histdict['binned'] = fullResult.astype('float32')
# Calculate axes values
for iax, ax in enumerate(axes):
axrange = ranges[iax]
histdict[ax] = np.linspace(axrange[0], axrange[1], nbins[iax])
return histdict
def binDataframe_lean(df, ncores=N_CPU, axes=None, nbins=None, ranges=None,
binDict=None, pbar=True, jittered=True, pbenv='classic', **kwds):
"""
Calculate multidimensional histogram from columns of a dask dataframe.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
pbar : bool | True
Option to display a progress bar.
pbenv : str | 'classic'
Progress bar environment ('classic' for generic version and 'notebook' for notebook compatible version).
jittered : bool | True
Option to add histogram jittering during binning.
**kwds : keyword arguments
See keyword arguments in ``mpes.fprocessing.hdf5Processor.localBinning()``.
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
histdict = {}
fullResult = np.zeros(tuple(nbins)) # Accumulator for the combined binning result
tqdm = u.tqdmenv(pbenv)
# Construct jitter specifications
jitter_params = {}
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jaxes = kwds.pop('jitter_axes', axes)
jitter_params = {'jitter_axes': jaxes,
'jitter_bins': kwds.pop('jitter_bins', nbins),
'jitter_amplitude': kwds.pop('jitter_amplitude', 0.5*np.ones(len(jaxes))),
'jitter_ranges': kwds.pop('jitter_ranges', ranges),
'jitter_type': kwds.pop('jitter_type', 'normal')}
# Main loop for binning
for i in tqdm(range(0, df.npartitions, ncores), disable=not(pbar)):
coreTasks = [] # Core-level jobs
for j in range(0, ncores):
ij = i + j
if ij >= df.npartitions:
break
dfPartition = df.get_partition(ij) # Obtain dataframe partition
coreTasks.append(d.delayed(binPartition)(dfPartition, axes, nbins, ranges, jittered, jitter_params))
if len(coreTasks) > 0:
coreResults = d.compute(*coreTasks, **kwds)
# Combine all core results for a dataframe partition
partitionResult = reduce(_arraysum, coreResults)
fullResult += partitionResult
del partitionResult
del coreResults
del coreTasks
# Load into dictionary
histdict['binned'] = fullResult.astype('float32')
# Calculate axes values
for iax, ax in enumerate(axes):
axrange = ranges[iax]
histdict[ax] = np.linspace(axrange[0], axrange[1], nbins[iax])
return histdict
def binDataframe_fast(df, ncores=N_CPU, axes=None, nbins=None, ranges=None,
binDict=None, pbar=True, jittered=True, pbenv='classic', jpart=True, **kwds):
"""
Calculate multidimensional histogram from columns of a dask dataframe.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
pbar : bool | True
Option to display a progress bar.
pbenv : str | 'classic'
Progress bar environment ('classic' for generic version and 'notebook' for notebook compatible version).
jittered : bool | True
Option to add histogram jittering during binning.
**kwds : keyword arguments
See keyword arguments in ``mpes.fprocessing.hdf5Processor.localBinning()``.
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
histdict = {}
fullResult = np.zeros(tuple(nbins)) # Accumulator for the combined binning result
tqdm = u.tqdmenv(pbenv)
# Construct jitter specifications
jitter_params = {}
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jaxes = kwds.pop('jitter_axes', axes)
jitter_params = {'jitter_axes': jaxes,
'jitter_bins': kwds.pop('jitter_bins', nbins),
'jitter_amplitude': kwds.pop('jitter_amplitude', 0.5*np.ones(len(jaxes))),
'jitter_ranges': kwds.pop('jitter_ranges', ranges),
'jitter_type': kwds.pop('jitter_type', 'normal')}
# limit multithreading in worker threads
nthreads_per_worker = kwds.pop('nthreads_per_worker', 4)
threadpool_api = kwds.pop('threadpool_api', 'blas')
with threadpool_limits(limits=nthreads_per_worker, user_api=threadpool_api):
# Main loop for binning
for i in tqdm(range(0, df.npartitions, ncores), disable=not(pbar)):
coreTasks = [] # Core-level jobs
for j in range(0, ncores):
ij = i + j
if ij >= df.npartitions:
break
dfPartition = df.get_partition(ij) # Obtain dataframe partition
coreTasks.append(d.delayed(binPartition)(dfPartition, axes, nbins, ranges, jittered, jitter_params))
if len(coreTasks) > 0:
coreResults = d.compute(*coreTasks, **kwds)
combineTasks = []
for j in range(0, ncores):
combineParts = []
# split results along the first dimension among worker threads
for r in coreResults:
combineParts.append(r[int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...])
combineTasks.append(d.delayed(reduce)(_arraysum, combineParts))
combineResults = d.compute(*combineTasks, **kwds)
# Directly fill into target array. This is much faster than the (not so parallel) reduce/concatenation used before, and uses less memory.
for j in range(0, ncores):
fullResult[int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...] += combineResults[j]
del combineParts
del combineTasks
del combineResults
del coreResults
del coreTasks
# Load into dictionary
histdict['binned'] = fullResult.astype('float32')
# Calculate axes values
for iax, ax in enumerate(axes):
axrange = ranges[iax]
histdict[ax] = np.linspace(axrange[0], axrange[1], nbins[iax])
return histdict
def binDataframe_numba(df, ncores=N_CPU, axes=None, nbins=None, ranges=None,
binDict=None, pbar=True, jittered=True, pbenv='classic', jpart=True, **kwds):
"""
Calculate multidimensional histogram from columns of a dask dataframe.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
binDict : dict | None
Dictionary with specifications of axes, nbins and ranges. If binDict
is not None, it overrides the specifications from the other arguments.
pbar : bool | True
Option to display a progress bar.
pbenv : str | 'classic'
Progress bar environment ('classic' for generic version and 'notebook' for notebook compatible version).
jittered : bool | True
Option to add histogram jittering during binning.
**kwds : keyword arguments
See keyword arguments in ``mpes.fprocessing.hdf5Processor.localBinning()``.
:Return:
histdict : dict
Dictionary containing binned data and the axes values (if ``ret = True``).
"""
histdict = {}
fullResult = np.zeros(tuple(nbins)) # Accumulator for the combined binning result
tqdm = u.tqdmenv(pbenv)
# Construct jitter specifications
jitter_params = {}
if jittered:
# Retrieve parameters for histogram jittering, the ordering of the jittering
# parameters is the same as that for the binning
jaxes = kwds.pop('jitter_axes', axes)
jitter_params = {'jitter_axes': jaxes,
'jitter_bins': kwds.pop('jitter_bins', nbins),
'jitter_amplitude': kwds.pop('jitter_amplitude', 0.5*np.ones(len(jaxes))),
'jitter_ranges': kwds.pop('jitter_ranges', ranges),
'jitter_type': kwds.pop('jitter_type', 'normal')}
# limit multithreading in worker threads
nthreads_per_worker = kwds.pop('nthreads_per_worker', 4)
threadpool_api = kwds.pop('threadpool_api', 'blas')
with threadpool_limits(limits=nthreads_per_worker, user_api=threadpool_api):
# Main loop for binning
for i in tqdm(range(0, df.npartitions, ncores), disable=not(pbar)):
coreTasks = [] # Core-level jobs
for j in range(0, ncores):
ij = i + j
if ij >= df.npartitions:
break
dfPartition = df.get_partition(ij) # Obtain dataframe partition
coreTasks.append(d.delayed(binPartition_numba)(dfPartition, axes, nbins, ranges, jittered, jitter_params))
if len(coreTasks) > 0:
coreResults = d.compute(*coreTasks, **kwds)
combineTasks = []
for j in range(0, ncores):
combineParts = []
# split results along the first dimension among worker threads
for r in coreResults:
combineParts.append(r[int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...])
combineTasks.append(d.delayed(reduce)(_arraysum, combineParts))
combineResults = d.compute(*combineTasks, **kwds)
# Directly fill into target array. This is much faster than the (not so parallel) reduce/concatenation used before, and uses less memory.
for j in range(0, ncores):
fullResult[int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...] += combineResults[j]
del combineParts
del combineTasks
del combineResults
del coreResults
del coreTasks
# Load into dictionary
histdict['binned'] = fullResult.astype('float32')
# Calculate axes values
for iax, ax in enumerate(axes):
axrange = ranges[iax]
histdict[ax] = np.linspace(axrange[0], axrange[1], nbins[iax])
return histdict
def applyJitter(df, amp, col, type):
""" Add jittering to a dataframe column.
:Parameters:
df : dataframe
Dataframe to add noise/jittering to.
amp : numeric
Amplitude scaling for the jittering noise.
col : str
Name of the column to add jittering to.
type : str
Type of the jitter noise distribution ('uniform' or 'normal').
:Return:
df : dataframe
Dataframe with the jittered column (the column is also modified in place).
"""
colsize = df[col].size
if (type == 'uniform'):
# Uniform Jitter distribution
df[col] += amp*np.random.uniform(low=-1, high=1, size=colsize)
elif (type == 'normal'):
# Normal Jitter distribution works better for non-linear transformations and jitter sizes that don't match the original bin sizes
df[col] += amp*np.random.standard_normal(size=colsize)
return df  # returned so the function can also be used with dask map_partitions
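# Usage sketch for applyJitter() on a pandas dataframe partition (hypothetical
# column name and amplitude). The amplitude is typically half a bin width.
#
#   import pandas as pd
#   part = pd.DataFrame({'t': np.arange(1000, dtype='float64')})
#   applyJitter(part, amp=0.5, col='t', type='normal')  # modifies part['t'] in place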
class hdf5Splitter(hdf5Reader):
"""
Class to split large hdf5 files.
"""
def __init__(self, f_addr, **kwds):
self.faddress = f_addr
self.splitFilepaths = []
super().__init__(f_addr=self.faddress, **kwds)
@d.delayed
def _split_file(self, idx, save_addr, namestr):
""" Split file generator.
"""
evmin, evmax = self.eventList[idx], self.eventList[idx+1]
fpath = save_addr + namestr + str(idx+1) + '.h5'
# Use context manager to open hdf5 file
with File(fpath, 'w') as fsp:
# Copy the attributes
for attr, attrval in self.attrs.items():
fsp.attrs[attr] = attrval
# Copy the segmented groups and their attributes
for gp in self.groupNames:
#self.copy(gn, fsp[gn])
fsp.create_dataset(gp, data=self.readGroup(self, gp, amin=evmin, amax=evmax))
for gattr, gattrval in self[gp].attrs.items():
fsp[gp].attrs[gattr] = gattrval
return(fpath)
def split(self, nsplit, save_addr='./', namestr='split_',
split_group='Stream_0', pbar=False):
"""
Split and save an hdf5 file.
:Parameters:
nsplit : int
Number of split files.
save_addr : str | './'
Directory to store the split files.
namestr : str | 'split_'
Additional namestring attached to the front of the filename.
split_group : str | 'Stream_0'
Name of the example group to split for file length reference.
pbar : bool | False
Enable (when True)/Disable (when False) the progress bar.
"""
nsplit = int(nsplit)
self.splitFilepaths = [] # Refresh the path when re-splitting
self.eventLen = self[split_group].size
self.eventList = np.linspace(0, self.eventLen, nsplit+1, dtype='int')
tasks = []
# Distributed file splitting
for isp in range(nsplit):
tasks.append(self._split_file(isp, save_addr, namestr))
if pbar:
with ProgressBar():
self.splitFilepaths = d.compute(*tasks)
else:
self.splitFilepaths = d.compute(*tasks)
def subset(self, file_id):
"""
Spawn an instance of hdf5Processor from a specified split file.
"""
if self.splitFilepaths:
return hdf5Processor(f_addr=self.splitFilepaths[file_id])
else:
raise ValueError("No split files are present.")
def toProcessor(self):
"""
Change to an hdf5Processor instance.
"""
return hdf5Processor(f_addr=self.faddress)
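# Usage sketch for hdf5Splitter (hypothetical paths): split a large file into
# ten pieces and spawn a processor for the first piece.
#
#   splitter = hdf5Splitter(f_addr='big_run.h5')
#   splitter.split(nsplit=10, save_addr='./splits/', namestr='split_', pbar=True)
#   first_piece = splitter.subset(0)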
def readDataframe(folder=None, files=None, ftype='parquet', timeStamps=False, **kwds):
""" Read stored files from a folder into a dataframe.
:Parameters:
folder, files : str, list/tuple | None, None
Folder path of the files or a list of file paths. The folder path has
the priority such that if it's specified, the specified files will be ignored.
ftype : str | 'parquet'
File type to read ('h5' or 'hdf5', 'parquet', 'json', 'csv', etc).
If a folder path is given, all files of the specified type are read
into the dataframe in the reading order.
**kwds : keyword arguments
See the keyword arguments for the specific file parser in ``dask.dataframe`` module.
:Return:
Dask dataframe read from specified files.
"""
# ff (folder or files) is a folder or a list/tuple of files
if folder is not None:
ff = folder
files = g.glob(folder + '/*.' + ftype)
elif folder is None:
if files is not None:
ff = files # List of file paths
else:
raise ValueError('Either the folder or file path should be provided!')
if ftype == 'parquet':
return ddf.read_parquet(ff, **kwds)
elif ftype in ('h5', 'hdf5'):
# Read a file to parse the file structure
test_fid = kwds.pop('test_fid', 0)
test_proc = hdf5Processor(files[test_fid])
gnames = kwds.pop('group_names', test_proc.getGroupNames(wexpr='Stream'))
colNames = test_proc.name2alias(gnames)
if timeStamps == True:
colNames.append('timeStamps')
test_array = test_proc.summarize(form='darray', groupnames=gnames, timeStamps=timeStamps, ret=True).compute()
# Delay-read all files
arrays = [da.from_delayed(hdf5Processor(f).summarize(form='darray', groupnames=gnames, timeStamps=timeStamps, ret=True),
dtype=test_array.dtype, shape=(test_array.shape[0], np.nan)) for f in files]
array_stack = da.concatenate(arrays, axis=1).T
# if rechunksz is not None:
# array_stack = array_stack.rechunk(rechunksz)
return ddf.from_dask_array(array_stack, columns=colNames)
elif ftype == 'json':
return ddf.read_json(ff, **kwds)
elif ftype == 'csv':
return ddf.read_csv(ff, **kwds)
else:
try:
return ddf.read_table(ff, **kwds)
except:
raise Exception('The file format cannot be understood!')
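# Usage sketch for readDataframe() (hypothetical folder and file paths).
#
#   edf_pq = readDataframe(folder='./parquet_run', ftype='parquet')
#   edf_h5 = readDataframe(files=['run_01.h5', 'run_02.h5'], ftype='h5', timeStamps=False)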
class dataframeProcessor(MapParser):
"""
Process the parquet file converted from single events data.
"""
def __init__(self, datafolder, paramfolder='', datafiles=[], ncores=None):
self.datafolder = datafolder
self.paramfolder = paramfolder
self.datafiles = datafiles
self.histogram = None
self.histdict = {}
self.npart = 0
# Instantiate the MapParser class (contains parameters related to binning and image transformation)
super().__init__(file_sorting=False, folder=paramfolder)
if (ncores is None) or (ncores > N_CPU) or (ncores < 0):
#self.ncores = N_CPU
# Change the default to use at most 20 cores, as the speedup above that is small
self.ncores = min(20,N_CPU)
else:
self.ncores = int(ncores)
@property
def nrow(self):
""" Number of rows in the distributed dataframe.
"""
return len(self.edf.index)
@property
def ncol(self):
""" Number of columns in the distrbuted dataframe.
"""
return len(self.edf.columns)
def read(self, source='folder', ftype='parquet', fids=[], update='', timeStamps=False, **kwds):
""" Read into distributed dataframe.
:Parameters:
source : str | 'folder'
Source of the file readout.
:'folder': Read from the provided data folder.
:'files': Read from the provided list of file addresses.
ftype : str | 'parquet'
Type of file to read into dataframe ('h5' or 'hdf5', 'parquet', 'json', 'csv').
fids : list | []
IDs of the files to be selected (see ``mpes.base.FileCollection.select()``).
Specify 'all' to read all files of the given file type.
update : str | ''
File selection update option (see ``mpes.base.FileCollection.select()``).
**kwds : keyword arguments
See keyword arguments in ``mpes.readDataframe()``.
"""
# Create the single-event dataframe
if source == 'folder':
# gather files first to get a sorted list.
self.gather(folder=self.datafolder, identifier=r'/*.'+ftype, file_sorting=True)
self.datafiles = self.files
self.edf = readDataframe(files=self.datafiles, ftype=ftype, timeStamps=timeStamps, **kwds)
elif source == 'files':
if len(self.datafiles) > 0: # When filenames are specified
self.edf = readDataframe(folder=None, files=self.datafiles, ftype=ftype, timeStamps=timeStamps, **kwds)
else:
# When only the datafolder address is given but needs to read partial files,
# first gather files from the folder, then select files and read into dataframe
self.gather(folder=self.datafolder, identifier=r'/*.'+ftype, file_sorting=True)
if len(fids) == 0:
print('Nothing is read since no file IDs (fids) are specified!')
self.datafiles = self.select(ids=fids, update='', ret='selected')
elif fids == 'all':
self.datafiles = self.select(ids=list(range(len(self.files))), update='', ret='selected')
else:
self.datafiles = self.select(ids=fids, update='', ret='selected')
self.edf = readDataframe(files=self.datafiles, ftype=ftype, timeStamps=timeStamps, **kwds)
self.npart = self.edf.npartitions
def _addBinners(self, axes=None, nbins=None, ranges=None, binDict=None):
""" Construct the binning parameters within an instance.
"""
# Use information specified in binDict, ignore others
if binDict is not None:
try:
self.binaxes = list(binDict['axes'])
self.nbinaxes = len(self.binaxes)
self.bincounts = binDict['nbins']
self.binranges = binDict['ranges']
except:
pass # No action when binDict lacks the expected keys
# Use information from other specified parameters if binDict is not given
else:
self.binaxes = list(axes)
self.nbinaxes = len(self.binaxes)
# Collect the number of bins
try: # To have the same number of bins on all axes
self.bincounts = int(nbins)
except: # To have different number of bins on each axis
self.bincounts = list(map(int, nbins))
self.binranges = ranges
# Construct binning steps
self.binsteps = []
for bc, (lrange, rrange) in zip(self.bincounts, self.binranges):
self.binsteps.append((rrange - lrange) / bc)
# Column operations
def appendColumn(self, colnames, colvals):
""" Append columns to dataframe.
:Parameters:
colnames : list/tuple
New column names.
colvals : numpy array/list
Entries of the new columns.
"""
colnames = list(colnames)
# Wrap a bare array in a list so that a single column's values can be passed directly
if not isinstance(colvals, (list, tuple)):
colvals = [colvals]
ncn = len(colnames)
ncv = len(colvals)
if ncn != ncv:
errmsg = 'The names and values of the columns need to have the same dimensions.'
raise ValueError(errmsg)
else:
for cn, cv in zip(colnames, colvals):
self.edf = self.edf.assign(**{cn:ddf.from_array(cv)})
def deleteColumn(self, colnames):
""" Delete columns
:Parameters:
colnames : str/list/tuple
List of column names to be dropped.
"""
self.edf = self.edf.drop(colnames, axis=1)
def applyFilter(self, colname, lb=-np.inf, ub=np.inf, update='replace', ret=False):
""" Application of bound filters to a specified column (can be used consecutively).
:Parameters:
colname : str
Name of the column to filter.
lb, ub : numeric, numeric | -infinity, infinity
The lower and upper bounds used in the filtering.
update : str | 'replace'
Update option for the filtered dataframe.
ret : bool | False
Return option for the filtered dataframe.
"""
if ret == True:
return self.edf[(self.edf[colname] > lb) & (self.edf[colname] < ub)]
if update == 'replace':
self.edf = self.edf[(self.edf[colname] > lb) & (self.edf[colname] < ub)]
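# Usage sketch: bound filters can be chained, e.g. to restrict the detector
# coordinates and the time-of-flight range (column names and bounds are
# hypothetical and depend on the loaded dataframe).
#
#   dfp.applyFilter('X', lb=0, ub=1800)
#   dfp.applyFilter('t', lb=68000, ub=74000)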
def columnApply(self, mapping, rescolname, **kwds):
""" Apply a user-defined function (e.g. partial function) to an existing column.
:Parameters:
mapping : function
Function to apply to the column.
rescolname : str
Name of the resulting column.
**kwds : keyword arguments
Keyword arguments of the user-input mapping function.
"""
self.edf[rescolname] = mapping(**kwds)
def mapColumn(self, mapping, *args, **kwds):
""" Apply a dataframe-partition based mapping function to an existing column.
:Parameters:
mapping : function
Partition-wise mapping function. Takes the dataframe partition as its first
argument; further positional arguments are passed through ``*args``.
*args : arguments
Additional positional arguments of the mapping function.
**kwds : keyword arguments
Additional keyword arguments for ``dask.dataframe.map_partitions()``.
"""
self.edf = self.edf.map_partitions(mapping, *args, **kwds)
def transformColumn(self, oldcolname, mapping, newcolname='Transformed',
args=(), update='append', **kwds):
""" Apply a simple function to an existing column.
:Parameters:
oldcolname : str
The name of the column to use for computation.
mapping : function
Functional map to apply to the values of the old column.
newcolname : str | 'Transformed'
New column name to be added to the dataframe.
args : tuple | ()
Additional arguments of the functional map.
update : str | 'append'
Updating option.
'append' = append to the current dask dataframe as a new column with the new column name.
'replace' = replace the values of the old column.
**kwds : keyword arguments
Additional arguments for the ``dask.dataframe.apply()`` function.
"""
if update == 'append':
self.edf[newcolname] = self.edf[oldcolname].apply(mapping, args=args, meta=('x', 'f8'), **kwds)
elif update == 'replace':
self.edf[oldcolname] = self.edf[oldcolname].apply(mapping, args=args, meta=('x', 'f8'), **kwds)
def transformColumn2D(self, map2D, X, Y, **kwds):
""" Apply a mapping simultaneously to two dimensions.
:Parameters:
map2D : function
2D mapping function.
X, Y : series, series
The two columns of the dataframe to apply mapping to.
**kwds : keyword arguments
Additional arguments for the 2D mapping function.
"""
newX = kwds.pop('newX', X)
newY = kwds.pop('newY', Y)
self.edf[newX], self.edf[newY] = map2D(self.edf[X], self.edf[Y], **kwds)
def applyECorrection(self, type, **kwds):
""" Apply correction to the time-of-flight (TOF) axis of single-event data.
:Parameters:
type : str
Type of correction to apply to the TOF axis.
**kwds : keyword arguments
Additional parameters to use for the correction.
:corraxis: str | 't'
String name of the axis to correct.
:center: list/tuple | (650, 650)
Image center pixel positions in (row, column) format.
:amplitude: numeric | -1
Amplitude of the time-of-flight correction term
(negative sign meaning subtracting the curved wavefront).
:d: numeric | 0.9
Field-free drift distance.
:t0: numeric | 0.06
Time zero position corresponding to the tip of the valence band.
:gam: numeric
Linewidth value for correction using a 2D Lorentz profile.
:sig: numeric
Standard deviation for correction using a 2D Gaussian profile.
"""
corraxis = kwds.pop('corraxis', 't')
ycenter, xcenter = kwds.pop('center', (650, 650))
amplitude = kwds.pop('amplitude', -1)
if type == 'spherical':
d = kwds.pop('d', 0.9)
t0 = kwds.pop('t0', 0.06)
self.edf[corraxis] += (np.sqrt(1 + ((self.edf['X'] - xcenter)**2 +\
(self.edf['Y'] - ycenter)**2)/d**2) - 1) * t0 * amplitude
elif type == 'Lorentzian':
gam = kwds.pop('gamma', 300)
self.edf[corraxis] += amplitude/(gam * np.pi) * (gam**2 / ((self.edf['X'] -\
xcenter)**2 + (self.edf['Y'] - ycenter)**2 + gam**2))
elif type == 'Gaussian':
sig = kwds.pop('sigma', 300)
self.edf[corraxis] += amplitude/np.sqrt(2*np.pi*sig**2) *\
np.exp(-((self.edf['X'] - xcenter)**2 + (self.edf['Y'] - ycenter)**2) / (2*sig**2))
else:
raise NotImplementedError
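# Usage sketch for applyECorrection() with a spherical wavefront correction
# (center, amplitude and geometry values are hypothetical).
#
#   dfp.applyECorrection(type='spherical', corraxis='t', center=(650, 650),
#                        amplitude=-1, d=0.9, t0=0.06)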
def applyKCorrection(self, X='X', Y='Y', newX='Xm', newY='Ym', type='mattrans', **kwds):
""" Calculate and replace the X and Y values with their distortion-corrected version.
This method can be reused.
:Parameters:
X, Y : str, str | 'X', 'Y'
Labels of the columns before momentum distortion correction.
newX, newY : str, str | 'Xm', 'Ym'
Labels of the columns after momentum distortion correction.
type : str | 'mattrans'
Type of correction to apply ('mattrans' for a matrix transform, 'tps', or 'tps_matrix').
"""
if type == 'mattrans': # Apply matrix transform
if ('warping' in kwds):
self.warping = kwds.pop('warping')
self.transformColumn2D(map2D=b.perspectiveTransform, X=X, Y=Y, newX=newX, newY=newY, M=self.warping, **kwds)
else:
self.transformColumn2D(map2D=self.wMap, X=X, Y=Y, newX=newX, newY=newY, **kwds)
elif type == 'tps':
self.transformColumn2D(map2D=self.wMap, X=X, Y=Y, newX=newX, newY=newY, **kwds)
elif type == 'tps_matrix':
if ('dfield' in kwds):
self.dfield = kwds.pop('dfield')
self.mapColumn(b.dfieldapply, self.dfield, X=X, Y=Y, newX=newX, newY=newY)
elif ('rdeform_field' in kwds and 'cdeform_field' in kwds):
rdeform_field = kwds.pop('rdeform_field')
cdeform_field = kwds.pop('cdeform_field')
print('Calculating inverse Deformation Field, might take a moment...')
self.dfield = b.generateDfield(rdeform_field, cdeform_field)
self.mapColumn(b.dfieldapply, self.dfield, X=X, Y=Y, newX=newX, newY=newY)
else:
print('Not implemented.')
def appendKAxis(self, x0, y0, X='X', Y='Y', newX='kx', newY='ky', **kwds):
""" Calculate and append the k axis coordinates (kx, ky) to the events dataframe.
This method can be reused.
"""
if ('fr' in kwds and 'fc' in kwds):
self.fr = kwds.pop('fr')
self.fc = kwds.pop('fc')
self.transformColumn2D(map2D=b.detrc2krc, X=X, Y=Y, newX=newX, newY=newY, r0=x0, c0=y0, fr=self.fr, fc=self.fc, **kwds)
else:
self.transformColumn2D(map2D=self.kMap, X=X, Y=Y, newX=newX, newY=newY, r0=x0, c0=y0, **kwds)
def appendEAxis(self, E0, **kwds):
""" Calculate and append the E axis to the events dataframe.
This method can be reused.
:Parameter:
E0 : numeric
Time-of-flight offset.
"""
t = kwds.pop('t', self.edf['t'].astype('float64'))
if ('a' in kwds):
self.poly_a = kwds.pop('a')
self.columnApply(mapping=b.tof2evpoly, rescolname='E', E0=E0, a=self.poly_a, t=t, **kwds)
else:
self.columnApply(mapping=self.EMap, rescolname='E', E0=E0, t=t, **kwds)
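# Usage sketch of the axis calibration chain (all calibration constants below are
# hypothetical): momentum axes via appendKAxis(), then the energy axis via appendEAxis().
#
#   dfp.appendKAxis(x0=256, y0=256, X='Xm', Y='Ym', newX='kx', newY='ky')
#   dfp.appendEAxis(E0=-0.5)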
# Row operation
def appendRow(self, folder=None, df=None, ftype='parquet', **kwds):
""" Append rows read from other files to existing dataframe.
:Parameters:
folder : str | None
Folder directory for the files to append to the existing dataframe
(i.e. when appending parquet files).
df : dataframe | None
Dataframe to append to the existing dataframe.
ftype : str | 'parquet'
File type ('parquet', 'dataframe')
**kwds : keyword arguments
Additional arguments to submit to ``dask.dataframe.append()``.
"""
if ftype == 'parquet':
# self.read() updates self.edf in place and returns None, so read the folder into a dataframe explicitly
return self.edf.append(readDataframe(folder=folder, ftype=ftype), **kwds)
elif ftype == 'dataframe':
return self.edf.append(df, **kwds)
else:
raise NotImplementedError
def appendMarker(self, source_name='ADC', mapping=u.multithresh, marker_name='Marker',
lower_bounds=[], upper_bounds=[], thresholds=[], update='append', **kwds):
""" Append markers to specific ranges in a source column. The mapping of the marker is usually
a piecewise defined function. This enables binning in nonequivalent steps as the next step.
"""
if len(lower_bounds) == len(upper_bounds) == len(thresholds):
self.transformColumn(oldcolname=source_name, mapping=mapping, newcolname=marker_name,
args=(lower_bounds, upper_bounds, thresholds), update=update, **kwds)
else:
raise ValueError('Length of the bounds and the thresholds should be the same!')
# Complex operation
def distributedBinning(self, axes, nbins, ranges, binDict=None, pbar=True,
binmethod='numba', ret=False, **kwds):
""" Binning the dataframe to a multidimensional histogram.
:Parameters:
axes, nbins, ranges, binDict, pbar
See ``mpes.fprocessing.binDataframe()``.
binmethod : str | 'numba'
Dataframe binning method ('original', 'lean', 'fast' and 'numba').
ret : bool | False
Option to return binning results as a dictionary.
**kwds : keyword arguments
See ``mpes.fprocessing.binDataframe()`` or ``mpes.fprocessing.binDataframe_lean()``
"""
# Set up the binning parameters
self._addBinners(axes, nbins, ranges, binDict)
# Allow binning a user-supplied dataframe; default to the instance's dataframe
edf = kwds.pop('df', self.edf)
#self.edf = self.edf[amin:amax] # Select event range for binning
self.histdict = {}
if binmethod == 'original':
self.histdict = binDataframe(edf, ncores=self.ncores, axes=axes, nbins=nbins,
ranges=ranges, binDict=binDict, pbar=pbar, **kwds)
elif binmethod == 'lean':
self.histdict = binDataframe_lean(edf, ncores=self.ncores, axes=axes, nbins=nbins,
ranges=ranges, binDict=binDict, pbar=pbar, **kwds)
elif binmethod == 'fast':
self.histdict = binDataframe_fast(edf, ncores=self.ncores, axes=axes, nbins=nbins,
ranges=ranges, binDict=binDict, pbar=pbar, **kwds)
elif binmethod == 'numba':
self.histdict = binDataframe_numba(edf, ncores=self.ncores, axes=axes, nbins=nbins,
ranges=ranges, binDict=binDict, pbar=pbar, **kwds)
# clean up memory
gc.collect()
if ret:
return self.histdict
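# End-to-end usage sketch for the dataframeProcessor binning workflow (the folder,
# axes and ranges below are hypothetical).
#
#   dfp = dataframeProcessor(datafolder='./parquet_run')
#   dfp.read(source='folder', ftype='parquet')
#   dfp.distributedBinning(axes=['kx', 'ky', 'E'], nbins=[128, 128, 200],
#                          ranges=[(-2, 2), (-2, 2), (-5, 1)], binmethod='numba')
#   dfp.saveHistogram(form='h5', save_addr='./binned_result')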
def convert(self, form='parquet', save_addr=None, namestr='/data', pq_append=False, **kwds):
""" Update or convert to other file formats.
:Parameters:
form : str | 'parquet'
File format to convert into.
save_addr : str | None
Path of the folder to save the converted files to.
namestr : str | '/data'
Extra namestring attached to the filename.
pq_append : bool | False
Option to append to the existing parquet file (if ``True``) in the specified folder,
otherwise the existing parquet files will be deleted before writing new files in.
**kwds : keyword arguments
See extra keyword arguments in ``dask.dataframe.to_parquet()`` for parquet conversion,
or in ``dask.dataframe.to_hdf()`` for HDF5 conversion.
"""
if form == 'parquet':
compression = kwds.pop('compression', 'UNCOMPRESSED')
engine = kwds.pop('engine', 'fastparquet')
self.edf.to_parquet(save_addr, engine=engine, compression=compression,
append=pq_append, ignore_divisions=True, **kwds)
elif form in ('h5', 'hdf5'):
self.edf.to_hdf(save_addr, namestr, **kwds)
elif form == 'json':
self.edf.to_json(save_addr, **kwds)
def saveHistogram(self, form, save_addr, dictname='histdict', **kwds):
""" Export binned histogram in other formats.
:Parameters:
See ``mpes.fprocessing.saveDict()``.
"""
try:
saveDict(processor=self, dictname=dictname, form=form, save_addr=save_addr, **kwds)
except:
raise Exception('Saving histogram was unsuccessful!')
def toBandStructure(self):
""" Convert to the xarray data structure from existing binned data.
:Return:
An instance of ``BandStructure()`` or ``MPESDataset()`` from the ``mpes.bandstructure`` module.
"""
if bool(self.histdict):
coords = project(self.histdict, self.binaxes)
if self.nbinaxes == 3:
return bs.BandStructure(data=self.histdict['binned'],
coords=coords, dims=self.binaxes, datakey='')
elif self.nbinaxes > 3:
return bs.MPESDataset(data=self.histdict['binned'],
coords=coords, dims=self.binaxes, datakey='')
else:
raise ValueError('No binning results are available!')
def viewEventHistogram(self, dfpid, ncol, axes=['X', 'Y', 't', 'ADC'], bins=[80, 80, 80, 80],
ranges=[(0, 1800), (0, 1800), (68000, 74000), (0, 500)],
backend='bokeh', legend=True, histkwds={}, legkwds={}, **kwds):
"""
Plot individual histograms of specified dimensions (axes) from a substituent dataframe partition.
:Parameters:
dfpid : int
Number of the data frame partition to look at.
ncol : int
Number of columns in the plot grid.
axes : list/tuple
Name of the axes to view.
bins : list/tuple
Bin values of all specified axes.
ranges : list
Value ranges of all specified axes.
backend : str | 'bokeh'
Backend of the plotting library ('matplotlib' or 'bokeh').
legend : bool | True
Option to include a legend in the histogram plots.
histkwds, legkwds, **kwds : dict, dict, keyword arguments
Extra keyword arguments passed to ``mpes.visualization.grid_histogram()``.
"""
input_types = map(type, [axes, bins, ranges])
allowed_types = [list, tuple]
if set(input_types).issubset(allowed_types):
# Read out the values for the specified groups
group_dict = {}
dfpart = self.edf.get_partition(dfpid)
cols = dfpart.columns
for ax in axes:
group_dict[ax] = dfpart.values[:, cols.get_loc(ax)].compute()
# Plot multiple histograms in a grid
grid_histogram(group_dict, ncol=ncol, rvs=axes, rvbins=bins, rvranges=ranges,
backend=backend, legend=legend, histkwds=histkwds, legkwds=legkwds, **kwds)
else:
raise TypeError('Inputs of axes, bins, ranges need to be list or tuple!')
def getCountRate(self, fids='all', plot=False):
"""
Create count rate data for the files in the dataframe processor specified in 'fids'.
:Parameters:
fids : str/list | 'all'
File IDs to include ('all' or a list of file IDs).
See also the arguments in ``parallelHDF5Processor.subset()`` and ``hdf5Processor.getCountRate()``.
"""
if fids == 'all':
fids = range(0, len(self.datafiles))
secs = []
countRate = []
accumulated_time = 0
for fid in fids:
subproc = hdf5Processor(self.datafiles[fid])
countRate_, secs_ = subproc.getCountRate(plot=False)
secs.append((accumulated_time + secs_).T)
countRate.append(countRate_.T)
accumulated_time += secs_[len(secs_)-1]
countRate = np.concatenate(np.asarray(countRate))
secs = np.concatenate(np.asarray(secs))
return countRate, secs
def getElapsedTime(self, fids='all'):
"""
Return the total elapsed time of the selected files from the msMarkers wave.
:Return:
secs : float
The combined length of the files in seconds.
"""
if fids == 'all':
fids = range(0, len(self.datafiles))
secs = 0
for fid in fids:
subproc = hdf5Processor(self.datafiles[fid])
secs += subproc.get('msMarkers').len()/1000
return secs
class parquetProcessor(dataframeProcessor):
"""
Legacy version of the ``mpes.fprocessing.dataframeProcessor`` class.
"""
def __init__(self, folder, files=[], source='folder', ftype='parquet',
fids=[], update='', ncores=None, **kwds):
super().__init__(datafolder=folder, paramfolder=folder, datafiles=files, ncores=ncores)
self.folder = folder
# Read only the parquet files from the given folder/files
self.read(source=source, ftype=ftype, fids=fids, update=update, **kwds)
self.npart = self.edf.npartitions
def _arraysum(array_a, array_b):
"""
Calculate the sum of two arrays.
"""
return array_a + array_b
class parallelHDF5Processor(FileCollection):
"""
Class for parallel processing of hdf5 files.
"""
def __init__(self, files=[], file_sorting=True, folder=None, ncores=None):
super().__init__(files=files, file_sorting=file_sorting, folder=folder)
self.metadict = {}
self.results = {}
self.combinedresult = {}
if (ncores is None) or (ncores > N_CPU) or (ncores < 0):
#self.ncores = N_CPU
# Change the default to use at most 20 cores, as the speedup above that is small
self.ncores = min(20,N_CPU)
else:
self.ncores = int(ncores)
def _parse_metadata(self, attributes, groups):
"""
Parse the metadata from all HDF5 files.
:Parameters:
attributes, groups : list, list
See ``mpes.fprocessing.metaReadHDF5()``.
"""
for fid in range(self.nfiles):
output = self.subset(fid).summarize(form='metadict', attributes=attributes, groups=groups)
self.metadict = u.dictmerge(self.metadict, output)  # merge the metadata parsed from each file
def subset(self, file_id):
"""
Spawn an instance of ``mpes.fprocessing.hdf5Processor`` from a specified substituent file.
:Parameter:
file_id : int
Integer-numbered file ID (any integer from 0 to self.nfiles - 1).
"""
if self.files:
return hdf5Processor(f_addr=self.files[int(file_id)])
else:
raise ValueError("No substituent file is present (value out of range).")
def summarize(self, form='dataframe', ret=False, **kwds):
"""
Summarize the measurement information from all HDF5 files.
:Parameters:
form : str | 'dataframe'
Format of the files to summarize into.
ret : bool | False
Specification on value return.
**kwds : keyword arguments
See keyword arguments in ``mpes.fprocessing.readDataframe()``.
"""
if form == 'text':
raise NotImplementedError
elif form == 'metadict':
self.metadict = {}
if ret == True:
return self.metadict
elif form == 'dataframe':
self.edfhdf = readDataframe(files=self.files, ftype='h5', **kwds)  # readDataframe() has no ret argument
if ret == True:
return self.edfhdf
def viewEventHistogram(self, fid, ncol, **kwds):
"""
Plot individual histograms of specified dimensions (axes) from a substituent file.
:Parameters:
See arguments in ``parallelHDF5Processor.subset()`` and ``hdf5Processor.viewEventHistogram()``.
"""
subproc = self.subset(fid)
subproc.viewEventHistogram(ncol, **kwds)
def getCountRate(self, fids='all', plot=False):
"""
Create count rate data for the files in the parallel hdf5 processor specified in 'fids'.
:Parameters:
fids : str/list | 'all'
File IDs to include ('all' or a list of file IDs).
See also the arguments in ``parallelHDF5Processor.subset()`` and ``hdf5Processor.getCountRate()``.
"""
if fids == 'all':
fids = range(0, len(self.files))
secs = []
countRate = []
accumulated_time = 0
for fid in fids:
subproc = self.subset(fid)
countRate_, secs_ = subproc.getCountRate(plot=False)
secs.append((accumulated_time + secs_).T)
countRate.append(countRate_.T)
accumulated_time += secs_[len(secs_)-1]
countRate = np.concatenate(np.asarray(countRate))
secs = np.concatenate(np.asarray(secs))
return countRate, secs
def getElapsedTime(self, fids='all'):
"""
        Return the elapsed time in the files from the msMarkers wave.
        return: secs: the length of the file(s) in seconds.
"""
if fids == 'all':
fids = range(0, len(self.files))
secs = 0
for fid in fids:
subproc = self.subset(fid)
secs += subproc.get('msMarkers').len()/1000
return secs
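    # Usage sketch (the folder name is an assumption): gather per-file count
    # rates and the total acquisition time of a measurement folder.
    #   pp = parallelHDF5Processor(folder='./scan_data')
    #   pp.gather(identifier='/*.h5')       # collect the hdf5 files
    #   rate, secs = pp.getCountRate()      # concatenated over all files
    #   total_secs = pp.getElapsedTime()    # summed from the msMarkers markers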
def parallelBinning(self, axes, nbins, ranges, scheduler='threads', combine=True,
histcoord='midpoint', pbar=True, binning_kwds={}, compute_kwds={}, pbenv='classic', ret=False):
"""
Parallel computation of the multidimensional histogram from file segments.
Version with serialized loop over processor threads and parallel recombination to save memory.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
scheduler : str | 'threads'
Type of distributed scheduler ('threads', 'processes', 'synchronous')
histcoord : string | 'midpoint'
The coordinates of the histogram. Specify 'edge' to get the bar edges (every
dimension has one value more), specify 'midpoint' to get the midpoint of the
bars (same length as the histogram dimensions).
pbar : bool | true
Option to display the progress bar.
binning_kwds : dict | {}
Keyword arguments to be included in ``mpes.fprocessing.hdf5Processor.localBinning()``.
compute_kwds : dict | {}
Keyword arguments to specify in ``dask.compute()``.
"""
self.binaxes = axes
self.nbinaxes = len(axes)
self.bincounts = nbins
self.binranges = ranges
# Construct binning steps
self.binsteps = []
for bc, (lrange, rrange) in zip(self.bincounts, self.binranges):
self.binsteps.append((rrange - lrange) / bc)
# Reset containers of results
self.results = {}
self.combinedresult = {}
self.combinedresult['binned'] = np.zeros(tuple(nbins))
tqdm = u.tqdmenv(pbenv)
ncores = self.ncores
# Execute binning tasks
binning_kwds = u.dictmerge({'ret':'histogram'}, binning_kwds)
# limit multithreading in worker threads
nthreads_per_worker = binning_kwds.pop('nthreads_per_worker', 1)
threadpool_api = binning_kwds.pop('threadpool_api', 'blas')
with threadpool_limits(limits=nthreads_per_worker, user_api=threadpool_api):
# Construct binning tasks
for i in tqdm(range(0, len(self.files), ncores), disable=not(pbar)):
coreTasks = [] # Core-level jobs
for j in range(0, ncores):
# Fill up worker threads
ij = i + j
if ij >= len(self.files):
break
file = self.files[ij]
coreTasks.append(d.delayed(hdf5Processor(file).localBinning)(axes=axes, nbins=nbins, ranges=ranges, **binning_kwds))
if len(coreTasks) > 0:
coreResults = d.compute(*coreTasks, scheduler=scheduler, **compute_kwds)
# Combine all core results for a dataframe partition
# Fast parallel version with Dask
combineTasks = []
for j in range(0, ncores):
combineParts = []
# Split up results along first bin axis
for r in coreResults:
combineParts.append(r[int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...])
# Fill up worker threads
combineTasks.append(d.delayed(reduce)(_arraysum, combineParts))
combineResults = d.compute(*combineTasks, scheduler=scheduler, **compute_kwds)
# Directly fill into target array. This is much faster than the (not so parallel) reduce/concatenation used before, and uses less memory.
for j in range(0, ncores):
self.combinedresult['binned'][int(j*nbins[0]/ncores):int((j+1)*nbins[0]/ncores),...] += combineResults[j]
del combineParts
del combineTasks
del combineResults
del coreResults
del coreTasks
# Calculate and store values of the axes
for iax, ax in enumerate(self.binaxes):
p_start, p_end = self.binranges[iax]
self.combinedresult[ax] = u.calcax(p_start, p_end, self.bincounts[iax], ret=histcoord)
# clean up memory
gc.collect()
if ret:
return self.combinedresult
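    # Usage sketch (axis names, bin counts and ranges below are assumptions that
    # depend on the measurement, not fixed API values):
    #   pp.parallelBinning(axes=['X', 'Y', 't'],
    #                      nbins=[128, 128, 600],
    #                      ranges=[(0, 1800), (0, 1800), (65000, 71000)],
    #                      scheduler='threads')
    #   binned = pp.combinedresult['binned']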
def parallelBinning_old(self, axes, nbins, ranges, scheduler='threads', combine=True,
histcoord='midpoint', pbar=True, binning_kwds={}, compute_kwds={}, ret=False):
"""
Parallel computation of the multidimensional histogram from file segments.
Old version with completely parallel binning with unlimited memory consumption.
Crashes for very large data sets.
:Parameters:
axes : (list of) strings | None
Names the axes to bin.
nbins : (list of) int | None
Number of bins along each axis.
ranges : (list of) tuples | None
Ranges of binning along every axis.
scheduler : str | 'threads'
Type of distributed scheduler ('threads', 'processes', 'synchronous')
combine : bool | True
Option to combine the results obtained from distributed binning.
histcoord : string | 'midpoint'
The coordinates of the histogram. Specify 'edge' to get the bar edges (every
dimension has one value more), specify 'midpoint' to get the midpoint of the
bars (same length as the histogram dimensions).
pbar : bool | true
Option to display the progress bar.
binning_kwds : dict | {}
Keyword arguments to be included in ``mpes.fprocessing.hdf5Processor.localBinning()``.
compute_kwds : dict | {}
Keyword arguments to specify in ``dask.compute()``.
"""
binTasks = []
self.binaxes = axes
self.nbinaxes = len(axes)
self.bincounts = nbins
self.binranges = ranges
# Construct binning steps
self.binsteps = []
for bc, (lrange, rrange) in zip(self.bincounts, self.binranges):
self.binsteps.append((rrange - lrange) / bc)
# Reset containers of results
self.results = {}
self.combinedresult = {}
# Execute binning tasks
if combine == True: # Combine results in the process of binning
binning_kwds = u.dictmerge({'ret':'histogram'}, binning_kwds)
# Construct binning tasks
for f in self.files:
binTasks.append(d.delayed(hdf5Processor(f).localBinning)
(axes=axes, nbins=nbins, ranges=ranges, **binning_kwds))
if pbar:
with ProgressBar():
self.combinedresult['binned'] = reduce(_arraysum,
d.compute(*binTasks, scheduler=scheduler, **compute_kwds))
else:
self.combinedresult['binned'] = reduce(_arraysum,
d.compute(*binTasks, scheduler=scheduler, **compute_kwds))
del binTasks
# Calculate and store values of the axes
for iax, ax in enumerate(self.binaxes):
p_start, p_end = self.binranges[iax]
self.combinedresult[ax] = u.calcax(p_start, p_end, self.bincounts[iax], ret=histcoord)
if ret:
return self.combinedresult
else: # Return all task outcome of binning (not recommended due to the size)
for f in self.files:
binTasks.append(d.delayed(hdf5Processor(f).localBinning)
(axes=axes, nbins=nbins, ranges=ranges, **binning_kwds))
if pbar:
with ProgressBar():
self.results = d.compute(*binTasks, scheduler=scheduler, **compute_kwds)
else:
self.results = d.compute(*binTasks, scheduler=scheduler, **compute_kwds)
del binTasks
if ret:
return self.results
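    # Usage sketch: with combine=False each file keeps its own result dictionary
    # in self.results, which can then be merged explicitly (parameter values are
    # assumptions):
    #   pp.parallelBinning_old(axes=['t'], nbins=[1000],
    #                          ranges=[(65000, 100000)], combine=False)
    #   combined = pp.combineResults(ret=True)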
def combineResults(self, ret=True):
"""
Combine the results from all segments (only when self.results is non-empty).
:Parameters:
ret : bool | True
:True: returns the dictionary containing binned data explicitly
:False: no explicit return of the binned data, the dictionary
generated in the binning is still retained as an instance attribute.
:Return:
combinedresult : dict
Return combined result dictionary (if ``ret == True``).
"""
try:
binnedhist = np.stack([self.results[i]['binned'] for i in range(self.nfiles)], axis=0).sum(axis=0)
# Transfer the results to combined result
self.combinedresult = self.results[0].copy()
self.combinedresult['binned'] = binnedhist
except:
pass
if ret:
return self.combinedresult
def convert(self, form='parquet', save_addr='./summary', append_to_folder=False,
pbar=True, pbenv='classic', **kwds):
"""
Convert files to another format (e.g. parquet).
:Parameters:
form : str | 'parquet'
File format to convert into.
save_addr : str | './summary'
Path of the folder for saving parquet files.
append_to_folder : bool | False
Option to append to the existing parquet files in the specified folder,
otherwise the existing parquet files will be deleted first. The HDF5 files
in the same folder are kept intact.
pbar : bool | True
Option to display progress bar.
pbenv : str | 'classic'
Specification of the progress bar environment ('classic' for generic version
and 'notebook' for notebook compatible version).
**kwds : keyword arguments
See ``mpes.fprocessing.hdf5Processor.convert()``.
"""
tqdm = u.tqdmenv(pbenv)
if os.path.exists(save_addr) and os.path.isdir(save_addr):
# In an existing folder, clean up the files if specified
existing_files = g.glob(save_addr + r'/*')
n_existing_files = len(existing_files)
# Remove existing files in the folder before writing into it
if (n_existing_files > 0) and (append_to_folder == False):
for f_exist in existing_files:
if '.h5' not in f_exist: # Keep the calibration files
os.remove(f_exist)
for fi in tqdm(range(self.nfiles), disable=not(pbar)):
subproc = self.subset(file_id=fi)
subproc.convert(form=form, save_addr=save_addr, pq_append=True, **kwds)
def updateHistogram(self, axes=None, sliceranges=None, ret=False):
"""
Update the dimensional sizes of the binning results.
:Parameters:
axes : tuple/list | None
Collection of the names of axes for size change.
sliceranges : tuple/list/array | None
Collection of ranges, e.g. (start_position, stop_position) pairs,
for each axis to be updated.
ret : bool | False
Option to return updated histogram.
"""
# Input axis order to binning axes order
binaxes = np.asarray(self.binaxes)
seqs = [np.where(ax == binaxes)[0][0] for ax in axes]
for seq, ax, rg in zip(seqs, axes, sliceranges):
# Update the lengths of binning axes
self.combinedresult[ax] = self.combinedresult[ax][rg[0]:rg[1]]
# Update the binned histogram
tempmat = np.moveaxis(self.combinedresult['binned'], seq, 0)[rg[0]:rg[1],...]
self.combinedresult['binned'] = np.moveaxis(tempmat, 0, seq)
if ret:
return self.combinedresult
def saveHistogram(self, dictname='combinedresult', form='h5', save_addr='./histogram', **kwds):
"""
Save binned histogram and the axes.
:Parameters:
See ``mpes.fprocessing.saveDict()``.
"""
try:
saveDict(processor=self, dictname=dictname, form=form, save_addr=save_addr, **kwds)
except:
raise Exception('Saving histogram was unsuccessful!')
def saveParameters(self, form='h5', save_addr='./binning'):
"""
Save all the attributes of the binning instance for later use
(e.g. binning axes, ranges, etc).
:Parameters:
form : str | 'h5'
File format to for saving the parameters ('h5'/'hdf5', 'mat').
save_addr : str | './binning'
The address for the to be saved file.
"""
saveClassAttributes(self, form, save_addr)
def extractEDC(folder=None, files=[], axes=['t'], bins=[1000], ranges=[(65000, 100000)],
binning_kwds={'jittered':True}, ret=True, **kwds):
""" Extract EDCs from a list of bias scan files.
"""
pp = parallelHDF5Processor(folder=folder, files=files)
if len(files) == 0:
pp.gather(identifier='/*.h5')
pp.parallelBinning_old(axes=axes, nbins=bins, ranges=ranges, combine=False, ret=False,
binning_kwds=binning_kwds, **kwds)
edcs = [pp.results[i]['binned'] for i in range(len(pp.results))]
tof = pp.results[0][axes[0]]
traces = np.asarray(edcs)
del pp
if ret:
return traces, tof
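# Usage sketch (the folder path is an assumption; the binning values mirror the
# defaults above):
#   traces, tof = extractEDC(folder='./bias_scans', axes=['t'], bins=[1000],
#                            ranges=[(65000, 100000)])
#   # traces holds one energy distribution curve per file, tof the axis values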
def readBinnedhdf5(fpath, combined=True, typ='float32'):
"""
Read binned hdf5 file (3D/4D data) into a dictionary.
:Parameters:
fpath : str
File path.
combined : bool | True
Specify if the volume slices are combined.
typ : str | 'float32'
Data type of the numerical values in the output dictionary.
:Return:
out : dict
Dictionary with keys being the axes and the volume (slices).
"""
f = File(fpath, 'r')
out = {}
# Read the axes group
for ax, axval in f['axes'].items():
out[ax] = axval[...]
# Read the binned group
group = f['binned']
itemkeys = group.keys()
nbinned = len(itemkeys)
# Binned 3D matrix
if (nbinned == 1) or (combined == False):
for ik in itemkeys:
out[ik] = np.asarray(group[ik], dtype=typ)
# Binned 4D matrix
elif (nbinned > 1) or (combined == True):
val = []
itemkeys_sorted = nts.natsorted(itemkeys)
for ik in itemkeys_sorted:
val.append(group[ik])
out['V'] = np.asarray(val, dtype=typ)
return out
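# Usage sketch (the file name is an assumption): load a combined binned volume
# and separate the axes from the data block.
#   res = readBinnedhdf5('binned_run.h5', combined=True)
#   volume = res.pop('V', None)   # 4D stack when several slices were saved
#   axes = res                    # the remaining keys are the axis arrays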
# =================== #
# Data transformation #
# =================== #
def fftfilter2d(datamat):
r, c = datamat.shape
x, y = np.meshgrid(np.arange(-r / 2, r / 2), np.arange(-c / 2, c / 2))
zm = np.zeros_like(datamat.T)
ftmat = (nft.fftshift(nft.fft2(datamat))).T
# Construct peak center coordinates array using rotation
x0, y0 = -80, -108
# Conversion factor for radius (half-width half-maximum) of Gaussian
rgaus = 2 * np.log(2)
sx, sy = 10 / rgaus, 10 * (c / r) / rgaus
alf, bet = np.arctan(r / c), np.arctan(c / r)
rotarray = np.array([0, 2 * alf, 2 * (alf + bet), -2 * bet])
xy = [np.dot(rot2d(roth, 'rad'), np.array([x0, y0])) for roth in rotarray]
# Generate intermediate positions and append to peak center coordinates
# array
for everynumber in range(4):
n = everynumber % 4
xy.append((xy[n] + xy[n - 1]) / 2)
# Construct the complement of mask matrix
for currpair in range(len(xy)):
xc, yc = xy[currpair]
zm += np.exp(-((x - xc)**2) / (2 * sx**2) -
((y - yc)**2) / (2 * sy**2))
fltrmat = np.abs(nft.ifft2((1 - zm) * ftmat))
return fltrmat
# =================== #
# Numba Binning #
# =================== #
@numba.jit(nogil=True, parallel=False)
def _hist1d_numba_seq(sample, bins, ranges):
"""
1D Binning function, pre-compiled by Numba for performance.
Behaves much like numpy.histogramdd, but calculates and returns unsigned 32 bit integers
"""
H = np.zeros((bins[0]), dtype=np.uint32)
delta = 1/((ranges[:,1] - ranges[:,0]) / bins)
if (sample.shape[1] != 1):
raise ValueError(
'The dimension of bins must be equal to the dimension of the sample x.')
for t in range(sample.shape[0]):
i = (sample[t,0] - ranges[0,0]) * delta[0]
if 0 <= i < bins[0]:
H[int(i)] += 1
return H
@numba.jit(nogil=True, parallel=False)
def _hist2d_numba_seq(sample, bins, ranges):
"""
2D Binning function, pre-compiled by Numba for performance.
Behaves much like numpy.histogramdd, but calculates and returns unsigned 32 bit integers
"""
H = np.zeros((bins[0], bins[1]), dtype=np.uint32)
delta = 1/((ranges[:,1] - ranges[:,0]) / bins)
if (sample.shape[1] != 2):
raise ValueError(
'The dimension of bins must be equal to the dimension of the sample x.')
for t in range(sample.shape[0]):
i = (sample[t,0] - ranges[0,0]) * delta[0]
j = (sample[t,1] - ranges[1,0]) * delta[1]
if 0 <= i < bins[0] and 0 <= j < bins[1]:
H[int(i),int(j)] += 1
    return H
@numba.jit(nogil=True, parallel=False)
def _hist3d_numba_seq(sample, bins, ranges):
"""
3D Binning function, pre-compiled by Numba for performance.
Behaves much like numpy.histogramdd, but calculates and returns unsigned 32 bit integers
"""
H = np.zeros((bins[0], bins[1], bins[2]), dtype=np.uint32)
delta = 1/((ranges[:,1] - ranges[:,0]) / bins)
if (sample.shape[1] != 3):
raise ValueError(
'The dimension of bins must be equal to the dimension of the sample x.')
for t in range(sample.shape[0]):
i = (sample[t,0] - ranges[0,0]) * delta[0]
j = (sample[t,1] - ranges[1,0]) * delta[1]
k = (sample[t,2] - ranges[2,0]) * delta[2]
if 0 <= i < bins[0] and 0 <= j < bins[1] and 0 <= k < bins[2]:
H[int(i),int(j), int(k)] += 1
return H
@numba.jit(nogil=True, parallel=False)
def _hist4d_numba_seq(sample, bins, ranges):
"""
4D Binning function, pre-compiled by Numba for performance.
Behaves much like numpy.histogramdd, but calculates and returns unsigned 32 bit integers
"""
H = np.zeros((bins[0], bins[1], bins[2], bins[3]), dtype=np.uint32)
delta = 1/((ranges[:,1] - ranges[:,0]) / bins)
if (sample.shape[1] != 4):
raise ValueError(
'The dimension of bins must be equal to the dimension of the sample x.')
for t in range(sample.shape[0]):
i = (sample[t,0] - ranges[0,0]) * delta[0]
j = (sample[t,1] - ranges[1,0]) * delta[1]
k = (sample[t,2] - ranges[2,0]) * delta[2]
l = (sample[t,3] - ranges[3,0]) * delta[3]
if 0 <= i < bins[0] and 0 <= j < bins[1] and 0 <= k < bins[2] and 0 <= l < bins[3]:
H[int(i),int(j),int(k),int(l)] += 1
return H
def numba_histogramdd(sample, bins, ranges):
"""
    Wrapper for the Numba pre-compiled binning functions. Behaves largely like numpy.histogramdd.
Returns uint32 arrays. This was chosen because it has a significant performance improvement over uint64
for large binning volumes. Be aware that this can cause overflows for very large sample sets exceeding 3E9 counts
in a single bin. This should never happen in a realistic photoemission experiment with useful bin sizes.
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
# normalize the ranges argument
if ranges is None:
ranges = (None,) * D
elif len(ranges) != D:
raise ValueError('range argument must have one entry per dimension')
ranges = np.asarray(ranges)
bins = np.asarray(bins)
# Create edge arrays
for i in range(D):
edges[i] = np.linspace(*ranges[i,:], bins[i]+1)
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
if (D == 1):
hist = _hist1d_numba_seq(sample, bins , ranges)
elif (D == 2):
hist = _hist2d_numba_seq(sample, bins , ranges)
elif (D == 3):
hist = _hist3d_numba_seq(sample, bins , ranges)
elif (D == 4):
hist = _hist4d_numba_seq(sample, bins , ranges)
else:
raise ValueError('Only implemented for up to 4 dimensions currently.')
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges | mit |
jart/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 20 | 19127 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""Recursively fills padded arr with elements from seq.
If length of seq is less than arr padded length, fillvalue used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [
seq.shape[:-1] if len(seq.shape) > 0 else -1 for seq in batch_key_item
]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [
seq.shape[-1] if len(seq.shape) > 0 else 0 for seq in batch_key_item
]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
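# Worked sketch: two 1-D samples of lengths 3 and 1 are padded to a common
# length, e.g. _pad_if_needed([np.array([1, 2, 3]), np.array([9])]) returns
# array([[1, 2, 3], [9, 0, 0]]).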
def _get_integer_indices_for_next_batch(batch_indices_start, batch_size,
epoch_end, array_length, current_epoch,
total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [
j % array_length for j in range(batch_indices_start, batch_indices_end)
]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
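# Worked sketch: with array_length=5, a random start at index 3 (hence
# epoch_end=2), batch_size=5 and no epoch limit, the indices wrap around and one
# epoch boundary is crossed:
#   _get_integer_indices_for_next_batch(3, 5, 2, 5, 0, None)
#   -> ([3, 4, 0, 1, 2], 1)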
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns) + 1, len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index], list()).append(
data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {
key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())
}
else:
feed_dict = {
key: np.asarray(item)
for key, item in list(list_dict.items())
}
return feed_dict
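# Usage sketch (hypothetical generator): _GeneratorFeedFn expects a zero-argument
# callable that returns a fresh iterator of dicts of numpy arrays, e.g.
#   def gen():
#     for i in range(100):
#       yield {'x': np.array([i]), 'y': np.array([i % 2])}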
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None,
pad_value=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
NotImplementedError: padding and shuffling data at the same time.
NotImplementedError: padding usage with non generator data type.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64
] + [dtypes.as_dtype(col.dtype) for col in data.values()]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
pad_data = pad_value is not None
if pad_data and get_feed_fn is not _GeneratorFeedFn:
raise NotImplementedError(
"padding is only available with generator usage")
if shuffle and pad_data:
raise NotImplementedError(
"padding and shuffling data at the same time is not implemented")
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
elif pad_data:
min_after_dequeue = 0 # just for the summary text
queue_shapes = list(
map(lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
queue_shapes))
queue = data_flow_ops.PaddingFIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
if not pad_data:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
else:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs,
pad_value=pad_value))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue,
enqueue_ops=enqueue_ops,
feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (
math_ops.cast(
math_ops.maximum(0,
queue.size() - min_after_dequeue), dtypes.float32)
* (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
| apache-2.0 |
yanhuay/seisDD | specfem2d/utils/adjoint_sources/define_adjoint_source_python_script.py | 1 | 2425 |
# by Philip Knaute, June 2012
import numpy as np
import scipy.interpolate as interp
import scipy.integrate as integr
import scipy.signal as sgnl
import matplotlib.pyplot as plt
import ownModules.proc.filter as filt
def calAdjCCTTFromTrace(nt,dt,tStartIn,tEndIn,dataIn, synthIn):
""" calculate the cross correlation traveltime adjoint sources for one seismogram
IN:
nt : number of timesteps in each seismogram
dt : timestep of seismograms
tStartIn : float starting time for trace
tEndIn : float end time for trace
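    dataIn : ndarray containing the observed (data) seismogram
    synthIn : ndarray containing the synthetic seismogram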
OUT:
fBar : array containing the adjoint seismogram for the trace
t : ndarray containing the time steps
"""
isCalculateWeights = False
if isCalculateWeights:
dSeism = np.zeros(nt)
weight = 0
# -- time vector
t = np.ogrid[0:(nt-1)*dt:nt*1j]
# -- the norm
norm = 0
# -- numpy arrays initialisation
velSynth = np.zeros(nt)
accSynth = np.zeros(nt)
timeWind = np.zeros(nt)
fBar = np.zeros(nt)
# -- calculate time time-window
tStart = tStartIn
tEnd = tEndIn
# -- the starting and ending sample numbers
iStart = int(np.floor(tStart/dt))
iEnd = int(np.ceil(tEnd/dt))
# -- sample length of the window
iWind = iEnd - iStart
#print iStart,iEnd,iWind
timeWind[iStart:iEnd]=sgnl.hann(iWind)
# -- calculate the adjoint
synth = synthIn
interpTrc = interp.InterpolatedUnivariateSpline(t,synth)
velSynth = interpTrc(t,1)
accSynth = interpTrc(t,2)
integrArgument = timeWind*synth*accSynth
# -- calculating the norm
norm = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
# -- divide every trace (row in matrices) by their norm (row in vector norm)
fBar = timeWind*velSynth / norm
if isCalculateWeights:
# -- read in the data seismograms
data = dataIn
# -- calculate the difference between data and synthetics (amplitude) per trace
dSeism = data - synth
# -- calculate the weight per trace
integrArgument = timeWind*velSynth*dSeism
weight = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
print "weight", weight/norm
# -- multiply weight with every adj trace
fBar = fBar*weight
print weight
return [fBar,t]
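# Usage sketch (sampling and window values are assumptions): for traces of 4000
# samples at dt = 0.01 s, windowed between 10 s and 20 s,
#   adj, t = calAdjCCTTFromTrace(4000, 0.01, 10.0, 20.0, data_trace, synth_trace)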
| gpl-3.0 |
automata/tri-delaunay | tri_passos.py | 1 | 3605 | # coding:utf-8
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import slic
from skimage.measure import regionprops
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.image as mpimg
import matplotlib.delaunay as triang
import sys
if len(sys.argv) == 1:
print 'uso: ./%s <arquivo PNG>' % sys.argv[0]
sys.exit()
IMG = sys.argv[1]
fig, axes = plt.subplots(ncols=5, figsize=(18, 18))
ax0, ax1, ax2, ax3, ax4 = axes
# open the image
image_rgb = mpimg.imread(IMG)
# segment the image into regions
labels = slic(image_rgb,
convert2lab=True,
ratio=-10,
n_segments=100,
sigma=.1,
max_iter=10)
conexos = labels
for ax in axes:
ax.clear()
ax0.imshow(image_rgb, cmap=plt.cm.gray, interpolation='nearest')
ax1.imshow(conexos, cmap=plt.cm.jet, interpolation='nearest')
ax2.imshow(conexos, cmap=plt.cm.jet, interpolation='nearest')
ax3.imshow(image_rgb, cmap=plt.cm.gray, interpolation='nearest')
ax4.imshow(np.zeros(image_rgb.shape))
ax0.set_title(u'Imagem original', fontsize=10)
ax1.set_title(u'Segmentação', fontsize=10)
ax2.set_title(u'Triangulação (Delaunay)', fontsize=10)
ax3.set_title(u'Imagem gerada', fontsize=10)
ax4.set_title(u'Apenas triângulos', fontsize=10)
# compute the bounding boxes and centroids of the regions
# store the bounding-box points in 'pontos'
pontos = []
for region in regionprops(conexos, ['Area', 'BoundingBox', 'Centroid']):
minr, minc, maxr, maxc = region['BoundingBox']
    # plot the bounding box
# rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
# fill=False, edgecolor='red', linewidth=2)
# ax1.add_patch(rect)
x0 = region['Centroid'][1]
y0 = region['Centroid'][0]
# pontos.append([x0, y0])
pontos.append([minc, minr])
pontos.append([maxc, minr])
pontos.append([minc, maxr])
pontos.append([maxc, maxr])
pontos = np.array(pontos)
# compute the triangle points via Delaunay triangulation
cens, edg, tri, neig = triang.delaunay(pontos[:,0], pontos[:,1])
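# (Hedged alternative, not used below: if matplotlib.delaunay is unavailable,
# an equivalent triangle index list can be obtained with
#   from scipy.spatial import Delaunay; tri = Delaunay(pontos).simplices )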
patches = []
cores = []
coords_tri = []
for t in tri:
    # t[0], t[1], t[2] are the point indices of the triangle
t_i = [t[0], t[1], t[2], t[0]]
ax2.plot(pontos[:,0][t_i], pontos[:,1][t_i], 'k-', linewidth=.5)
ax4.plot(pontos[:,0][t_i], pontos[:,1][t_i], 'w-', linewidth=.3)
ax4.plot(pontos[:,0][t_i], pontos[:,1][t_i], 'wo', markersize=2)
#print '---'
t_i2 = [t[0], t[1], t[2]]
#print pontos[:,[0,1]][t_i2]
coords_tri.append([list(aa) for aa in list(pontos[:,[0,1]][t_i2])])
x = pontos[:,0][t_i][0]
y = pontos[:,1][t_i][0]
    # the polygon color comes from the original image
cores.append(image_rgb[y-1][x-1])
    # create a polygon to represent the triangle (this is where the drawing
    # we are synthesizing begins)
poly = Polygon(pontos[:,[0,1]][t_i], True)
patches.append(poly)
# add the polygons to a patch collection
p = PatchCollection(patches)
#print 'patches:', len(patches), 'regions:', len(cores)
# set the colors according to the original image and the edges to transparent
p.set_facecolor(cores)
p.set_edgecolor(cores)
p.set_alpha(.8)
# plot the synthesized drawing
ax3.add_collection(p)
for ax in axes:
ax.axis('off')
nome_pintura = '%s_passos.svg' % IMG[:-4]
fig.savefig(nome_pintura)
print 'pintura gerada em: %s' % nome_pintura
# list with the coordinates of each triangle
# print 'coords. triângulos:', coords_tri
# number of triangles
# print 'num. de triângulos:', len(coords_tri)
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/tests/test_docstring_parameters.py | 3 | 5239 | # Authors: Alexandre Gramfort <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import inspect
import sys
import warnings
import importlib
from pkgutil import walk_packages
from inspect import getsource
import sklearn
from sklearn.base import signature
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_docstring_parameters
from sklearn.utils.testing import _get_func_name
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.deprecation import _is_deprecated
PUBLIC_MODULES = set(['sklearn.' + modname
for _, modname, _ in walk_packages(sklearn.__path__)
if not modname.startswith('_') and
'.tests.' not in modname])
# TODO Uncomment all modules and fix doc inconsistencies everywhere
# The list of modules that are not tested for now
PUBLIC_MODULES -= set([
'sklearn.ensemble',
'sklearn.feature_selection',
'sklearn.kernel_approximation',
'sklearn.model_selection',
'sklearn.multioutput',
'sklearn.random_projection',
'sklearn.setup',
'sklearn.svm',
'sklearn.utils',
# Deprecated modules
'sklearn.cross_validation',
'sklearn.grid_search',
'sklearn.learning_curve',
])
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
'sklearn.utils.deprecation.load_mlcomp',
'sklearn.pipeline.make_pipeline',
'sklearn.pipeline.make_union',
'sklearn.utils.extmath.safe_sparse_dot',
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
'fit',
'score',
'fit_predict',
'fit_transform',
'partial_fit',
'predict'
]
def test_docstring_parameters():
raise SkipTest('Not testing docstring parameter consistency')
# Test module docstring formatting
# Skip test if numpydoc is not found or if python version is < 3.5
try:
import numpydoc # noqa
assert sys.version_info >= (3, 5)
except (ImportError, AssertionError):
raise SkipTest("numpydoc is required to test the docstrings, "
"as well as python version >= 3.5")
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES:
continue
if cname.startswith('_'):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
cls_init = getattr(cls, '__init__', None)
if _is_deprecated(cls_init):
continue
elif cls_init is not None:
this_incorrect += check_docstring_parameters(
cls.__init__, cdoc, class_name=cname)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if ('y' in sig.parameters and
sig.parameters['y'].default is None):
param_ignore = ['y'] # ignore y for fit and score
result = check_docstring_parameters(
method, ignore=param_ignore, class_name=cname)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith('_'):
continue
name_ = _get_func_name(func)
if (not any(d in name_ for d in _DOCSTRING_IGNORES) and
not _is_deprecated(func)):
incorrect += check_docstring_parameters(func)
msg = '\n' + '\n'.join(sorted(list(set(incorrect))))
if len(incorrect) > 0:
raise AssertionError(msg)
@ignore_warnings(category=DeprecationWarning)
def test_tabs():
# Test that there are no tabs in our source files
for importer, modname, ispkg in walk_packages(sklearn.__path__,
prefix='sklearn.'):
# because we don't import
mod = importlib.import_module(modname)
try:
source = getsource(mod)
except IOError: # user probably should have run "make clean"
continue
        assert '\t' not in source, ('"%s" has tabs, please remove them '
                                    'or add it to the ignore list' % modname)
| mit |
bchappet/dnfpy | src/test_dnfpy/cellular/testSbsFast2LayerConvolution.py | 1 | 2556 | import unittest
import time
import numpy as np
import matplotlib.pyplot as plt
from dnfpy.cellular.sbsFast2LayerConvolution import SbsFast2LayerConvolution
from dnfpy.core.constantMap import ConstantMap
import dnfpy.view.staticViewMatplotlib as view
class TestSbsFast2LayerConvolution(unittest.TestCase):
def setUp(self):
self.size = 49
self.activation = np.zeros((self.size,self.size),np.intc)
activationMap = ConstantMap("actmap",self.size, self.activation)
self.uut = SbsFast2LayerConvolution("uut",self.size)
self.uut.addChildren(activation=activationMap)
def test_update(self):
self.uut.compute()
data = self.uut.getData()
print(data)
self.assertEqual(np.sum(data),0)
def test_update_act(self):
self.uut.setParams(pSpike=0.1)
sizeStream = 200
self.uut.setParams(sizeStream=sizeStream)
data = self.getData()
sumD = np.sum(data)
print("min : %s, max: %s, sum: %s"%(np.min(data),np.max(data),sumD))
view.plotArray(data)
view.show()
self.assertAlmostEqual(sumD,-297.74427321949184)
def test_diag_std(self):
self.uut.setParams(pSpike=0.1)
self.uut.setParams(precisionProba=30)
        self.uut.setParams(iExc=1.51, iInh=0.92)  # pExc=0.00045, pInh=0.44
sizeStream = 200
self.uut.setParamsRec(sizeStream=sizeStream)
diags = []
#self.uut.setParamsRec(nstep=repet)
for i in range(2):
print("Repetition %s"%i)
self.uut.resetData()
data = self.getData()
diag = self.getDiag(data)
diags.append(diag)
(mean,std) = self.computeStatsDiag(diags)
print( mean)
print(std)
def getData(self):
for i in range(-1,2,1):
for j in range(-1,2,1):
self.activation[self.size/2+i][self.size/2+j] = 1;
start = time.clock()
self.uut.compute()
end = time.clock()
print("elapsed time %s"%(end-start))
data = self.uut.getData()
return data
def getDiag(self,data):
diag = []
for i in range(data.shape[0]):
diag.append(data[i,i])
return np.array(diag)
def computeStatsDiag(self,diags):
mean = np.mean(diags,axis=0)
std = np.std(diags,axis=0)
x = range(0,len(mean))
plt.plot(x,mean)
plt.errorbar(x,mean,yerr=std)
plt.show()
return (mean,std)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
PeterHuang2015/BITDM | python/kaggle/Titanic/analyse101.py | 1 | 1500 | import pandas
"""
Ph @ 4/5/2016
Code for learning how to solve an introductory Kaggle competition (Titanic).
"""
# Read in training data of titanic dataset
t_data = pandas.read_csv("train.csv")
# Show first 5 rows of data
print(t_data.head(5))
# Basic analysis, only numeric results/columns are shown
print(t_data.describe())
# Get a column
t_data["Age"]
# Prove that '=' makes an alias
t_data_cp = t_data
t_data_cp["Age"][0] = 11
print(t_data["Age"][0])
t_data_cp["Age"][0] = 22.0
# Fill missing values (holes) in the column with its median; the result must be assigned back to the column
t_data["Age"] = t_data["Age"].fillna(t_data["Age"].median())
# Get the distinct values of a column; NaN is returned as one of them (as float nan), so a check like "NaN in t_data["Cabin"].unique()" can be misleading
t_data["Sex"].unique()
# A concise batch conversion using boolean indexing with .loc: map male -> 0 and female -> 1
t_data.loc[t_data["Sex"] == "male", "Sex"] = 0
t_data.loc[t_data["Sex"] == "female", "Sex"] = 1
# Linear Regression
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
alg = LinearRegression()
kf = KFold(t_data.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
train_predictors = (t_data[predictors].iloc[train,:])
train_target = t_data["Survived"].iloc[train]
    # Fit on the training folds, then predict on the held-out fold
    alg.fit(train_predictors, train_target)
    test_predictions = alg.predict(t_data[predictors].iloc[test, :])
    predictions.append(test_predictions)
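# A hedged follow-up sketch (not in the original notebook): stitch the fold
# predictions together and estimate accuracy by thresholding at 0.5.
#   import numpy as np
#   all_pred = np.concatenate(predictions)
#   all_pred = (all_pred > 0.5).astype(int)
#   accuracy = (all_pred == t_data["Survived"]).mean()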
| mit |
thilbern/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
finetea/metricstudy | notebook/get_stock_financials_mul.py | 1 | 6687 |
# coding: utf-8
# In[3]:
import pandas as pd
import csv
import re
import os
import multiprocessing
# In[4]:
#returns a list of [market, stock_symbol] pairs read from the US stock CSV file.
MAX_LIMIT = 100000
DATA_PATH = '..\\data'
US_STOCK_FILE_CSV = DATA_PATH + '\\us_stock.csv'
#limit means the number of results to be returned. 0 means everything
def getSymbols(start=0, limit=10):
if limit <= 0:
limit = MAX_LIMIT
count = 0
stocks = []
with open(US_STOCK_FILE_CSV, 'rb') as stock_csv:
lines = csv.reader(stock_csv, delimiter=',')
for line in lines:
market = line[0]
stock = line[1]
m = re.search('^[A-Z]+$', stock)
if m:
stock = m.group(0)
count = count + 1
if count >= start:
stocks.append([market, stock])
limit = limit -1
if limit == 0:
break
return stocks
# In[5]:
#returns column names derived from the first column of the dataframe, as strings with special characters removed.
def getFields(df):
res = []
y = df.iloc[:,0:1] #extract 1st column
#y.iloc[1:10].values
for z in y.iloc[:].values:
x=re.sub('[ /,\.\&\-()\']','',z[0]) #remove special chars
if isinstance(x, unicode):
x = x.encode('UTF-8')
#print type(x)
res.append(x)
return res
# In[6]:
def getTableName(data):
y = data.iloc[:,0:1]
val = y.iloc[0:1].values[0][0]
if isinstance(val, unicode):
val = val.encode('UTF-8')
if val.startswith('Revenue'):
return 'IncomeStatement'
elif val.startswith('Cash '):
return 'BalanceSheet'
elif val.startswith('Net '):
return 'CashFlow'
else:
return None
# In[7]:
#open files for fin values
data_to_extract = {1:'IncomeStatement', #index in the dataframe, the name of financial data
3:'BalanceSheet',
5:'CashFlow'}
#returns a dict mapping each financial data type name to its open file handle
def openFinFiles():
stocks = getSymbols(0,1)
stock = stocks[0]
#print stocks
url = "https://www.google.com/finance?q="+stock[0]+"%3A"+stock[1]+"&fstype=ii"
#print url
df = pd.read_html(url, encoding=False)
#print len(df)
fin_files = {}
for k in data_to_extract.keys():
#print k
### get header string
x = df[k]
y = x.iloc[:,0:]
fields = getFields(y)
common_fields = ['symbol','date','period']
fields = common_fields + fields
header = ','.join(fields)
#print header
file_path = DATA_PATH + '\\' + data_to_extract[k] + '.csv'
print file_path
f = open(file_path, "wb")
f.write(header)
f.write('\r\n')
fin_files[data_to_extract[k]] = f
return fin_files
def closeFinFiles(fin_files):
for k in fin_files.keys():
fin_files[k].close()
return
# In[8]:
def getPeriodAndDate(column_value):
if isinstance(column_value, unicode):
column_value = column_value.encode('UTF-8')
column_value = re.sub('[\n]','',column_value) #remove special chars
size = len(column_value)
period = column_value[:size-11]
date = column_value[size-10:]
return (period,date)
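# Worked sketch (the header text is an assumed example of a Google Finance
# column label):
#   getPeriodAndDate(u'3 months ending 2015-12-31')
#   -> ('3 months ending', '2015-12-31')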
# In[9]:
# In[ ]:
def gatherFinValueAsync(stock, fin_incomestatement, fin_balancesheet, fin_cashflow):
market = stock[0]
stockname = stock[1]
url = "https://www.google.com/finance?q="+market+"%3A"+stockname+"&fstype=ii"
print url
df = None
try:
df = pd.read_html(url, encoding=False)
except Exception:
print "Exception has been caught while processing [%s:%s]. No financial data."%(market, stockname)
if df is None:
return #skip the rest if there is no financial data for this stock
#print 'num of data is %d'%(len(df))
if len(df) <= 1:
print "Exception has been caught while processing [%s:%s]. No financial data."%(market, stockname)
return #skip the rest if there is no financial data for this stock
#data_to_extract : this is declared already
#columns_to_extract = [1,2,3,4] #from 2015 to 2012
for k in range(0, len(df)):
data = df[k]
columns_to_extract = range(1,len(data.columns.values)) # all data columns
for c in columns_to_extract:
#print getPeriodAndDate(data.columns.values[c])
tablename = getTableName(data)
(period, date) = getPeriodAndDate(data.columns.values[c])
#common fields
fields = [stockname, date, period]
#row = '%s,%s,%s'%(stockname, date, period)
#data fields
y = data.iloc[:,c:c+1]
for z in y.iloc[:].values:
try:
val = z[0].encode('UTF-8')
except AttributeError:
print z[0]
val = str(z[0])
#print val
if val == '-':
fields.append('NA')
else:
fields.append(val)
fields_str = ','.join(fields)
#print fields_str
if tablename == 'IncomeStatement':
fin_incomestatement.append(fields_str)
elif tablename == 'BalanceSheet':
fin_balancesheet.append(fields_str)
elif tablename == 'CashFlow':
fin_cashflow.append(fields_str)
return
def do_multirun(start, limit=10):
if limit <= 0:
limit = MAX_LIMIT
cpu_cnt = multiprocessing.cpu_count()
multi_factor = 2
print "count of cpu : %d"%(cpu_cnt)
m = multiprocessing.Manager()
fin_incomestatement = m.list()
fin_balancesheet = m.list()
fin_cashflow = m.list()
p = multiprocessing.Pool(cpu_cnt*multi_factor)
stocks = getSymbols(start, limit) #get every symbol
i = 300
for stock in stocks[:limit]:
p.apply_async(gatherFinValueAsync, [stock, fin_incomestatement, fin_balancesheet, fin_cashflow])
i = i - 1
if i == 0:
os.system('sleep 10')
i = 300
p.close()
p.join()
#res
fin_files = openFinFiles()
for r in fin_incomestatement:
fin_files[data_to_extract[1]].write(r+'\r\n')
for r in fin_balancesheet:
fin_files[data_to_extract[3]].write(r+'\r\n')
for r in fin_cashflow:
fin_files[data_to_extract[5]].write(r+'\r\n')
closeFinFiles(fin_files)
print "end of do_multirun"
return
def main():
run = 100
for x in range(0, 6300, run):
do_multirun(x, x+run-1)
os.system('sleep 10')
if __name__ == '__main__':
main()
| mit |
LICEF/edx-platform | docs/en_us/developers/source/conf.py | 30 | 6955 | # -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=W0622
# pylint: disable=W0212
# pylint: disable=W0613
import sys, os
from path import path
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "common/djangoapps")
sys.path.append(root / "common/lib")
sys.path.append(root / "common/lib/capa")
sys.path.append(root / "common/lib/chem")
sys.path.append(root / "common/lib/sandbox-packages")
sys.path.append(root / "common/lib/xmodule")
sys.path.append(root / "common/lib/opaque_keys")
sys.path.append(root / "lms/djangoapps")
sys.path.append(root / "lms/lib")
sys.path.append(root / "cms/djangoapps")
sys.path.append(root / "cms/lib")
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform Developer Documentation'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
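# The Mock below treats any attribute whose name starts with an uppercase
# letter as a class and returns a freshly created type for it (so the mocked
# "classes" can still be subclassed); every other attribute access simply
# returns another Mock instance.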
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| mit |
xuewei4d/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 28 | 3844 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
plt.figure()
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure()
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10, edgecolors=(0, 0, 0))
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure()
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
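# Evaluate the LML over a (length-scale, noise-level) grid.  The kernel has a
# third hyperparameter (the constant amplitude factor), which is held fixed at
# 0.36 here so the 3-D hyperparameter space can be shown as a 2-D slice; note
# that log_marginal_likelihood expects log-transformed hyperparameter values.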
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), 50), decimals=1)
plt.contour(Theta0, Theta1, -LML,
levels=level, norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Cosserat/plate_circular_hole/Cosserat_elasticity/plot.py | 5 | 1239 | import numpy as np
import matplotlib.pyplot as plt
import h5py
def h52stressStrain(h5in_filename):
h5in=h5py.File(h5in_filename,"r")
outputs_all=h5in['/Model/Elements/Gauss_Outputs'][()]
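    # Rows 16 and 4 of the Gauss-output block are assumed here to hold the
    # stress and strain histories of interest (ordering taken from the ESSI
    # output layout); the first and last samples are dropped.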
stress = outputs_all[16 , 1:-1]
strain = outputs_all[4 , 1:-1]
return [stress, strain]
[stress_load, strain_load] = h52stressStrain("vm_2shearing.h5.feioutput")
[stress_unload, strain_unload] = h52stressStrain("vm_3unloading.h5.feioutput")
[stress_reload, strain_reload] = h52stressStrain("vm_4reloading.h5.feioutput")
stress = np.concatenate((stress_load,stress_unload,stress_reload))
strain = np.concatenate((strain_load,strain_unload,strain_reload))
# plt.plot(strain, stress)
# plt.show()
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'xx-large',
'figure.figsize': (10, 8),
'axes.labelsize': 'xx-large',
'axes.titlesize':'xx-large',
'xtick.labelsize':'xx-large',
'ytick.labelsize':'xx-large'}
pylab.rcParams.update(params)
plt.plot(strain, stress, 'k', linewidth=3)
plt.xlabel('Strain [unitless]')
plt.ylabel('Stress [Pa]')
plt.title('Material Behavior: Stress-Strain')
plt.grid()
plt.box()
plt.savefig('result.pdf', transparent=True, bbox_inches='tight')
plt.show() | cc0-1.0 |
kwikadi/orange3 | Orange/regression/random_forest.py | 1 | 1280 | import sklearn.ensemble as skl_ensemble
from Orange.regression import SklLearner, SklModel
from Orange.data import Variable, ContinuousVariable
from Orange.preprocess.score import LearnerScorer
__all__ = ["RandomForestRegressionLearner"]
class _FeatureScorerMixin(LearnerScorer):
feature_type = Variable
class_type = ContinuousVariable
def score(self, model):
return model.skl_model.feature_importances_
class RandomForestRegressor(SklModel):
pass
class RandomForestRegressionLearner(SklLearner, _FeatureScorerMixin):
__wraps__ = skl_ensemble.RandomForestRegressor
__returns__ = RandomForestRegressor
name = 'random forest regression'
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
preprocessors=None):
super().__init__(preprocessors=preprocessors)
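        # vars() snapshots the local namespace -- i.e. every keyword argument
        # above (plus self and preprocessors) -- into self.params, which the
        # SklLearner machinery is expected to filter and forward when it
        # instantiates the wrapped skl_ensemble.RandomForestRegressor.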
self.params = vars()
| bsd-2-clause |
ruohoruotsi/librosa | librosa/filters.py | 1 | 25845 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
dct
mel
chroma
constant_q
Window functions
----------------
.. autosummary::
:toctree: generated/
window_bandwidth
get_window
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
"""
import warnings
import numpy as np
import scipy
import scipy.signal
import six
from . import cache
from . import util
from .util.exceptions import ParameterError
from .core.time_frequency import note_to_hz, hz_to_midi, hz_to_octs
from .core.time_frequency import fft_frequencies, mel_frequencies
__all__ = ['dct',
'mel',
'chroma',
'constant_q',
'constant_q_lengths',
'cq_to_chroma',
'window_bandwidth',
'get_window']
# Dictionary of window function bandwidths
WINDOW_BANDWIDTHS = {'bart': 1.3334961334912805,
'barthann': 1.4560255965133932,
'bartlett': 1.3334961334912805,
'bkh': 2.0045975283585014,
'black': 1.7269681554262326,
'blackharr': 2.0045975283585014,
'blackman': 1.7269681554262326,
'blackmanharris': 2.0045975283585014,
'blk': 1.7269681554262326,
'bman': 1.7859588613860062,
'bmn': 1.7859588613860062,
'bohman': 1.7859588613860062,
'box': 1.0,
'boxcar': 1.0,
'brt': 1.3334961334912805,
'brthan': 1.4560255965133932,
'bth': 1.4560255965133932,
'cosine': 1.2337005350199792,
'flat': 2.7762255046484143,
'flattop': 2.7762255046484143,
'flt': 2.7762255046484143,
'halfcosine': 1.2337005350199792,
'ham': 1.3629455320350348,
'hamm': 1.3629455320350348,
'hamming': 1.3629455320350348,
'han': 1.50018310546875,
'hann': 1.50018310546875,
'hanning': 1.50018310546875,
'nut': 1.9763500280946082,
'nutl': 1.9763500280946082,
'nuttall': 1.9763500280946082,
'ones': 1.0,
'par': 1.9174603174603191,
'parz': 1.9174603174603191,
'parzen': 1.9174603174603191,
'rect': 1.0,
'rectangular': 1.0,
'tri': 1.3331706523555851,
'triang': 1.3331706523555851,
'triangle': 1.3331706523555851}
@cache(level=10)
def dct(n_filters, n_input):
"""Discrete cosine transform (DCT type-III) basis.
.. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform
Parameters
----------
n_filters : int > 0 [scalar]
number of output components (DCT filters)
n_input : int > 0 [scalar]
number of input components (frequency bins)
Returns
-------
dct_basis: np.ndarray [shape=(n_filters, n_input)]
DCT (type-III) basis vectors [1]_
Notes
-----
This function caches at level 10.
Examples
--------
>>> n_fft = 2048
>>> dct_filters = librosa.filters.dct(13, 1 + n_fft // 2)
>>> dct_filters
array([[ 0.031, 0.031, ..., 0.031, 0.031],
[ 0.044, 0.044, ..., -0.044, -0.044],
...,
[ 0.044, 0.044, ..., -0.044, -0.044],
[ 0.044, 0.044, ..., 0.044, 0.044]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(dct_filters, x_axis='linear')
>>> plt.ylabel('DCT function')
>>> plt.title('DCT filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)
return basis
@cache(level=10)
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)))
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i+2] / fdiff[i+1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn('Empty filters detected in mel frequency basis. '
'Some channels will produce empty responses. '
'Try increasing your sampling rate (and fmax) or '
'reducing n_mels.')
return weights
@cache(level=10)
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)])
def __float_window(window_spec):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = get_window(window_spec, n_min)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
@cache(level=10)
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window='hann', filter_scale=1, pad_fft=True, norm=1,
**kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : string, tuple, number, or function
Windowing function to apply to filters.
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad==True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
Notes
-----
This function caches at level 10.
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(ilen, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(ilen)
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = int(np.ceil(max_len))
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters])
return filters, np.asarray(lengths)
@cache(level=10)
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', filter_scale=1):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
Notes
-----
This function caches at level 10.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if filter_scale <= 0:
raise ParameterError('filter_scale must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
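# Note on constant_q_lengths: with the defaults (bins_per_octave=12,
# filter_scale=1), Q = 1 / (2**(1/12) - 1) ~= 16.8, the longest filter spans
# Q * sr / fmin samples, and filter lengths halve with every octave because
# the center frequencies double.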
@cache(level=10)
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Notes
-----
This function caches at level 10.
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = librosa.cqt(y, sr=sr)
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(CQT,
... ref=np.max),
... y_axis='cqt_note')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma_stft')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(float)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
@cache(level=10)
def window_bandwidth(window, n=1000):
'''Get the equivalent noise bandwidth of a window function.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
n : int > 0
The number of coefficients to use in estimating the
window bandwidth
Returns
-------
bandwidth : float
The equivalent noise bandwidth (in FFT bins) of the
given window function
Notes
-----
This function caches at level 10.
See Also
--------
get_window
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
win = get_window(window, n)
WINDOW_BANDWIDTHS[key] = n * np.sum(win**2) / np.sum(np.abs(win))**2
return WINDOW_BANDWIDTHS[key]
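# Illustrative behaviour of window_bandwidth: a known name such as 'hann'
# returns the tabulated value (~1.50 ENBW bins), while an unknown window falls
# through to the numerical estimate n * sum(w**2) / sum(|w|)**2 computed above.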
@cache(level=10)
def get_window(window, Nx, fftbins=True):
'''Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
If `window` is supplied as a vector of length != `n_fft`,
or is otherwise mis-specified.
'''
if six.callable(window):
return window(Nx)
elif (isinstance(window, (six.string_types, tuple)) or
np.isscalar(window)):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise ParameterError('Window size mismatch: '
'{:d} != {:d}'.format(len(window), Nx))
else:
raise ParameterError('Invalid window specification: {}'.format(window))
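# Illustrative calls to get_window (values here are examples only):
#   get_window('hann', 8)             -> 8-point periodic Hann window
#   get_window(('kaiser', 4.0), 8)    -> Kaiser window with beta = 4.0
#   get_window(scipy.signal.hann, 8)  -> callable, invoked as window(8)
#   get_window(np.ones(8), 8)         -> pre-computed vector, passed through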
| isc |
christinahedges/PyKE | pyke/kepoutlier.py | 2 | 14393 | from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepfit, kepstat, kepfunc
__all__ = ['kepoutlier']
def kepoutlier(infile, outfile=None, datacol='SAP_FLUX', nsig=3.0, stepsize=1.0,
npoly=3, niter=1, operation='remove', ranges='0,0', plot=False,
plotfit=False, overwrite=False, verbose=False,
logfile='kepoutlier.log'):
"""
kepoutlier -- Remove or replace statistical outliers from time series data
kepoutlier identifies data outliers relative to piecemeal best-fit
polynomials. Outliers are either removed from the output time series or
replaced by a noise-treated value defined by the polynomial fit. Identified
outliers and the best fit functions are optionally plotted for inspection
purposes.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing a Kepler light
curve within the first data extension.
outfile : str
The name of the output FITS file. ``outfile`` will be direct copy of
infile with either data outliers removed (i.e. the table will have
fewer rows) or the outliers will be corrected according to a best-fit
function and a noise model.
datacol : str
The column name containing data stored within extension 1 of infile.
This data will be searched for outliers. Typically this name is
SAP_FLUX (Simple Aperture Photometry fluxes) or PDCSAP_FLUX (Pre-search
Data Conditioning fluxes).
nsig : float
The sigma clipping threshold. Data deviating from a best fit function
by more than the threshold will be either removed or corrected
according to the user selection of operation.
stepsize : float
The data within datacol is unlikely to be well represented by a single
polynomial function. stepsize splits the data up into a series of time
        blocks, each of which is fit independently by a separate function. The user can
provide an informed choice of stepsize after inspecting the data with
the kepdraw tool. Units are days.
npoly : int
The polynomial order of each best-fit function.
niter : int
If outliers are found in a particular data section, that data will be
removed temporarily and the time series fit again. This will be
iterated niter times before freezing upon the best available fit.
operation : str
* ``remove`` throws away outliers. The output data table will smaller
or equal in size to the input table.
* ``replace`` replaces outliers with a value that is consistent with
the best-fit polynomial function and a random component defined by the
rms of the data relative to the fit and calculated using the inverse
normal cumulative function and a random number generator.
ranges : str
The user can choose specific time ranges of data on which to work. This
could, for example, avoid removing known stellar flares from a dataset.
Time ranges are supplied as comma-separated pairs of Barycentric Julian
Dates (BJDs). Multiple ranges are separated by a semi-colon. An example
containing two time ranges is::
'2455012.48517,2455014.50072;2455022.63487,2455025.08231'
If the user wants to correct the entire time series then providing
``ranges = '0,0'`` will tell the task to operate on the whole time series.
plot : bool
Plot the data and outliers?
plotfit : bool
Overlay the polynomial fits upon the plot?
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ kepoutlier kplr002437329-2010355172524_llc.fits --datacol SAP_FLUX
--nsig 4 --stepsize 5 --npoly 2 --niter 10 --operation replace
--verbose --plot --plotfit
.. image:: ../_static/images/api/kepoutlier.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPOUTLIER -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' datacol={}'.format(datacol)
+ ' nsig={}'.format(nsig)
+ ' stepsize={}'.format(stepsize)
+ ' npoly={}'.format(npoly)
+ ' niter={}'.format(niter)
+ ' operation={}'.format(operation)
+ ' ranges={}'.format(ranges)
+ ' plot={}'.format(plot)
+ ' plotfit={}'.format(plotfit)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPOUTLIER started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPOUTLIER: {} exists. Use overwrite=True'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile)
tstart, tstop, bjdref, cadence = kepio.timekeys(instr, infile, logfile,
verbose)
try:
work = instr[0].header['FILEVER']
cadenom = 1.0
except:
cadenom = cadence
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# read table structure
table = kepio.readfitstab(infile, instr[1], logfile, verbose)
# filter input data table
try:
nanclean = instr[1].header['NANCLEAN']
except:
time = kepio.readtimecol(infile, table, logfile, verbose)
flux = kepio.readfitscol(infile, table, datacol, logfile, verbose)
finite_data_mask = np.isfinite(time) & np.isfinite(flux) & (flux != 0)
table = table[finite_data_mask]
instr[1].data = table
comment = 'NaN cadences removed from data'
kepkey.new('NANCLEAN', True, comment, instr[1], outfile, logfile,
verbose)
# read table columns
try:
intime = instr[1].data.field('barytime') + 2.4e6
except:
intime = kepio.readfitscol(infile, instr[1].data, 'time', logfile,
verbose)
indata = kepio.readfitscol(infile, instr[1].data, datacol, logfile,
verbose)
intime = intime + bjdref
indata = indata / cadenom
# time ranges for region to be corrected
t1, t2 = kepio.timeranges(ranges, logfile, verbose)
cadencelis = kepstat.filterOnRange(intime, t1, t2)
# find limits of each time step
tstep1, tstep2 = [], []
work = intime[0]
while work < intime[-1]:
tstep1.append(work)
tstep2.append(np.array([work + stepsize, intime[-1]],
dtype='float64').min())
work += stepsize
# find cadence limits of each time step
cstep1, cstep2 = [], []
work1 = 0
work2 = 0
for i in range(len(intime)):
if intime[i] >= intime[work1] and intime[i] < intime[work1] + stepsize:
work2 = i
else:
cstep1.append(work1)
cstep2.append(work2)
work1 = i
work2 = i
cstep1.append(work1)
cstep2.append(work2)
outdata = indata * 1.0
# comment keyword in output file
kepkey.history(call, instr[0], outfile, logfile, verbose)
# clean up x-axis unit
intime0 = (tstart // 100) * 100.0
ptime = intime - intime0
xlab = 'BJD $-$ {}'.format(intime0)
# clean up y-axis units
pout = indata * 1.0
nrm = len(str(int(pout.max())))-1
pout = pout / 10**nrm
ylab = '10$^%d$ e$^-$ s$^{-1}$' % nrm
# data limits
xmin = ptime.min()
xmax = ptime.max()
ymin = pout.min()
ymax = pout.max()
xr = xmax - xmin
yr = ymax - ymin
ptime = np.insert(ptime, [0], [ptime[0]])
ptime = np.append(ptime, [ptime[-1]])
pout = np.insert(pout, [0], [0.0])
pout = np.append(pout, 0.0)
# plot light curve
if plot:
plt.figure()
plt.clf()
# plot data
ax = plt.axes([0.06, 0.1, 0.93, 0.87])
# force tick labels to be absolute rather than relative
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.plot(ptime, pout, color='#0000ff', linestyle='-', linewidth=1.0)
plt.fill(ptime, pout, color='#ffff00', linewidth=0.0, alpha=0.2)
plt.xlabel(xlab, {'color' : 'k'})
plt.ylabel(ylab, {'color' : 'k'})
plt.grid()
# loop over each time step, fit data, determine rms
masterfit = indata * 0.0
mastersigma = np.zeros(len(masterfit))
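    # 'poly' + str(npoly) resolves to one of the kepfunc.poly* helpers (e.g.
    # npoly=3 -> kepfunc.poly3); that polynomial is fit independently, with
    # iterative sigma clipping, to every stepsize-long block of cadences below.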
functype = getattr(kepfunc, 'poly' + str(npoly))
for i in range(len(cstep1)):
pinit = [indata[cstep1[i]:cstep2[i]+1].mean()]
if npoly > 0:
for j in range(npoly):
pinit.append(0.0)
pinit = np.array(pinit, dtype='float32')
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty = \
kepfit.lsqclip(functype, pinit,
intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]],
indata[cstep1[i]:cstep2[i]+1], None, nsig,
nsig, niter, logfile, verbose)
for j in range(len(coeffs)):
masterfit[cstep1[i]: cstep2[i] + 1] += (coeffs[j]
* (intime[cstep1[i]:cstep2[i]+1] - intime[cstep1[i]]) ** j)
for j in range(cstep1[i], cstep2[i] + 1):
mastersigma[j] = sigma
if plotfit:
plt.plot(plotx + intime[cstep1[i]] - intime0, ploty / 10 ** nrm,
'g', lw=3)
except:
for j in range(cstep1[i], cstep2[i] + 1):
masterfit[j] = indata[j]
mastersigma[j] = 1.0e10
message = ('WARNING -- KEPOUTLIER: could not fit range '
+ str(intime[cstep1[i]]) + '-' + str(intime[cstep2[i]]))
kepmsg.warn(logfile, message, verbose)
# reject outliers
rejtime, rejdata = [], []
naxis2 = 0
for i in tqdm(range(len(masterfit))):
if (abs(indata[i] - masterfit[i]) > nsig * mastersigma[i]
and i in cadencelis):
rejtime.append(intime[i])
rejdata.append(indata[i])
if operation == 'replace':
[rnd] = kepstat.randarray([masterfit[i]], [mastersigma[i]])
table[naxis2] = table[i]
table.field(datacol)[naxis2] = rnd
naxis2 += 1
else:
table[naxis2] = table[i]
naxis2 += 1
instr[1].data = table[:naxis2]
if plot:
rejtime = np.array(rejtime, dtype='float64')
rejdata = np.array(rejdata, dtype='float32')
plt.plot(rejtime - intime0, rejdata / 10 ** nrm, 'ro')
# plot ranges
plt.xlim(xmin - xr * 0.01, xmax + xr * 0.01)
if ymin >= 0.0:
plt.ylim(ymin - yr * 0.01, ymax + yr * 0.01)
else:
plt.ylim(1.0e-10, ymax + yr * 0.01)
# render plot
plt.show()
# write output file
print("Writing output file {}...".format(outfile))
instr.writeto(outfile)
# close input file
instr.close()
kepmsg.clock('KEPOUTLIER completed at', logfile, verbose)
def kepoutlier_main():
import argparse
parser = argparse.ArgumentParser(
description='Remove or replace data outliers from a time series',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-kepoutlier.'),
default=None)
parser.add_argument('--datacol', default='SAP_FLUX',
help='Name of data column to plot', type=str)
parser.add_argument('--nsig', default=3.,
help='Sigma clipping threshold for outliers',
type=float)
parser.add_argument('--stepsize', default=1.0,
help='Stepsize on which to fit data [days]',
type=float)
parser.add_argument('--npoly', default=3,
help='Polynomial order for each fit', type=int)
parser.add_argument('--niter', default=1,
help='Maximum number of clipping iterations', type=int)
parser.add_argument('--operation', default='remove',
help='Remove or replace outliers?', type=str,
choices=['replace','remove'])
parser.add_argument('--ranges', default='0,0',
help='Time ranges of regions to filter', type=str)
parser.add_argument('--plot', action='store_true', help='Plot result?')
parser.add_argument('--plotfit', action='store_true',
help='Plot fit over results?')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='kepoutlier.log', dest='logfile', type=str)
args = parser.parse_args()
kepoutlier(args.infile, args.outfile, args.datacol, args.nsig,
args.stepsize, args.npoly,args.niter, args.operation,
args.ranges, args.plot, args.plotfit, args.overwrite,
args.verbose, args.logfile)
| mit |
nhuntwalker/astroML | book_figures/chapter3/fig_transform_distribution.py | 3 | 2529 | r"""
Transformation of Distribution
------------------------------
Figure 3.4.
An example of transforming a uniform distribution. In the left panel, x
is sampled from a uniform distribution of unit width centered on x = 0.5
(:math:`\mu` = 0.5 and W = 1; see Section 3.3.1). In the right panel,
the distribution is transformed via y = exp(x). The form of the resulting
pdf is computed from eq. 3.20.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the data
np.random.seed(0)
# create a uniform distribution
uniform_dist = stats.uniform(0, 1)
x_sample = uniform_dist.rvs(1000)
x = np.linspace(-0.5, 1.5, 1000)
Px = uniform_dist.pdf(x)
# transform the data
y_sample = np.exp(x_sample)
y = np.exp(x)
Py = Px / y
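# Change of variables (eq. 3.20): with y = exp(x) we have x = ln(y) and
# |dx/dy| = 1/y, so p_y(y) = p_x(ln y) / y -- hence the division by y above.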
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(left=0.11, right=0.95, wspace=0.3, bottom=0.17, top=0.9)
ax = fig.add_subplot(121)
ax.hist(x_sample, 20, histtype='stepfilled', fc='#CCCCCC', normed=True)
ax.plot(x, Px, '-k')
ax.set_xlim(-0.2, 1.2)
ax.set_ylim(0, 1.4001)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax.text(0.95, 0.95, r'$p_x(x) = {\rm Uniform}(x)$',
va='top', ha='right',
transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p_x(x)$')
ax = fig.add_subplot(122)
ax.hist(y_sample, 20, histtype='stepfilled', fc='#CCCCCC', normed=True)
ax.plot(y, Py, '-k')
ax.set_xlim(0.85, 2.9)
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax.text(0.95, 0.95, '$y=\exp(x)$\n$p_y(y)=p_x(\ln y) / y$',
va='top', ha='right',
transform=ax.transAxes)
ax.set_xlabel('$y$')
ax.set_ylabel('$p_y(y)$')
plt.show()
| bsd-2-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_inst/Geneva_Rot_inst_age0/peaks_reader.py | 33 | 2761 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
#input files
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
# ---------------------------------------------------
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
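# (column 57 holds the 4860 (H-beta) reference selected above; each entry
# becomes log10(4860 * line/reference) when positive and stays zero otherwise)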
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
savetxt('peaks', max_values, delimiter='\t')
| gpl-2.0 |
gtrdp/twitter-clustering | old/clustering-dbscan.py | 1 | 2437 | from __future__ import print_function
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import feedparser
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn import metrics
import numpy as np
if __name__ == "__main__":
    # read the data; preprocessing involves:
    # - removing URLs and special characters
    # - converting all text to lowercase
# read from precrawled twitter tweets
print("reading the files, stemming, and stopwords removal")
raw_data = pd.read_csv('output.csv')
    # strip URLs, ampersand characters, and "[pic]" markers
raw_data = raw_data.replace(
['http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', "&", "\[pic\]"],
['', '', ''], regex=True)
users_long = raw_data['user'].tolist()
texts_long = [x.lower() for x in raw_data['text'].tolist()]
users = users_long
texts = texts_long
# # read from tempo.co rss
# tempo_data = feedparser.parse('tempo.xml')
#
# users = []
# texts = []
# for value in tempo_data['entries']:
# users.append(value['title'])
# texts.append(value['summary'])
# =============== end reading data =========================
# tfidf
# define vectorizer parameters (tuning parameters)
stopwords_english = nltk.corpus.stopwords.words('english')
with open("stopword_list_tala.txt", "r") as f:
stopwords_indonesian = f.read().splitlines()
print("TFIDF Vectorizer")
tfidf_vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000,
min_df=2, stop_words=stopwords_english + stopwords_indonesian,
use_idf=True)
    # TODO: build uni-gram, bi-gram, and tri-gram feature sets separately and compare the results
    # TODO: also try KCM and FCM (fuzzy c-means) clustering
    # TODO: evaluate cluster quality with the Davies-Bouldin index (DBI)
    # TODO: compare Jaccard and cosine similarity measures
tfidf_matrix = tfidf_vectorizer.fit_transform(texts) # fit the vectorizer
terms = tfidf_vectorizer.get_feature_names()
dist = 1 - cosine_similarity(tfidf_matrix)
# print(len(terms))
print(tfidf_matrix.shape)
# print(len(dist[0]))
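    # Note: `dist` (1 - cosine similarity) is computed above but not used; the
    # DBSCAN call below clusters the raw tf-idf vectors with its default
    # Euclidean metric.  A cosine-based alternative would be (same eps and
    # min_samples assumed here, though eps would likely need re-tuning):
    #     db = DBSCAN(eps=0.7, min_samples=10, metric='precomputed').fit(dist)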
print("beginning clustering...")
db = DBSCAN(eps=0.7, min_samples=10).fit(tfidf_matrix)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(tfidf_matrix, labels))
| mit |
luo66/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
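    # Two passes of 2x2 block summation shrink the image by a factor of 4
    # along each axis; the final division by 16 turns the summed blocks into
    # their means.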
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
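# Worked example (editorial sketch): the number of strided views per axis is
# (image_size - patch_size) // step + 1; e.g. for image_shape (10, 20), patch (2, 2) and
# step (5, 5): (10 - 2) // 5 + 1 = 2 and (20 - 2) // 5 + 1 = 4, matching expected_views_2D[0].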
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/metrics/metrics.py | 233 | 1262 | import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
| bsd-3-clause |
takat0m0/test_code | test_pymc/linear.py | 1 | 1421 | # -*- coding:utf-8 -*-
import os
import sys
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
if __name__ == '__main__':
num_sample = 100
x = np.random.uniform(-10.0, 10, num_sample)
y = 3.5 * x + 2.2 + np.random.uniform(-5.0, 5.0, num_sample)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x, y)
plt.savefig('test.png')
model = pm.Model()
with model:
a = pm.Normal('a', mu = 0, sd = 5.0)
b = pm.Normal('b', mu = 0, sd = 5.0)
sigma = pm.HalfNormal('sigma', sd = 1.0)
mu = a * x + b
obs = pm.Normal('y', mu = mu, sd = sigma, observed = y)
with model:
trace = pm.sample(10, tune = 100, step = pm.NUTS())
print(trace['a'])
inputs = np.asarray([0.1 * i - 20 for i in range(400)])
lower = []
upper = []
for x in inputs:
tmp = []
for i in range(10):
a = trace['a'][i]
b = trace['b'][i]
sigma = trace['sigma'][i]
tmp.append(np.random.normal(a * x + b, sigma, 100))
tmp = np.asarray(tmp)
mean, var = np.mean(tmp), np.var(tmp)
lower.append(mean - 3 * var)
upper.append(mean + 3 * var)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.fill_between(inputs, upper, lower, alpha = 0.5)
plt.savefig('variance.png')
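# Editorial note (sketch): `trace` holds only 10 posterior draws here, so the band written to
# 'variance.png' is quite noisy; a longer run such as `trace = pm.sample(1000, tune=1000)`
# would give a much smoother mean/variance estimate at the cost of sampling time.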
| mit |
mattilyra/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
msarahan/bokeh | bokeh/charts/conftest.py | 12 | 1484 | """Defines chart-wide shared test fixtures."""
import numpy as np
import pandas as pd
import pytest
from bokeh.sampledata.autompg import autompg
class TestData(object):
"""Contains properties with easy access to data used across tests."""
def __init__(self):
self.cat_list = ['a', 'c', 'a', 'b']
self.list_data = [[1, 2, 3, 4], [2, 3, 4, 5]]
self.array_data = [np.array(item) for item in self.list_data]
self.dict_data = {'col1': self.list_data[0],
'col2': self.list_data[1]}
self.pd_data = pd.DataFrame(self.dict_data)
self.records_data = self.pd_data.to_dict(orient='records')
self.auto_data = autompg
self._setup_auto_mpg()
def _setup_auto_mpg(self):
# add a boolean column
self.auto_data['large_displ'] = self.auto_data['displ'] > 350
# add categorical column
cat = pd.Categorical.from_array(self.auto_data['cyl'])
new_order = list(reversed(sorted(cat.categories.values.tolist())))
self.auto_data['reversed_cyl'] = cat.reorder_categories(new_order)
@pytest.fixture(scope='module')
def test_data():
return TestData()
@pytest.fixture(scope='module')
def wide_data_with_cat(test_data):
data = test_data.dict_data.copy()
data['col3'] = test_data.cat_list
return data
@pytest.fixture(scope='module')
def df_with_cat_index(test_data):
return pd.DataFrame(test_data.dict_data, index=test_data.cat_list)
| bsd-3-clause |
ingokegel/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_xml.py | 9 | 13206 | """Contains methods for building XML structures for interacting with IDE
The methods from this file are used for the debugger interaction. Please note
that Python console now uses Thrift structures with the similar methods
contained in `pydevd_thrift.py` file.
"""
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydev_bundle.pydev_imports import quote
from _pydevd_bundle import pydevd_extension_utils
from _pydevd_bundle import pydevd_resolver
from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, IS_PY3K, \
MAXIMUM_VARIABLE_REPRESENTATION_SIZE, RETURN_VALUES_DICT, LOAD_VALUES_POLICY, DEFAULT_VALUES_DICT
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_utils import take_first_n_coll_elements, is_pandas_container, is_string, pandas_to_str, \
should_evaluate_full_value, should_evaluate_shape
try:
import types
frame_type = types.FrameType
except:
frame_type = None
def make_valid_xml_value(s):
# Same thing as xml.sax.saxutils.escape but also escaping double quotes.
return s.replace("&", "&").replace('<', '<').replace('>', '>').replace('"', '"')
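# Illustrative example (editorial sketch):
#   make_valid_xml_value('1 < 2 & "ok"')  ->  '1 &lt; 2 &amp; &quot;ok&quot;'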
class ExceptionOnEvaluate:
def __init__(self, result):
self.result = result
_IS_JYTHON = sys.platform.startswith("java")
def _create_default_type_map():
if not _IS_JYTHON:
default_type_map = [
# None means that it should not be treated as a compound variable
            # isinstance does not accept a tuple on some versions of python, so we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
default_type_map.append((long, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((unicode, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((set, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
default_type_map.append((frozenset, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
from django.utils.datastructures import MultiValueDict
default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))
# we should put it before dict
except:
pass # django may not be installed
try:
from django.forms import BaseForm
default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))
# we should put it before instance resolver
except:
pass # django may not be installed
try:
from collections import deque
default_type_map.append((deque, pydevd_resolver.dequeResolver))
except:
pass
try:
from collections import OrderedDict
default_type_map.insert(0, (OrderedDict, pydevd_resolver.orderedDictResolver))
# we should put it before dict
except:
pass
if frame_type is not None:
default_type_map.append((frame_type, pydevd_resolver.frameResolver))
else:
from org.python import core # @UnresolvedImport
default_type_map = [
(core.PyNone, None),
(core.PyInteger, None),
(core.PyLong, None),
(core.PyFloat, None),
(core.PyComplex, None),
(core.PyString, None),
(core.PyTuple, pydevd_resolver.tupleResolver),
(core.PyList, pydevd_resolver.tupleResolver),
(core.PyDictionary, pydevd_resolver.dictResolver),
(core.PyStringMap, pydevd_resolver.dictResolver),
]
if hasattr(core, 'PyJavaInstance'):
# Jython 2.5b3 removed it.
default_type_map.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
return default_type_map
class TypeResolveHandler(object):
NO_PROVIDER = [] # Sentinel value (any mutable object to be used as a constant would be valid).
def __init__(self):
# Note: don't initialize with the types we already know about so that the extensions can override
# the default resolvers that are already available if they want.
self._type_to_resolver_cache = {}
self._type_to_str_provider_cache = {}
self._initialized = False
def _initialize(self):
self._default_type_map = _create_default_type_map()
self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)
self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)
self._initialized = True
def get_type(self, o):
try:
try:
# Faster than type(o) as we don't need the function call.
type_object = o.__class__
except:
# Not all objects have __class__ (i.e.: there are bad bindings around).
type_object = type(o)
type_name = type_object.__name__
except:
# This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
return self._get_type(o, type_object, type_name)
def _get_type(self, o, type_object, type_name):
resolver = self._type_to_resolver_cache.get(type_object)
if resolver is not None:
return type_object, type_name, resolver
if not self._initialized:
self._initialize()
try:
for resolver in self._resolve_providers:
if resolver.can_provide(type_object, type_name):
# Cache it
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
for t in self._default_type_map:
if isinstance(o, t[0]):
# Cache it
resolver = t[1]
self._type_to_resolver_cache[type_object] = resolver
return (type_object, type_name, resolver)
except:
traceback.print_exc()
# No match return default (and cache it).
resolver = pydevd_resolver.defaultResolver
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
if _IS_JYTHON:
_base_get_type = _get_type
def _get_type(self, o, type_object, type_name):
if type_name == 'org.python.core.PyJavaInstance':
return type_object, type_name, pydevd_resolver.instanceResolver
if type_name == 'org.python.core.PyArray':
return type_object, type_name, pydevd_resolver.jyArrayResolver
            return self._base_get_type(o, type_object, type_name)
def str_from_providers(self, o, type_object, type_name):
provider = self._type_to_str_provider_cache.get(type_object)
if provider is self.NO_PROVIDER:
return None
if provider is not None:
return provider.get_str(o)
if not self._initialized:
self._initialize()
for provider in self._str_providers:
if provider.can_provide(type_object, type_name):
self._type_to_str_provider_cache[type_object] = provider
return provider.get_str(o)
self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
return None
_TYPE_RESOLVE_HANDLER = TypeResolveHandler()
"""
def get_type(o):
Receives object and returns a triple (typeObject, typeString, resolver).
resolver != None means that variable is a container, and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
get_type = _TYPE_RESOLVE_HANDLER.get_type
_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers
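# Illustrative examples (editorial sketch, assuming no extension providers are registered):
#   get_type([1, 2, 3])  ->  (list, 'list', pydevd_resolver.tupleResolver)   # container
#   get_type(42)         ->  (int, 'int', None)                              # plain value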
def frame_vars_to_xml(frame_f_locals, hidden_ns=None):
""" dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/>
"""
xml = ""
keys = dict_keys(frame_f_locals)
if hasattr(keys, 'sort'):
keys.sort() # Python 3.0 does not have it
else:
keys = sorted(keys) # Jython 2.1 does not have it
return_values_xml = ''
for k in keys:
try:
v = frame_f_locals[k]
eval_full_val = should_evaluate_full_value(v)
if k == RETURN_VALUES_DICT:
for name, val in dict_iter_items(v):
return_values_xml += var_to_xml(val, name, additional_in_xml=' isRetVal="True"')
else:
if hidden_ns is not None and k in hidden_ns:
xml += var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"',
evaluate_full_value=eval_full_val)
else:
xml += var_to_xml(v, str(k), evaluate_full_value=eval_full_val)
except Exception:
traceback.print_exc()
pydev_log.error("Unexpected error, recovered safely.\n")
# Show return values as the first entry.
return return_values_xml + xml
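# Example output (editorial sketch): for a frame with locals {'x': 3} the function above
# returns roughly '<var name="x" type="int" qualifier="..." value="3" />\n' (the qualifier
# is the type's module, e.g. '__builtin__' on Python 2).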
def var_to_xml(val, name, doTrim=True, additional_in_xml='', evaluate_full_value=True, format='%s'):
""" single variable or dictionary to xml representation """
try:
# This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).
is_exception_on_eval = val.__class__ == ExceptionOnEvaluate
except:
is_exception_on_eval = False
if is_exception_on_eval:
v = val.result
else:
v = val
_type, typeName, resolver = get_type(v)
type_qualifier = getattr(_type, "__module__", "")
if not evaluate_full_value:
value = DEFAULT_VALUES_DICT[LOAD_VALUES_POLICY]
else:
try:
str_from_provider = _str_from_providers(v, _type, typeName)
if str_from_provider is not None:
value = str_from_provider
elif hasattr(v, '__class__'):
if v.__class__ == frame_type:
value = pydevd_resolver.frameResolver.get_frame_name(v)
elif v.__class__ in (list, tuple, set, frozenset, dict):
if len(v) > pydevd_resolver.MAX_ITEMS_TO_HANDLE:
value = '%s' % take_first_n_coll_elements(v, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
value = value.rstrip(')]}') + '...'
else:
value = '%s' % v
else:
value = format % v
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
try:
name = quote(name, '/>_= ') # TODO: Fix PY-5834 without using quote
except:
pass
xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_valid_xml_value(typeName))
if type_qualifier:
xml_qualifier = 'qualifier="%s"' % make_valid_xml_value(type_qualifier)
else:
xml_qualifier = ''
# cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and doTrim:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
# fix to work with unicode values
try:
if not IS_PY3K:
if value.__class__ == unicode: # @UndefinedVariable
value = value.encode('utf-8')
else:
if value.__class__ == bytes:
value = value.encode('utf-8')
except TypeError: # in java, unicode is a function
pass
if is_pandas_container(type_qualifier, typeName, v):
value = pandas_to_str(v, typeName, value, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
xml_value = ' value="%s"' % (make_valid_xml_value(quote(value, '/>_= ')))
xml_shape = ''
try:
if should_evaluate_shape():
if hasattr(v, 'shape') and not callable(v.shape):
xml_shape = ' shape="%s"' % make_valid_xml_value(str(tuple(v.shape)))
elif hasattr(v, '__len__') and not is_string(v):
xml_shape = ' shape="%s"' % make_valid_xml_value("%s" % str(len(v)))
except:
pass
if is_exception_on_eval:
xml_container = ' isErrorOnEval="True"'
else:
if resolver is not None:
xml_container = ' isContainer="True"'
else:
xml_container = ''
return ''.join((xml, xml_qualifier, xml_value, xml_container, xml_shape, additional_in_xml, ' />\n'))
| apache-2.0 |
rdeits/director | src/python/ddapp/segmentation.py | 4 | 160980 | import os
import sys
import math
import vtk
import colorsys
import time
import functools
import traceback
import PythonQt
from PythonQt import QtCore, QtGui
import ddapp.applogic as app
from ddapp import objectmodel as om
from ddapp import perception
from ddapp import lcmUtils
from ddapp import roboturdf
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp.transformUtils import getTransformFromAxes
from ddapp.timercallback import TimerCallback
from ddapp import mapsregistrar
from ddapp import affordancemanager
from ddapp.affordanceitems import *
from ddapp.visualization import *
from ddapp.filterUtils import *
from ddapp.fieldcontainer import FieldContainer
from ddapp.segmentationroutines import *
from ddapp import cameraview
import numpy as np
import vtkNumpy
from debugVis import DebugData
from shallowCopy import shallowCopy
import ioUtils
from ddapp.uuidutil import newUUID
import drc as lcmdrc
import bot_core as lcmbotcore
import vs as lcmvs
from ddapp import lcmUtils
DRILL_TRIANGLE_BOTTOM_LEFT = 'bottom left'
DRILL_TRIANGLE_BOTTOM_RIGHT = 'bottom right'
DRILL_TRIANGLE_TOP_LEFT = 'top left'
DRILL_TRIANGLE_TOP_RIGHT = 'top right'
# using drc plane segmentation instead of PCL
planeSegmentationFilter = vtk.vtkPlaneSegmentation
#planeSegmentationFilter = vtk.vtkPCLSACSegmentationPlane
_defaultSegmentationView = None
def getSegmentationView():
return _defaultSegmentationView or app.getViewManager().findView('Segmentation View')
def getDRCView():
return app.getDRCView()
def switchToView(viewName):
app.getViewManager().switchToView(viewName)
def getCurrentView():
return app.getCurrentRenderView()
def initAffordanceManager(view):
'''
Normally the affordance manager is initialized by the application.
This function can be called from scripts and tests to initialize the manager.
'''
global affordanceManager
affordanceManager = affordancemanager.AffordanceObjectModelManager(view)
def cropToLineSegment(polyData, point1, point2):
line = np.array(point2) - np.array(point1)
length = np.linalg.norm(line)
axis = line / length
polyData = labelPointDistanceAlongAxis(polyData, axis, origin=point1, resultArrayName='dist_along_line')
return thresholdPoints(polyData, 'dist_along_line', [0.0, length])
'''
icp programmable filter
import vtkFiltersGeneralPython as filtersGeneral
points = inputs[0]
block = inputs[1]
print points.GetNumberOfPoints()
print block.GetNumberOfPoints()
if points.GetNumberOfPoints() < block.GetNumberOfPoints():
block, points = points, block
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(points.VTKObject)
icp.SetTarget(block.VTKObject)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = filtersGeneral.vtkTransformPolyDataFilter()
t.SetInput(points.VTKObject)
t.SetTransform(icp)
t.Update()
output.ShallowCopy(t.GetOutput())
'''
def lockAffordanceToHand(aff, hand='l_hand'):
linkFrame = getLinkFrame(hand)
affT = aff.actor.GetUserTransform()
if not hasattr(aff, 'handToAffT') or not aff.handToAffT:
aff.handToAffT = computeAToB(linkFrame, affT)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(aff.handToAffT)
t.Concatenate(linkFrame)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
handAffUpdater = None
def lockToHandOn():
aff = getDefaultAffordanceObject()
if not aff:
return
global handAffUpdater
if handAffUpdater is None:
handAffUpdater = TimerCallback()
handAffUpdater.targetFps = 30
handAffUpdater.callback = functools.partial(lockAffordanceToHand, aff)
handAffUpdater.start()
def lockToHandOff():
aff = getDefaultAffordanceObject()
if not aff:
return
handAffUpdater.stop()
aff.handToAffT = None
class DisparityPointCloudItem(vis.PolyDataItem):
def __init__(self, name, imagesChannel, cameraName, imageManager):
vis.PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view=None)
self.addProperty('Channel', imagesChannel)
self.addProperty('Camera name', cameraName)
self.addProperty('Decimation', 0, attributes=om.PropertyAttributes(enumNames=['1', '2', '4', '8', '16']))
self.addProperty('Remove Size', 1000, attributes=om.PropertyAttributes(decimals=0, minimum=0, maximum=100000.0, singleStep=1000))
self.addProperty('Target FPS', 1.0, attributes=om.PropertyAttributes(decimals=1, minimum=0.1, maximum=30.0, singleStep=0.1))
self.timer = TimerCallback()
self.timer.callback = self.update
self.lastUtime = 0
self.imageManager = imageManager
self.cameraName = cameraName
self.setProperty('Visible', False)
def _onPropertyChanged(self, propertySet, propertyName):
vis.PolyDataItem._onPropertyChanged(self, propertySet, propertyName)
if propertyName == 'Visible':
if self.getProperty(propertyName):
self.timer.start()
else:
self.timer.stop()
elif propertyName in ('Decimation', 'Remove outliers'):
self.lastUtime = 0
def onRemoveFromObjectModel(self):
vis.PolyDataItem.onRemoveFromObjectModel(self)
self.timer.stop()
def update(self):
utime = self.imageManager.queue.getCurrentImageTime(self.cameraName)
if utime == self.lastUtime:
return
        if utime < self.lastUtime:
            pass  # timestamp went backwards (e.g. log playback); fall through and refresh
elif (utime - self.lastUtime < 1E6/self.getProperty('Target FPS')):
return
decimation = int(self.properties.getPropertyEnumValue('Decimation'))
removeSize = int(self.properties.getProperty('Remove Size'))
polyData = getDisparityPointCloud(decimation, imagesChannel=self.getProperty('Channel'), cameraName=self.getProperty('Camera name'),
removeOutliers=False, removeSize=removeSize)
self.setPolyData(polyData)
if not self.lastUtime:
self.setProperty('Color By', 'rgb_colors')
self.lastUtime = utime
def getRandomColor():
'''
Return a random color as a list of RGB values between 0.0 and 1.0.
'''
return colorsys.hsv_to_rgb(np.random.rand(), 1.0, 0.9)
def extractLargestCluster(polyData, minClusterSize=100):
polyData = applyEuclideanClustering(polyData, minClusterSize=minClusterSize)
return thresholdPoints(polyData, 'cluster_labels', [1, 1])
def segmentGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
''' A More complex ground removal algorithm. Works when plane isn't
preceisely flat. First clusters on z to find approx ground height, then fits a plane there
'''
searchRegionThickness = 0.5
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
groundHeight = np.percentile(zvalues, 5)
vtkNumpy.addNumpyToVtk(polyData, zvalues.copy(), 'z')
searchRegion = thresholdPoints(polyData, 'z', [groundHeight - searchRegionThickness/2.0, groundHeight + searchRegionThickness/2.0])
updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
return origin, normal, groundPoints, scenePoints
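# Example usage (editorial sketch, assuming `polyData` is a vtkPolyData scan with z roughly up):
#   origin, normal, groundPoints, scenePoints = segmentGround(polyData)
#   showPolyData(groundPoints, 'ground points', visible=False)
#   showPolyData(scenePoints, 'scene points')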
def segmentGroundPlane():
inputObj = om.findObjectByName('pointcloud snapshot')
inputObj.setProperty('Visible', False)
polyData = shallowCopy(inputObj.polyData)
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
groundHeight = np.percentile(zvalues, 5)
searchRegion = thresholdPoints(polyData, 'z', [groundHeight - 0.3, groundHeight + 0.3])
updatePolyData(searchRegion, 'ground search region', parent=getDebugFolder(), colorByName='z', visible=False)
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=[0,0,1], perpendicularAxis=[0,0,1], returnOrigin=True)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [0.05, 10])
updatePolyData(groundPoints, 'ground points', alpha=0.3)
updatePolyData(scenePoints, 'scene points', alpha=0.3)
#scenePoints = applyEuclideanClustering(scenePoints, clusterTolerance=0.10, minClusterSize=100, maxClusterSize=1e6)
#updatePolyData(scenePoints, 'scene points', colorByName='cluster_labels')
def applyLocalPlaneFit(polyData, searchPoint, searchRadius, searchRadiusEnd=None, removeGroundFirst=True):
useVoxelGrid = True
voxelGridSize = 0.03
distanceToPlaneThreshold = 0.02
if useVoxelGrid:
polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
if removeGroundFirst:
_, polyData = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.04)
cropped = cropToSphere(polyData, searchPoint, searchRadius)
updatePolyData(cropped, 'crop to sphere', visible=False, colorByName='distance_to_point')
polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=searchPoint, searchRadius=searchRadius)
if searchRadiusEnd is not None:
polyData, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, perpendicularAxis=normal, angleEpsilon=math.radians(30), searchOrigin=searchPoint, searchRadius=searchRadiusEnd)
fitPoints = thresholdPoints(polyData, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
updatePolyData(fitPoints, 'fitPoints', visible=False)
fitPoints = labelDistanceToPoint(fitPoints, searchPoint)
clusters = extractClusters(fitPoints, clusterTolerance=0.05, minClusterSize=3)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
fitPoints = clusters[0]
return fitPoints, normal
normalEstimationSearchRadius = 0.065
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(polyData)
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotPlaneNormal = np.abs(np.dot(normals, normal))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotPlaneNormal, 'normals_dot_plane_normal')
showPolyData(scenePoints, 'scene_with_normals', parent=getDebugFolder(), colorByName='normals_dot_plane_normal')
surfaces = thresholdPoints(scenePoints, 'normals_dot_plane_normal', [0.95, 1.0])
clusters = extractClusters(surfaces, clusterTolerance=0.1, minClusterSize=5)
clusters = clusters[:10]
for i, cluster in enumerate(clusters):
showPolyData(cluster, 'plane cluster %i' % i, parent=getDebugFolder(), visible=False)
return fitPoints
def orientToMajorPlane(polyData, pickedPoint):
'''
Find the largest plane and transform the cloud to align that plane
Use the given point as the origin
'''
distanceToPlaneThreshold=0.02
searchRadius = 0.5
planePoints, origin, normal = applyPlaneFit(polyData, distanceToPlaneThreshold, searchOrigin=pickedPoint, searchRadius=searchRadius, returnOrigin=True)
vis.updatePolyData(planePoints, 'local plane fit', color=[0,1,0], parent=getDebugFolder(), visible=False)
planeFrame = transformUtils.getTransformFromOriginAndNormal(pickedPoint, normal)
vis.updateFrame(planeFrame, 'plane frame', scale=0.15, parent=getDebugFolder(), visible=False)
polyData = transformPolyData(polyData, planeFrame.GetLinearInverse() )
# if the mean point is below the horizontal plane, flip the cloud
zvalues = vtkNumpy.getNumpyFromVtk(polyData, 'Points')[:,2]
midCloudHeight = np.mean(zvalues)
if (midCloudHeight < 0):
flipTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,180,0])
polyData = transformPolyData(polyData, flipTransform )
return polyData, planeFrame
def getMajorPlanes(polyData, useVoxelGrid=True):
voxelGridSize = 0.01
distanceToPlaneThreshold = 0.02
if useVoxelGrid:
polyData = applyVoxelGrid(polyData, leafSize=voxelGridSize)
polyDataList = []
minClusterSize = 100
while len(polyDataList) < 25:
f = planeSegmentationFilter()
f.SetInput(polyData)
f.SetDistanceThreshold(distanceToPlaneThreshold)
f.Update()
polyData = shallowCopy(f.GetOutput())
outliers = thresholdPoints(polyData, 'ransac_labels', [0, 0])
inliers = thresholdPoints(polyData, 'ransac_labels', [1, 1])
largestCluster = extractLargestCluster(inliers)
#i = len(polyDataList)
#showPolyData(inliers, 'inliers %d' % i, color=getRandomColor(), parent='major planes')
#showPolyData(outliers, 'outliers %d' % i, color=getRandomColor(), parent='major planes')
#showPolyData(largestCluster, 'cluster %d' % i, color=getRandomColor(), parent='major planes')
if largestCluster.GetNumberOfPoints() > minClusterSize:
polyDataList.append(largestCluster)
polyData = outliers
else:
break
return polyDataList
def showMajorPlanes(polyData=None):
if not polyData:
inputObj = om.findObjectByName('pointcloud snapshot')
inputObj.setProperty('Visible', False)
polyData = inputObj.polyData
om.removeFromObjectModel(om.findObjectByName('major planes'))
folderObj = om.findObjectByName('segmentation')
folderObj = om.getOrCreateContainer('major planes', folderObj)
origin = SegmentationContext.getGlobalInstance().getViewFrame().GetPosition()
polyData = labelDistanceToPoint(polyData, origin)
polyData = thresholdPoints(polyData, 'distance_to_point', [1, 4])
polyDataList = getMajorPlanes(polyData)
for i, polyData in enumerate(polyDataList):
obj = showPolyData(polyData, 'plane %d' % i, color=getRandomColor(), visible=True, parent='major planes')
obj.setProperty('Point Size', 3)
def cropToBox(polyData, transform, dimensions):
'''
dimensions is length 3 describing box dimensions
'''
origin = np.array(transform.GetPosition())
axes = transformUtils.getAxesFromTransform(transform)
for axis, length in zip(axes, dimensions):
cropAxis = np.array(axis)*(length/2.0)
polyData = cropToLineSegment(polyData, origin - cropAxis, origin + cropAxis)
return polyData
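# Illustrative sketch: crop to a 0.5 x 0.5 x 1.0 m box centred on a vtkTransform `t`:
#   cropped = cropToBox(polyData, t, [0.5, 0.5, 1.0])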
def cropToBounds(polyData, transform, bounds):
'''
bounds is a 2x3 containing the min/max values along the transform axes to use for cropping
'''
origin = np.array(transform.GetPosition())
axes = transformUtils.getAxesFromTransform(transform)
for axis, bound in zip(axes, bounds):
axis = np.array(axis)/np.linalg.norm(axis)
polyData = cropToLineSegment(polyData, origin + axis*bound[0], origin + axis*bound[1])
return polyData
def cropToSphere(polyData, origin, radius):
polyData = labelDistanceToPoint(polyData, origin)
return thresholdPoints(polyData, 'distance_to_point', [0, radius])
def applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=None, perpendicularAxis=None, angleEpsilon=0.2, returnOrigin=False, searchOrigin=None, searchRadius=None):
expectedNormal = expectedNormal if expectedNormal is not None else [-1,0,0]
fitInput = polyData
if searchOrigin is not None:
assert searchRadius
fitInput = cropToSphere(fitInput, searchOrigin, searchRadius)
# perform plane segmentation
f = planeSegmentationFilter()
f.SetInput(fitInput)
f.SetDistanceThreshold(distanceThreshold)
if perpendicularAxis is not None:
f.SetPerpendicularConstraintEnabled(True)
f.SetPerpendicularAxis(perpendicularAxis)
f.SetAngleEpsilon(angleEpsilon)
f.Update()
origin = f.GetPlaneOrigin()
normal = np.array(f.GetPlaneNormal())
# flip the normal if needed
if np.dot(normal, expectedNormal) < 0:
normal = -normal
# for each point, compute signed distance to plane
polyData = shallowCopy(polyData)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
if returnOrigin:
return polyData, origin, normal
else:
return polyData, normal
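# Example usage (editorial sketch, mirroring the wall fit in segmentValve further below): fit a
# plane near a picked point and keep only the inliers:
#   polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.02,
#                                            searchOrigin=point, searchRadius=0.3,
#                                            returnOrigin=True)
#   wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])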
def flipNormalsWithViewDirection(polyData, viewDirection):
normals = vnp.getNumpyFromVtk(polyData, 'normals')
normals[np.dot(normals, viewDirection) > 0] *= -1
def normalEstimation(dataObj, searchCloud=None, searchRadius=0.05, useVoxelGrid=False, voxelGridLeafSize=0.05):
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(searchRadius)
f.SetInput(dataObj)
if searchCloud:
f.SetInput(1, searchCloud)
elif useVoxelGrid:
f.SetInput(1, applyVoxelGrid(dataObj, voxelGridLeafSize))
f.Update()
dataObj = shallowCopy(f.GetOutput())
dataObj.GetPointData().SetNormals(dataObj.GetPointData().GetArray('normals'))
return dataObj
def addCoordArraysToPolyData(polyData):
polyData = shallowCopy(polyData)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
vtkNumpy.addNumpyToVtk(polyData, points[:,0].copy(), 'x')
vtkNumpy.addNumpyToVtk(polyData, points[:,1].copy(), 'y')
vtkNumpy.addNumpyToVtk(polyData, points[:,2].copy(), 'z')
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
viewOrigin = viewFrame.TransformPoint([0.0, 0.0, 0.0])
viewX = viewFrame.TransformVector([1.0, 0.0, 0.0])
viewY = viewFrame.TransformVector([0.0, 1.0, 0.0])
viewZ = viewFrame.TransformVector([0.0, 0.0, 1.0])
polyData = labelPointDistanceAlongAxis(polyData, viewX, origin=viewOrigin, resultArrayName='distance_along_view_x')
polyData = labelPointDistanceAlongAxis(polyData, viewY, origin=viewOrigin, resultArrayName='distance_along_view_y')
polyData = labelPointDistanceAlongAxis(polyData, viewZ, origin=viewOrigin, resultArrayName='distance_along_view_z')
return polyData
def getDebugRevolutionData():
#dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../drc-data'))
#filename = os.path.join(dataDir, 'valve_wall.vtp')
#filename = os.path.join(dataDir, 'bungie_valve.vtp')
#filename = os.path.join(dataDir, 'cinder-blocks.vtp')
#filename = os.path.join(dataDir, 'cylinder_table.vtp')
#filename = os.path.join(dataDir, 'firehose.vtp')
#filename = os.path.join(dataDir, 'debris.vtp')
#filename = os.path.join(dataDir, 'rev1.vtp')
#filename = os.path.join(dataDir, 'drill-in-hand.vtp')
filename = os.path.expanduser('~/Desktop/scans/debris-scan.vtp')
return addCoordArraysToPolyData(ioUtils.readPolyData(filename))
def getCurrentScanBundle():
obj = om.findObjectByName('SCANS_HALF_SWEEP')
if not obj:
return None
revPolyData = obj.polyData
if not revPolyData or not revPolyData.GetNumberOfPoints():
return None
if useVoxelGrid:
revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
return addCoordArraysToPolyData(revPolyData)
def getCurrentRevolutionData():
revPolyData = perception._multisenseItem.model.revPolyData
if not revPolyData or not revPolyData.GetNumberOfPoints():
return getCurrentScanBundle()
if useVoxelGrid:
revPolyData = applyVoxelGrid(revPolyData, leafSize=0.015)
return addCoordArraysToPolyData(revPolyData)
def getDisparityPointCloud(decimation=4, removeOutliers=True, removeSize=0, imagesChannel='CAMERA', cameraName='CAMERA_LEFT'):
p = cameraview.getStereoPointCloud(decimation, imagesChannel=imagesChannel, cameraName=cameraName, removeSize=removeSize)
if not p:
return None
if removeOutliers:
# attempt to scale outlier filtering, best tuned for decimation of 2 or 4
scaling = (10*16)/(decimation*decimation)
p = labelOutliers(p, searchRadius=0.06, neighborsInSearchRadius=scaling)
p = thresholdPoints(p, 'is_outlier', [0.0, 0.0])
return p
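# Worked example (editorial sketch): with decimation=4 the outlier filter above requires
# (10*16)/(4*4) = 10 neighbors within the 6 cm search radius, and decimation=2 requires
# (10*16)/(2*2) = 40, i.e. denser clouds must be locally denser to count as inliers.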
def getCurrentMapServerData():
mapServer = om.findObjectByName('Map Server')
polyData = None
if mapServer and mapServer.getProperty('Visible'):
polyData = mapServer.source.polyData
if not polyData or not polyData.GetNumberOfPoints():
return None
return addCoordArraysToPolyData(polyData)
useVoxelGrid = False
def segmentGroundPlanes():
objs = []
for obj in om.getObjects():
name = obj.getProperty('Name')
if name.startswith('pointcloud snapshot'):
objs.append(obj)
objs = sorted(objs, key=lambda x: x.getProperty('Name'))
d = DebugData()
prevHeadAxis = None
for obj in objs:
name = obj.getProperty('Name')
print '----- %s---------' % name
print 'head axis:', obj.headAxis
origin, normal, groundPoints, _ = segmentGround(obj.polyData)
print 'ground normal:', normal
showPolyData(groundPoints, name + ' ground points', visible=False)
a = np.array([0,0,1])
b = np.array(normal)
diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
if diff > 90:
print 180 - diff
else:
print diff
if prevHeadAxis is not None:
a = prevHeadAxis
b = np.array(obj.headAxis)
diff = math.degrees(math.acos(np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))))
if diff > 90:
print 180 - diff
else:
print diff
prevHeadAxis = np.array(obj.headAxis)
d.addLine([0,0,0], normal)
updatePolyData(d.getPolyData(), 'normals')
def extractCircle(polyData, distanceThreshold=0.04, radiusLimit=None):
circleFit = vtk.vtkPCLSACSegmentationCircle()
circleFit.SetDistanceThreshold(distanceThreshold)
circleFit.SetInput(polyData)
if radiusLimit is not None:
circleFit.SetRadiusLimit(radiusLimit)
circleFit.SetRadiusConstraintEnabled(True)
circleFit.Update()
polyData = thresholdPoints(circleFit.GetOutput(), 'ransac_labels', [1.0, 1.0])
return polyData, circleFit
def removeMajorPlane(polyData, distanceThreshold=0.02):
# perform plane segmentation
f = planeSegmentationFilter()
f.SetInput(polyData)
f.SetDistanceThreshold(distanceThreshold)
f.Update()
polyData = thresholdPoints(f.GetOutput(), 'ransac_labels', [0.0, 0.0])
return polyData, f
def removeGroundSimple(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
''' Simple ground plane removal algorithm. Uses ground height
and does simple z distance filtering.
Suitable for noisy data e.g. kinect/stereo camera
    (Default args should be relaxed, filtering simplified)
'''
groundHeight = SegmentationContext.getGlobalInstance().getGroundHeight()
origin = [0, 0, groundHeight]
normal = [0, 0, 1]
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
groundPoints = thresholdPoints(polyData, 'dist_to_plane', [-groundThickness/2.0, groundThickness/2.0])
scenePoints = thresholdPoints(polyData, 'dist_to_plane', [sceneHeightFromGround, 100])
return groundPoints, scenePoints
def removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05):
origin, normal, groundPoints, scenePoints = segmentGround(polyData, groundThickness, sceneHeightFromGround)
return groundPoints, scenePoints
def generateFeetForValve():
aff = om.findObjectByName('valve affordance')
assert aff
params = aff.params
origin = np.array(params['origin'])
origin[2] = 0.0
xaxis = -params['axis']
zaxis = np.array([0,0,1])
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
stanceWidth = 0.2
stanceRotation = 25.0
stanceOffset = [-1.0, -0.5, 0.0]
valveFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
valveFrame.PostMultiply()
valveFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(valveFrame, stanceWidth, stanceRotation, stanceOffset)
    showFrame(valveFrame, 'valve ground frame', parent=aff, scale=0.15, visible=False)
showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
#d = DebugData()
#d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
#updatePolyData(d.getPolyData(), 'stance debug')
#publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForDebris():
aff = om.findObjectByName('board A')
if not aff:
return
params = aff.params
origin = np.array(params['origin'])
origin = origin + params['zaxis']*params['zwidth']/2.0 - params['xaxis']*params['xwidth']/2.0
origin[2] = 0.0
yaxis = params['zaxis']
zaxis = np.array([0,0,1])
xaxis = np.cross(yaxis, zaxis)
stanceWidth = 0.35
stanceRotation = 0.0
stanceOffset = [-0.48, -0.08, 0]
boardFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
boardFrame.PostMultiply()
boardFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(boardFrame, stanceWidth, stanceRotation, stanceOffset)
showFrame(boardFrame, 'board ground frame', parent=aff, scale=0.15, visible=False)
lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
for obj in [lfoot, rfoot]:
obj.addToView(app.getDRCView())
#d = DebugData()
#d.addLine(valveFrame.GetPosition(), stanceFrame.GetPosition())
#updatePolyData(d.getPolyData(), 'stance debug')
#publishSteppingGoal(lfootFrame, rfootFrame)
def generateFeetForWye():
aff = om.findObjectByName('wye points')
if not aff:
return
params = aff.params
origin = np.array(params['origin'])
origin[2] = 0.0
yaxis = params['xaxis']
xaxis = -params['zaxis']
zaxis = np.cross(xaxis, yaxis)
stanceWidth = 0.20
stanceRotation = 0.0
stanceOffset = [-0.48, -0.08, 0]
affGroundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
affGroundFrame.PostMultiply()
affGroundFrame.Translate(origin)
stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(affGroundFrame, stanceWidth, stanceRotation, stanceOffset)
showFrame(affGroundFrame, 'affordance ground frame', parent=aff, scale=0.15, visible=False)
lfoot = showFrame(lfootFrame, 'lfoot frame', parent=aff, scale=0.15)
rfoot = showFrame(rfootFrame, 'rfoot frame', parent=aff, scale=0.15)
for obj in [lfoot, rfoot]:
obj.addToView(app.getDRCView())
def getFootFramesFromReferenceFrame(referenceFrame, stanceWidth, stanceRotation, stanceOffset):
footHeight=0.0745342
ref = vtk.vtkTransform()
ref.SetMatrix(referenceFrame.GetMatrix())
stanceFrame = vtk.vtkTransform()
stanceFrame.PostMultiply()
stanceFrame.RotateZ(stanceRotation)
stanceFrame.Translate(stanceOffset)
stanceFrame.Concatenate(ref)
lfootFrame = vtk.vtkTransform()
lfootFrame.PostMultiply()
lfootFrame.Translate(0, stanceWidth/2.0, footHeight)
lfootFrame.Concatenate(stanceFrame)
rfootFrame = vtk.vtkTransform()
rfootFrame.PostMultiply()
rfootFrame.Translate(0, -stanceWidth/2.0, footHeight)
rfootFrame.Concatenate(stanceFrame)
return stanceFrame, lfootFrame, rfootFrame
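# Example usage (editorial sketch, mirroring generateFeetForValve above; the numeric values
# here are illustrative only):
#   stanceFrame, lfootFrame, rfootFrame = getFootFramesFromReferenceFrame(
#       referenceFrame, stanceWidth=0.25, stanceRotation=0.0, stanceOffset=[-0.5, 0.0, 0.0])
#   showFrame(lfootFrame, 'lfoot frame', scale=0.15)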
def poseFromFrame(frame):
trans = lcmdrc.vector_3d_t()
trans.x, trans.y, trans.z = frame.GetPosition()
wxyz = range(4)
perception.drc.vtkMultisenseSource.GetBotQuaternion(frame, wxyz)
quat = lcmdrc.quaternion_t()
quat.w, quat.x, quat.y, quat.z = wxyz
pose = lcmdrc.position_3d_t()
pose.translation = trans
pose.rotation = quat
return pose
def cropToPlane(polyData, origin, normal, threshold):
polyData = shallowCopy(polyData)
normal = normal/np.linalg.norm(normal)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dist = np.dot(points - origin, normal)
vtkNumpy.addNumpyToVtk(polyData, dist, 'dist_to_plane')
cropped = thresholdPoints(polyData, 'dist_to_plane', threshold)
return cropped, polyData
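# Illustrative sketch: keep only points within +/- 2 cm of the plane through `origin` with
# normal `normal` (the second return value is the full cloud labeled with 'dist_to_plane'):
#   slab, labeled = cropToPlane(polyData, origin, normal, [-0.02, 0.02])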
def createLine(blockDimensions, p1, p2):
sliceWidth = np.array(blockDimensions).max()/2.0 + 0.02
sliceThreshold = [-sliceWidth, sliceWidth]
# require p1 to be point on left
if p1[0] > p2[0]:
p1, p2 = p2, p1
_, worldPt1 = getRayFromDisplayPoint(app.getCurrentRenderView(), p1)
_, worldPt2 = getRayFromDisplayPoint(app.getCurrentRenderView(), p2)
cameraPt = np.array(app.getCurrentRenderView().camera().GetPosition())
leftRay = worldPt1 - cameraPt
rightRay = worldPt2 - cameraPt
middleRay = (leftRay + rightRay) / 2.0
d = DebugData()
d.addLine(cameraPt, worldPt1)
d.addLine(cameraPt, worldPt2)
d.addLine(worldPt1, worldPt2)
d.addLine(cameraPt, cameraPt + middleRay)
updatePolyData(d.getPolyData(), 'line annotation', parent=getDebugFolder(), visible=False)
inputObj = om.findObjectByName('pointcloud snapshot')
if inputObj:
polyData = shallowCopy(inputObj.polyData)
else:
polyData = getCurrentRevolutionData()
origin = cameraPt
normal = np.cross(rightRay, leftRay)
leftNormal = np.cross(normal, leftRay)
rightNormal = np.cross(rightRay, normal)
normal /= np.linalg.norm(normal)
leftNormal /= np.linalg.norm(leftNormal)
rightNormal /= np.linalg.norm(rightNormal)
middleRay /= np.linalg.norm(middleRay)
cropped, polyData = cropToPlane(polyData, origin, normal, sliceThreshold)
updatePolyData(polyData, 'slice dist', parent=getDebugFolder(), colorByName='dist_to_plane', colorByRange=[-0.5, 0.5], visible=False)
updatePolyData(cropped, 'slice', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
cropped, _ = cropToPlane(cropped, origin, leftNormal, [-1e6, 0])
cropped, _ = cropToPlane(cropped, origin, rightNormal, [-1e6, 0])
updatePolyData(cropped, 'slice segment', parent=getDebugFolder(), colorByName='dist_to_plane', visible=False)
planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=middleRay, angleEpsilon=math.radians(60))
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
updatePolyData(planePoints, 'board segmentation', parent=getDebugFolder(), color=getRandomColor(), visible=False)
'''
names = ['board A', 'board B', 'board C', 'board D', 'board E', 'board F', 'board G', 'board H', 'board I']
for name in names:
if not om.findObjectByName(name):
break
else:
name = 'board'
'''
name = 'board'
segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=-middleRay, expectedXAxis=middleRay, edgeSign=-1, name=name)
def updateBlockAffordances(polyData=None):
for obj in om.getObjects():
if isinstance(obj, BoxAffordanceItem):
if 'refit' in obj.getProperty('Name'):
om.removeFromObjectModel(obj)
for obj in om.getObjects():
if isinstance(obj, BoxAffordanceItem):
updateBlockFit(obj, polyData)
def updateBlockFit(affordanceObj, polyData=None):
affordanceObj.updateParamsFromActorTransform()
name = affordanceObj.getProperty('Name') + ' refit'
origin = affordanceObj.params['origin']
normal = affordanceObj.params['yaxis']
edgePerpAxis = affordanceObj.params['xaxis']
blockDimensions = [affordanceObj.params['xwidth'], affordanceObj.params['ywidth']]
if polyData is None:
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
cropThreshold = 0.1
cropped = polyData
cropped, _ = cropToPlane(cropped, origin, normal, [-cropThreshold, cropThreshold])
cropped, _ = cropToPlane(cropped, origin, edgePerpAxis, [-cropThreshold, cropThreshold])
updatePolyData(cropped, 'refit search region', parent=getDebugFolder(), visible=False)
cropped = extractLargestCluster(cropped)
planePoints, planeNormal = applyPlaneFit(cropped, distanceThreshold=0.005, perpendicularAxis=normal, angleEpsilon=math.radians(10))
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-0.005, 0.005])
updatePolyData(planePoints, 'refit board segmentation', parent=getDebugFolder(), visible=False)
refitObj = segmentBlockByTopPlane(planePoints, blockDimensions, expectedNormal=normal, expectedXAxis=edgePerpAxis, edgeSign=-1, name=name)
refitOrigin = np.array(refitObj.params['origin'])
refitLength = refitObj.params['zwidth']
refitZAxis = refitObj.params['zaxis']
refitEndPoint1 = refitOrigin + refitZAxis*refitLength/2.0
originalLength = affordanceObj.params['zwidth']
correctedOrigin = refitEndPoint1 - refitZAxis*originalLength/2.0
originDelta = correctedOrigin - refitOrigin
refitObj.params['zwidth'] = originalLength
refitObj.polyData.DeepCopy(affordanceObj.polyData)
refitObj.actor.GetUserTransform().Translate(originDelta)
refitObj.updateParamsFromActorTransform()
def startInteractiveLineDraw(blockDimensions):
picker = LineDraw(app.getCurrentRenderView())
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(createLine, blockDimensions)
def startLeverValveSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentLeverValve)
def refitValveAffordance(aff, point1, origin, normal):
xaxis = aff.params['xaxis']
yaxis = aff.params['yaxis']
zaxis = aff.params['zaxis']
origin = aff.params['origin']
zaxis = normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
aff.updateParamsFromActorTransform()
def segmentValve(expectedValveRadius, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, _, wallNormal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
polyData, _, _ = applyPlaneFit(polyData, expectedNormal=wallNormal, searchOrigin=point2, searchRadius=expectedValveRadius, angleEpsilon=0.2, returnOrigin=True)
valveCluster = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
valveCluster = cropToSphere(valveCluster, point2, expectedValveRadius*2)
valveCluster = extractLargestCluster(valveCluster, minClusterSize=1)
updatePolyData(valveCluster, 'valve cluster', parent=getDebugFolder(), visible=False)
origin = np.average(vtkNumpy.getNumpyFromVtk(valveCluster, 'Points') , axis=0)
zaxis = wallNormal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
zwidth = 0.03
radius = expectedValveRadius
d = DebugData()
d.addLine(np.array([0,0,-zwidth/2.0]), np.array([0,0,zwidth/2.0]), radius=radius)
name = 'valve affordance'
obj = showPolyData(d.getPolyData(), name, cls=FrameAffordanceItem, parent='affordances', color=[0,1,0])
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
refitWallCallbacks.append(functools.partial(refitValveAffordance, obj))
params = dict(axis=zaxis, radius=radius, length=zwidth, origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis,
xwidth=radius, ywidth=radius, zwidth=zwidth,
otdf_type='steering_cyl', friendly_name='valve')
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, scale=radius, visible=False)
frameObj.addToView(app.getDRCView())
def segmentValveByBoundingBox(polyData, searchPoint):
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData = cropToSphere(polyData, searchPoint, radius=0.6)
polyData = applyVoxelGrid(polyData, leafSize=0.015)
# extract tube search region
polyData = labelDistanceToLine(polyData, searchPoint, np.array(searchPoint) + np.array([0,0,1]))
searchRegion = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.2])
updatePolyData(searchRegion, 'valve tube search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
# guess valve plane
_, origin, normal = applyPlaneFit(searchRegion, distanceThreshold=0.01, perpendicularAxis=viewDirection, angleEpsilon=math.radians(30), expectedNormal=-viewDirection, returnOrigin=True)
# extract plane search region
polyData = labelPointDistanceAlongAxis(polyData, normal, origin)
searchRegion = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
updatePolyData(searchRegion, 'valve plane search region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
valvePoints = extractLargestCluster(searchRegion, minClusterSize=1)
updatePolyData(valvePoints, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
valvePoints, _ = applyPlaneFit(valvePoints, expectedNormal=normal, perpendicularAxis=normal, distanceThreshold=0.01)
valveFit = thresholdPoints(valvePoints, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
zvalues = points[:,2].copy()
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
tubeRadius = 0.017
radius = float((maxZ - minZ) / 2.0) - tubeRadius
fields = makePolyDataFields(valveFit)
origin = np.array(fields.frame.GetPosition())
#origin = computeCentroid(valveFit)
zaxis = [0,0,1]
xaxis = normal
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=radius, Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
return obj
def segmentDoorPlane(polyData, doorPoint, stanceFrame):
doorPoint = np.array(doorPoint)
doorBand = 1.5
polyData = cropToLineSegment(polyData, doorPoint + [0.0,0.0,doorBand/2], doorPoint - [0.0,0.0,doorBand/2])
fitPoints, normal = applyLocalPlaneFit(polyData, doorPoint, searchRadius=0.2, searchRadiusEnd=1.0, removeGroundFirst=False)
updatePolyData(fitPoints, 'door points', visible=False, color=[0,1,0])
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
if np.dot(normal, viewDirection) > 0:
normal = -normal
origin = computeCentroid(fitPoints)
groundHeight = stanceFrame.GetPosition()[2]
origin = [origin[0], origin[1], groundHeight]
xaxis = -normal
zaxis = [0,0,1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
return t
def segmentValveByRim(polyData, rimPoint1, rimPoint2):
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
yaxis = np.array(rimPoint2) - np.array(rimPoint1)
zaxis = [0,0,1]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
# flip xaxis so it points along the view direction
if np.dot(xaxis, viewDirection) < 0:
xaxis = -xaxis
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
origin = (np.array(rimPoint2) + np.array(rimPoint1)) / 2.0
polyData = labelPointDistanceAlongAxis(polyData, xaxis, origin)
polyData = thresholdPoints(polyData, 'distance_along_axis', [-0.05, 0.05])
updatePolyData(polyData, 'valve plane region', parent=getDebugFolder(), colorByName='distance_along_axis', visible=False)
polyData = cropToSphere(polyData, origin, radius=0.4)
polyData = applyVoxelGrid(polyData, leafSize=0.015)
updatePolyData(polyData, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
valveFit = extractLargestCluster(polyData, minClusterSize=1)
updatePolyData(valveFit, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
points = vtkNumpy.getNumpyFromVtk(valveFit, 'Points')
zvalues = points[:,2].copy()
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
tubeRadius = 0.017
radius = float((maxZ - minZ) / 2.0) - tubeRadius
fields = makePolyDataFields(valveFit)
origin = np.array(fields.frame.GetPosition())
vis.updatePolyData(transformPolyData(fields.box, fields.frame), 'valve cluster bounding box', visible=False)
#origin = computeCentroid(valveFit)
'''
zaxis = [0,0,1]
xaxis = normal
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
'''
radius = np.max(fields.dims)/2.0 - tubeRadius
proj = [np.abs(np.dot(xaxis, axis)) for axis in fields.axes]
xaxisNew = fields.axes[np.argmax(proj)]
if np.dot(xaxisNew, xaxis) < 0:
xaxisNew = -xaxisNew
xaxis = xaxisNew
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
return obj
def segmentValveByWallPlane(expectedValveRadius, point1, point2):
centerPoint = (point1 + point2) / 2.0
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
_ , polyData = removeGround(polyData)
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
perpLine = np.cross(point2 - point1, normal)
#perpLine /= np.linalg.norm(perpLine)
#perpLine * np.linalg.norm(point2 - point1)/2.0
point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
d = DebugData()
d.addLine(point1, point2)
d.addLine(point3, point4)
updatePolyData(d.getPolyData(), 'crop lines', parent=getDebugFolder(), visible=False)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'valve wall', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.05, 0.5])
searchRegion = cropToLineSegment(searchRegion, point1, point2)
searchRegion = cropToLineSegment(searchRegion, point3, point4)
updatePolyData(searchRegion, 'valve search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
searchRegionSpokes = shallowCopy(searchRegion)
searchRegion, origin, _ = applyPlaneFit(searchRegion, expectedNormal=normal, perpendicularAxis=normal, returnOrigin=True)
searchRegion = thresholdPoints(searchRegion, 'dist_to_plane', [-0.015, 0.015])
updatePolyData(searchRegion, 'valve search region 2', parent=getDebugFolder(), color=[0,1,0], visible=False)
largestCluster = extractLargestCluster(searchRegion, minClusterSize=1)
updatePolyData(largestCluster, 'valve cluster', parent=getDebugFolder(), color=[0,1,0], visible=False)
radiusLimit = [expectedValveRadius - 0.01, expectedValveRadius + 0.01] if expectedValveRadius else None
#radiusLimit = None
polyData, circleFit = extractCircle(largestCluster, distanceThreshold=0.01, radiusLimit=radiusLimit)
updatePolyData(polyData, 'circle fit', parent=getDebugFolder(), visible=False)
#polyData, circleFit = extractCircle(polyData, distanceThreshold=0.01)
#showPolyData(polyData, 'circle fit', colorByName='z')
radius = circleFit.GetCircleRadius()
origin = np.array(circleFit.GetCircleOrigin())
circleNormal = np.array(circleFit.GetCircleNormal())
circleNormal = circleNormal/np.linalg.norm(circleNormal)
if np.dot(circleNormal, normal) < 0:
circleNormal *= -1
# force use of the plane normal
circleNormal = normal
radius = expectedValveRadius
d = DebugData()
d.addLine(origin - normal*radius, origin + normal*radius)
d.addCircle(origin, circleNormal, radius)
updatePolyData(d.getPolyData(), 'valve axes', parent=getDebugFolder(), visible=False)
zaxis = -circleNormal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
#t = getTransformFromAxes(xaxis, yaxis, zaxis) # this was added to be consistent with segmentValveByRim
t = getTransformFromAxes(zaxis, -yaxis, xaxis) # this was added to be consistent with segmentValveByRim
t.PostMultiply()
t.Translate(origin)
# Spoke angle fitting:
if False: # spoke fitting disabled Jan 2015
# extract the position of the points relative to the valve axis:
searchRegionSpokes = labelDistanceToLine(searchRegionSpokes, origin, [origin + circleNormal])
searchRegionSpokes = thresholdPoints(searchRegionSpokes, 'distance_to_line', [0.05, radius-0.04])
updatePolyData(searchRegionSpokes, 'valve spoke search', parent=getDebugFolder(), visible=False)
searchRegionSpokesLocal = transformPolyData(searchRegionSpokes, t.GetLinearInverse() )
points = vtkNumpy.getNumpyFromVtk(searchRegionSpokesLocal , 'Points')
spoke_angle = findValveSpokeAngle(points)
else:
spoke_angle = 0
spokeAngleTransform = transformUtils.frameFromPositionAndRPY([0,0,0], [0,0,spoke_angle])
spokeTransform = transformUtils.copyFrame(t)
spokeAngleTransform.Concatenate(spokeTransform)
spokeObj = showFrame(spokeAngleTransform, 'spoke frame', parent=getDebugFolder(), visible=False, scale=radius)
spokeObj.addToView(app.getDRCView())
t = spokeAngleTransform
tubeRadius = 0.017
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(), pose=pose, Color=[0,1,0], Radius=float(radius), Segments=20)
desc['Tube Radius'] = tubeRadius
obj = affordanceManager.newAffordanceFromDescription(desc)
obj.params = dict(radius=radius)
def showHistogram(polyData, arrayName, numberOfBins=100):
import matplotlib.pyplot as plt
x = vnp.getNumpyFromVtk(polyData, arrayName)
hist, bins = np.histogram(x, bins=numberOfBins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
return bins[np.argmax(hist)] + (bins[1] - bins[0])/2.0
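# Hedged usage sketch (not part of the original module): showHistogram pops up a matplotlib bar
# chart and returns the centre of the most populated bin, so it can double as a crude mode
# estimator. The 'dist_to_plane' array name below is only an example of a label produced
# elsewhere in this module; the input cloud must already carry it.
def _exampleShowHistogram(polyData):
    peak = showHistogram(polyData, 'dist_to_plane', numberOfBins=50)
    print 'most populated bin is centred at', peak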
def applyKmeansLabel(polyData, arrayName, numberOfClusters, whiten=False):
import scipy.cluster
ar = vnp.getNumpyFromVtk(polyData, arrayName).copy()
if whiten:
ar = scipy.cluster.vq.whiten(ar)  # whiten() returns a new array; assign it so the flag has an effect
codes, distortion = scipy.cluster.vq.kmeans(ar, numberOfClusters)
if arrayName == 'normals' and numberOfClusters == 2:
v1 = codes[0]
v2 = codes[1]
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
angle = np.arccos(np.dot(v1, v2))
print 'angle between normals:', np.degrees(angle)
code, distance = scipy.cluster.vq.vq(ar, codes)
polyData = shallowCopy(polyData)
vnp.addNumpyToVtk(polyData, code, '%s_kmeans_label' % arrayName)
return polyData
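# Hedged usage sketch (not part of the original module): cluster the estimated surface normals
# into two groups and colour the cloud by the label array that applyKmeansLabel appends
# ('<arrayName>_kmeans_label'). Assumes the input cloud already carries a 'normals' array.
def _exampleApplyKmeansLabel(polyData):
    labeled = applyKmeansLabel(polyData, 'normals', 2)
    showPolyData(labeled, 'kmeans labels', colorByName='normals_kmeans_label', visible=False)
    return labeled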
def findValveSpokeAngle(points):
'''
Determine the valve spoke angle by binning the spoke returns.
Returns the angle in degrees.
'''
#np.savetxt("/home/mfallon/Desktop/spoke_points.csv", points, delimiter=",")
# convert all points to angles in degrees, folded into the range [0,120)
angle = np.degrees( np.arctan2( points[:,1] , points[:,0] ) )
qq = np.where(angle < 0)[0]
angle[qq] += 360
angle = np.mod( angle, 120)
# find the spoke as the max of a histogram:
bins = range(0,130,10) # bin edges 0,10,...,120
freq, bins = np.histogram(angle, bins)
amax = np.argmax(freq)
spoke_angle = bins[amax] + 5 # report the centre of the 10 degree bin
return spoke_angle
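# Hedged sketch (not part of the original module): because findValveSpokeAngle folds angles
# modulo 120 degrees and reports the centre of a 10 degree bin, three spokes at roughly
# 35, 155 and 275 degrees all land in the [30, 40) bin and the function returns 35.
def _exampleFindValveSpokeAngle():
    angles = np.radians([35.0, 155.0, 275.0])
    pts = np.array([[np.cos(a), np.sin(a), 0.0] for a in angles])
    return findValveSpokeAngle(pts)  # expected to be 35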
def findWallCenter(polyData, removeGroundMethod=removeGround):
'''
Find a frame at the center of the valve wall
X & Y: average of points on the wall plane
Z: 4 feet off the ground (determined using the robot's feet)
Orientation: z-normal into the plane, y-axis horizontal
'''
_ , polyData = removeGroundMethod(polyData)
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
wallPoints = applyVoxelGrid(wallPoints, leafSize=0.03)
wallPoints = extractLargestCluster(wallPoints, minClusterSize=100)
updatePolyData(wallPoints, 'auto valve wall', parent=getDebugFolder(), visible=False)
xvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,0]
yvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,1]
# use the midpoint of min/max rather than the median
#xcenter = np.median(xvalues)
#ycenter = np.median(yvalues)
xcenter = (np.max(xvalues)+np.min(xvalues))/2
ycenter = (np.max(yvalues)+np.min(yvalues))/2
# not used, not very reliable
#zvalues = vtkNumpy.getNumpyFromVtk(wallPoints, 'Points')[:,2]
#zcenter = np.median(zvalues)
zcenter = SegmentationContext.getGlobalInstance().getGroundHeight() + 1.2192 # valves are 4ft from ground
point1 = np.array([xcenter, ycenter, zcenter]) # center of the valve wall
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
normalObj = showFrame(t, 'valve wall frame', parent=getDebugFolder(), visible=False) # z direction out of wall
normalObj.addToView(app.getDRCView())
return t
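# Hedged usage sketch (not part of the original module): findWallCenter returns a vtkTransform at
# the wall centre (4 ft above the ground, z-axis into the wall), so offsets expressed in the wall
# frame can be concatenated onto it to get world-frame pick points, as segmentValveWallAuto does
# below. The 0.6 m lateral offset is just an example value.
def _exampleWallFrameOffset(polyData, lateralOffset=0.6):
    t = findWallCenter(polyData)
    offsetFrame = transformUtils.frameFromPositionAndRPY([0, lateralOffset, 0], [0, 0, 0])
    offsetFrame.Concatenate(t)
    return np.array(offsetFrame.GetPosition())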
def segmentValveWallAuto(expectedValveRadius=.195, mode='both', removeGroundMethod=removeGround ):
'''
Automatically segment a valve and/or lever hanging on the wall, near the wall center.
'''
# find the valve wall and its center
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
t = findWallCenter(polyData, removeGroundMethod)
valve_point1 = [ 0 , 0.6 , 0]
valveTransform1 = transformUtils.frameFromPositionAndRPY(valve_point1, [0,0,0])
valveTransform1.Concatenate(t)
point1 = np.array(valveTransform1.GetPosition()) # left of wall
valve_point2 = [ 0 , -0.6 , 0]
valveTransform2 = transformUtils.frameFromPositionAndRPY(valve_point2, [0,0,0])
valveTransform2.Concatenate(t)
point2 = np.array(valveTransform2.GetPosition()) # right of wall
valve_point3 = [ 0 , 1.0 , 0] # lever can overhang
valveTransform3 = transformUtils.frameFromPositionAndRPY(valve_point3, [0,0,0])
valveTransform3.Concatenate(t)
point3 = valveTransform3.GetPosition() # further out on the same side as point1 (lever side)
d = DebugData()
d.addSphere(point2, radius=0.01)
d.addSphere(point1, radius=0.03)
d.addSphere(point3, radius=0.01)
updatePolyData(d.getPolyData(), 'auto wall points', parent=getDebugFolder(), visible=False)
if (mode=='valve'):
segmentValveByWallPlane(expectedValveRadius, point1, point2)
elif (mode=='lever'):
segmentLeverByWallPlane(point1, point3)
elif (mode=='both'):
segmentValveByWallPlane(expectedValveRadius, point1, point2)
segmentLeverByWallPlane(point1, point3)
else:
raise Exception('unexpected segmentation mode: ' + mode)
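# Hedged usage sketch (not part of the original module): typical calls; the 0.2 m radius is only
# an illustration of overriding the 0.195 m default.
def _exampleSegmentValveWallAuto():
    segmentValveWallAuto(mode='valve')                          # fit the valve only
    segmentValveWallAuto(expectedValveRadius=0.2, mode='both')  # fit the valve and the lever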
def segmentLeverByWallPlane(point1, point2):
'''
Determine the position (including rotation) of a lever near a wall.
The input is the same as for the valve: two points on the wall, one on either side of the lever.
'''
# 1. determine the wall plane and normal
centerPoint = (point1 + point2) / 2.0
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=-viewDirection, returnOrigin=True)
# 2. Crop the cloud down to the lever only using the wall plane
perpLine = np.cross(point2 - point1, -normal)
#perpLine /= np.linalg.norm(perpLine)
#perpLine * np.linalg.norm(point2 - point1)/2.0
point3, point4 = centerPoint + perpLine/2.0, centerPoint - perpLine/2.0
d = DebugData()
d.addLine(point1, point2)
d.addLine(point3, point4)
updatePolyData(d.getPolyData(), 'lever crop lines', parent=getDebugFolder(), visible=False)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'lever valve wall', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.12, 0.2]) # very tight threshold
searchRegion = cropToLineSegment(searchRegion, point1, point2)
searchRegion = cropToLineSegment(searchRegion, point3, point4)
updatePolyData(searchRegion, 'lever search region', parent=getDebugFolder(), color=[1,0,0], visible=False)
# 3. fit line to remaining points - all assumed to be the lever
linePoint, lineDirection, _ = applyLineFit(searchRegion, distanceThreshold=0.02)
#if np.dot(lineDirection, forwardDirection) < 0:
# lineDirection = -lineDirection
d = DebugData()
d.addSphere(linePoint, radius=0.02)
updatePolyData(d.getPolyData(), 'lever point', parent=getDebugFolder(), visible=False)
pts = vtkNumpy.getNumpyFromVtk(searchRegion, 'Points')
dists = np.dot(pts-linePoint, lineDirection)
lever_center = linePoint + lineDirection*np.min(dists)
lever_tip = linePoint + lineDirection*np.max(dists)
# 4. determine which lever point is closest to the lower left of the wall. That's the lever_center point
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
# a distant point down and left from wall
wall_point_lower_left = [ -20 , -20.0 , 0]
wall_point_lower_left_Transform = transformUtils.frameFromPositionAndRPY(wall_point_lower_left, [0,0,0])
wall_point_lower_left_Transform.Concatenate(t)
wall_point_lower_left = wall_point_lower_left_Transform.GetPosition()
d1 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_center, origin, normal) )**2) )
d2 = np.sqrt( np.sum((wall_point_lower_left- projectPointToPlane(lever_tip, origin, normal) )**2) )
if (d2 < d1): # flip the points to match variable names
p_temp = lever_center
lever_center = lever_tip
lever_tip = p_temp
lineDirection = -lineDirection
# 5. compute the rotation angle of the lever and, using that, its frame
zaxis = -normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(lever_center) # nominal frame at lever center
rotationAngle = -computeSignedAngleBetweenVectors(lineDirection, [0, 0, 1], -normal)
t_lever = transformUtils.frameFromPositionAndRPY( [0,0,0], [0,0, math.degrees( rotationAngle ) ] )
t_lever.PostMultiply()
t_lever.Concatenate(t)
d = DebugData()
# d.addSphere( point1 , radius=0.1)
d.addSphere( wall_point_lower_left , radius=0.1)
d.addSphere(lever_center, radius=0.04)
d.addSphere(lever_tip, radius=0.01)
d.addLine(lever_center, lever_tip)
updatePolyData(d.getPolyData(), 'lever end points', color=[0,1,0], parent=getDebugFolder(), visible=False)
radius = 0.01
length = np.sqrt( np.sum((lever_tip - lever_center )**2) )
d = DebugData()
d.addLine([0,0,0], [length, 0, 0], radius=radius)
d.addSphere ( [0, 0, 0], 0.02)
geometry = d.getPolyData()
obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances' , color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t_lever)
obj.addToView(app.getDRCView())
frameObj = showFrame(t_lever, 'lever frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
otdfType = 'lever_valve'
params = dict(origin=np.array(t_lever.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def applyICP(source, target):
icp = vtk.vtkIterativeClosestPointTransform()
icp.SetSource(source)
icp.SetTarget(target)
icp.GetLandmarkTransform().SetModeToRigidBody()
icp.Update()
t = vtk.vtkTransform()
t.SetMatrix(icp.GetMatrix())
return t
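# Hedged usage sketch (not part of the original module): register a scan against a model cloud
# and visualise the scan moved by the recovered rigid-body transform.
def _exampleApplyICP(scanPolyData, modelPolyData):
    t = applyICP(scanPolyData, modelPolyData)
    updatePolyData(transformPolyData(scanPolyData, t), 'icp aligned scan', parent=getDebugFolder(), visible=False)
    return t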
def applyDiskGlyphs(polyData):
voxelGridLeafSize = 0.03
normalEstimationSearchRadius = 0.05
diskRadius = 0.015
diskResolution = 12
scanInput = polyData
pd = applyVoxelGrid(scanInput, leafSize=voxelGridLeafSize)
pd = labelOutliers(pd, searchRadius=normalEstimationSearchRadius, neighborsInSearchRadius=3)
pd = thresholdPoints(pd, 'is_outlier', [0, 0])
pd = normalEstimation(pd, searchRadius=normalEstimationSearchRadius, searchCloud=scanInput)
disk = vtk.vtkDiskSource()
disk.SetOuterRadius(diskRadius)
disk.SetInnerRadius(0.0)
disk.SetRadialResolution(0)
disk.SetCircumferentialResolution(diskResolution)
disk.Update()
t = vtk.vtkTransform()
t.RotateY(90)
disk = transformPolyData(disk.GetOutput(), t)
glyph = vtk.vtkGlyph3D()
glyph.ScalingOff()
glyph.OrientOn()
glyph.SetSource(disk)
glyph.SetInput(pd)
glyph.SetVectorModeToUseNormal()
glyph.Update()
return shallowCopy(glyph.GetOutput())
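# Hedged usage sketch (not part of the original module): render the estimated surface normals of
# the current snapshot as oriented disks (a "surfel" style view).
def _exampleApplyDiskGlyphs():
    snapshot = om.findObjectByName('pointcloud snapshot')
    glyphs = applyDiskGlyphs(snapshot.polyData)
    showPolyData(glyphs, 'disk glyphs', visible=True)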
def applyArrowGlyphs(polyData, computeNormals=True, voxelGridLeafSize=0.03, normalEstimationSearchRadius=0.05, arrowSize=0.02):
polyData = applyVoxelGrid(polyData, leafSize=0.02)
if computeNormals:
voxelData = applyVoxelGrid(polyData, leafSize=voxelGridLeafSize)
polyData = normalEstimation(polyData, searchRadius=normalEstimationSearchRadius, searchCloud=voxelData)
polyData = removeNonFinitePoints(polyData, 'normals')
flipNormalsWithViewDirection(polyData, SegmentationContext.getGlobalInstance().getViewDirection())
assert polyData.GetPointData().GetNormals()
arrow = vtk.vtkArrowSource()
arrow.Update()
glyph = vtk.vtkGlyph3D()
glyph.SetScaleFactor(arrowSize)
glyph.SetSource(arrow.GetOutput())
glyph.SetInput(polyData)
glyph.SetVectorModeToUseNormal()
glyph.Update()
return shallowCopy(glyph.GetOutput())
def segmentLeverValve(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
radius = 0.01
length = 0.33
normal = -normal # set z to face into wall
zaxis = normal
xaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point2)
leverP1 = point2
leverP2 = point2 + xaxis * length
d = DebugData()
d.addLine([0,0,0], [length, 0, 0], radius=radius)
d.addSphere ( [0, 0, 0], 0.02)
geometry = d.getPolyData()
obj = showPolyData(geometry, 'valve lever', cls=FrameAffordanceItem, parent='affordances', color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
frameObj = showFrame(t, 'lever frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
otdfType = 'lever_valve'
params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, radius=radius, length=length, friendly_name=otdfType, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def segmentWye(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
wyeMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/wye.obj'))
wyeMeshPoint = np.array([0.0, 0.0, 0.005])
wyeMeshLeftHandle = np.array([0.032292, 0.02949, 0.068485])
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-wyeMeshPoint)
t.PostMultiply()
t.Translate(point2)
d = DebugData()
d.addSphere(point2, radius=0.005)
updatePolyData(d.getPolyData(), 'wye pick point', parent=getDebugFolder(), visible=False)
wyeObj = showPolyData(wyeMesh, 'wye', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
wyeObj.actor.SetUserTransform(t)
wyeObj.addToView(app.getDRCView())
frameObj = showFrame(t, 'wye frame', parent=wyeObj, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=np.array(t.GetPosition()), xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='wye', otdf_type='wye')
wyeObj.setAffordanceParams(params)
wyeObj.updateParamsFromActorTransform()
def segmentDoorHandle(otdfType, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
handlePoint = np.array([0.005, 0.065, 0.011])
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
xwidth = 0.01
ywidth = 0.13
zwidth = 0.022
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
#t.PreMultiply()
#t.Translate(-handlePoint)
t.PostMultiply()
t.Translate(point2)
name = 'door handle'
obj = showPolyData(cube, name, cls=FrameAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
def segmentTruss(point1, point2):
edge = point2 - point1
edgeLength = np.linalg.norm(edge)
stanceOffset = [-0.42, 0.0, 0.0]
stanceYaw = 0.0
d = DebugData()
p1 = [0.0, 0.0, 0.0]
p2 = -np.array([0.0, -1.0, 0.0]) * edgeLength
d.addSphere(p1, radius=0.02)
d.addSphere(p2, radius=0.02)
d.addLine(p1, p2)
stanceTransform = vtk.vtkTransform()
stanceTransform.PostMultiply()
stanceTransform.Translate(stanceOffset)
#stanceTransform.RotateZ(stanceYaw)
geometry = transformPolyData(d.getPolyData(), stanceTransform.GetLinearInverse())
yaxis = edge/edgeLength
zaxis = [0.0, 0.0, 1.0]
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xwidth = 0.1
ywidth = edgeLength
zwidth = 0.1
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Concatenate(stanceTransform)
t.PostMultiply()
t.Translate(point1)
name = 'truss'
otdfType = 'robot_knees'
obj = showPolyData(geometry, name, cls=FrameAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=t.GetPosition(), xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, friendly_name=name, otdf_type=otdfType)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
frameObj = showFrame(obj.actor.GetUserTransform(), name + ' frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
def segmentHoseNozzle(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
searchRegion = cropToSphere(polyData, point1, 0.10)
updatePolyData(searchRegion, 'nozzle search region', parent=getDebugFolder(), visible=False)
xaxis = [1,0,0]
yaxis = [0,-1,0]
zaxis = [0,0,-1]
origin = point1
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(point1)
nozzleRadius = 0.0266
nozzleLength = 0.042
nozzleTipRadius = 0.031
nozzleTipLength = 0.024
d = DebugData()
d.addLine(np.array([0,0,-nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0]), radius=nozzleRadius)
d.addLine(np.array([0,0,nozzleLength/2.0]), np.array([0,0,nozzleLength/2.0 + nozzleTipLength]), radius=nozzleTipRadius)
obj = showPolyData(d.getPolyData(), 'hose nozzle', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
frameObj = showFrame(t, 'nozzle frame', parent=obj, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1, friendly_name='firehose', otdf_type='firehose')
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def segmentDrillWall(point1, point2, point3):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
points = [point1, point2, point3]
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
expectedNormal = np.cross(point2 - point1, point3 - point1)
expectedNormal /= np.linalg.norm(expectedNormal)
if np.dot(expectedNormal, viewPlaneNormal) < 0:
expectedNormal *= -1.0
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=(point1 + point2 + point3)/3.0, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
points = [projectPointToPlane(point, origin, normal) for point in points]
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(points[0])
d = DebugData()
pointsInWallFrame = []
for p in points:
pp = np.zeros(3)
t.GetLinearInverse().TransformPoint(p, pp)
pointsInWallFrame.append(pp)
d.addSphere(pp, radius=0.02)
for a, b in zip(pointsInWallFrame, pointsInWallFrame[1:] + [pointsInWallFrame[0]]):
d.addLine(a, b, radius=0.015)
aff = showPolyData(d.getPolyData(), 'drill target', cls=FrameAffordanceItem, color=[0,1,0], visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill target frame', parent=aff, visible=False)
refitWallCallbacks.append(functools.partial(refitDrillWall, aff))
params = dict(origin=points[0], xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
p1y=pointsInWallFrame[0][1], p1z=pointsInWallFrame[0][2],
p2y=pointsInWallFrame[1][1], p2z=pointsInWallFrame[1][2],
p3y=pointsInWallFrame[2][1], p3z=pointsInWallFrame[2][2],
friendly_name='drill_wall', otdf_type='drill_wall')
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
refitWallCallbacks = []
def refitWall(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
wallPoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(wallPoints, 'wall points', parent=getDebugFolder(), visible=False)
for func in refitWallCallbacks:
func(point1, origin, normal)
def refitDrillWall(aff, point1, origin, normal):
t = aff.actor.GetUserTransform()
targetOrigin = np.array(t.GetPosition())
projectedOrigin = projectPointToPlane(targetOrigin, origin, normal)
projectedOrigin[2] = targetOrigin[2]
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(projectedOrigin)
aff.actor.GetUserTransform().SetMatrix(t.GetMatrix())
# this should be deprecated!
def getGroundHeightFromFeet():
rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
return np.array(rfoot.GetPosition())[2] - 0.0745342
# this should be deprecated!
def getTranslationRelativeToFoot(t):
rfoot = getLinkFrame( drcargs.getDirectorConfig()['rightFootLink'] )
def segmentDrillWallConstrained(rightAngleLocation, point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
expectedNormal = np.cross(point2 - point1, [0.0, 0.0, 1.0])
expectedNormal /= np.linalg.norm(expectedNormal)
if np.dot(expectedNormal, viewPlaneNormal) < 0:
expectedNormal *= -1.0
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
triangleOrigin = projectPointToPlane(point2, origin, normal)
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(triangleOrigin)
createDrillWall(rightAngleLocation, t)
def createDrillWall(rightAngleLocation, trianglePose):
# recover the origin and axes from the pose:
triangleOrigin = trianglePose.GetPosition()
xaxis, yaxis, zaxis = transformUtils.getAxesFromTransform( trianglePose )
# 0.6096 m = 24 in * 0.0254 m/in (approximated as 0.6 below)
# 0.3048 m = 12 in * 0.0254 m/in (approximated as 0.3 below)
edgeRight = np.array([0.0, -1.0, 0.0]) * (0.6)
edgeUp = np.array([0.0, 0.0, 1.0]) * (0.3)
pointsInWallFrame = np.zeros((3,3))
if rightAngleLocation == DRILL_TRIANGLE_BOTTOM_LEFT:
pointsInWallFrame[1] = edgeUp
pointsInWallFrame[2] = edgeRight
elif rightAngleLocation == DRILL_TRIANGLE_BOTTOM_RIGHT:
pointsInWallFrame[1] = edgeUp # edgeRight +edgeUp
pointsInWallFrame[2] = -edgeRight # edgeRight
elif rightAngleLocation == DRILL_TRIANGLE_TOP_LEFT:
pointsInWallFrame[1] = edgeRight
pointsInWallFrame[2] = -edgeUp
elif rightAngleLocation == DRILL_TRIANGLE_TOP_RIGHT:
pointsInWallFrame[1] = edgeRight
pointsInWallFrame[2] = edgeRight - edgeUp
else:
raise Exception('unexpected value for right angle location: ' + str(rightAngleLocation))
center = pointsInWallFrame.sum(axis=0)/3.0
shrinkFactor = 1 # previously 0.90
shrinkPoints = (pointsInWallFrame - center) * shrinkFactor + center
d = DebugData()
for p in pointsInWallFrame:
d.addSphere(p, radius=0.015)
for a, b in zip(pointsInWallFrame, np.vstack((pointsInWallFrame[1:], pointsInWallFrame[0]))):
d.addLine(a, b, radius=0.005) # was 0.01
for a, b in zip(shrinkPoints, np.vstack((shrinkPoints[1:], shrinkPoints[0]))):
d.addLine(a, b, radius=0.005) # was 0.025
folder = om.getOrCreateContainer('affordances')
wall = om.findObjectByName('wall')
om.removeFromObjectModel(wall)
aff = showPolyData(d.getPolyData(), 'wall', cls=FrameAffordanceItem, color=[0,1,0], visible=True, parent=folder)
aff.actor.SetUserTransform(trianglePose)
aff.addToView(app.getDRCView())
refitWallCallbacks.append(functools.partial(refitDrillWall, aff))
frameObj = showFrame(trianglePose, 'wall frame', parent=aff, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
params = dict(origin=triangleOrigin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
p1y=shrinkPoints[0][1], p1z=shrinkPoints[0][2],
p2y=shrinkPoints[1][1], p2z=shrinkPoints[1][2],
p3y=shrinkPoints[2][1], p3z=shrinkPoints[2][2],
friendly_name='drill_wall', otdf_type='drill_wall')
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
'''
rfoot = getLinkFrame(drcargs.getDirectorConfig()['rightFootLink'])
tt = getTransformFromAxes(xaxis, yaxis, zaxis)
tt.PostMultiply()
tt.Translate(rfoot.GetPosition())
showFrame(tt, 'rfoot with wall orientation')
aff.footToAffTransform = computeAToB(tt, trianglePose)
footToAff = list(aff.footToAffTransform.GetPosition())
tt.TransformVector(footToAff, footToAff)
d = DebugData()
d.addSphere(tt.GetPosition(), radius=0.02)
d.addLine(tt.GetPosition(), np.array(tt.GetPosition()) + np.array(footToAff))
showPolyData(d.getPolyData(), 'rfoot debug')
'''
def getDrillAffordanceParams(origin, xaxis, yaxis, zaxis, drillType="dewalt_button"):
if (drillType=="dewalt_button"):
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
button_x=0.007,
button_y=-0.035,
button_z=-0.06,
button_roll=-90.0,
button_pitch=-90.0,
button_yaw=0.0,
bit_x=-0.01,
bit_y=0.0,
bit_z=0.15,
bit_roll=0,
bit_pitch=-90,
bit_yaw=0,
friendly_name='dewalt_button', otdf_type='dewalt_button')
else:
params = dict(origin=origin, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis, xwidth=0.1, ywidth=0.1, zwidth=0.1,
button_x=0.007,
button_y=-0.035,
button_z=-0.06,
button_roll=0.0,
button_pitch=0.0,
button_yaw=0.0,
bit_x=0.18,
bit_y=0.0,
bit_z=0.13,
bit_roll=0,
bit_pitch=0,
bit_yaw=0,
friendly_name='dewalt_barrel', otdf_type='dewalt_barrel')
return params
def getDrillMesh(applyBitOffset=False):
button = np.array([0.007, -0.035, -0.06])
drillMesh = ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt_button.obj'))
if applyBitOffset:
t = vtk.vtkTransform()
t.Translate(0.01, 0.0, 0.0)
drillMesh = transformPolyData(drillMesh, t)
d = DebugData()
d.addPolyData(drillMesh)
d.addSphere(button, radius=0.005, color=[0,1,0])
d.addLine([0.0,0.0,0.155], [0.0, 0.0, 0.14], radius=0.001, color=[0,1,0])
return shallowCopy(d.getPolyData())
def getDrillBarrelMesh():
return ioUtils.readPolyData(os.path.join(app.getDRCBase(), 'software/models/otdf/dewalt.ply'), computeNormals=True)
def segmentDrill(point1, point2, point3):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=viewPlaneNormal, searchOrigin=point1, searchRadius=0.2, angleEpsilon=0.7, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
searchRegion = cropToSphere(searchRegion, point2, 0.30)
drillPoints = extractLargestCluster(searchRegion)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = point3 - point2
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(point2)
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def makePolyDataFields(pd):
mesh = computeDelaunay3D(pd)
if not mesh.GetNumberOfPoints():
return None
origin, edges, wireframe = getOrientedBoundingBox(mesh)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
boxCenter = computeCentroid(wireframe)
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(boxCenter)
pd = transformPolyData(pd, t.GetLinearInverse())
wireframe = transformPolyData(wireframe, t.GetLinearInverse())
mesh = transformPolyData(mesh, t.GetLinearInverse())
return FieldContainer(points=pd, box=wireframe, mesh=mesh, frame=t, dims=edgeLengths, axes=axes)
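# Hedged usage sketch (not part of the original module): the returned FieldContainer stores the
# points, box wireframe and mesh in the box-aligned frame, so the world-frame bounding box is
# recovered by pushing fields.box through fields.frame (as segmentValveByRim does above).
def _exampleShowOrientedBox(cluster):
    fields = makePolyDataFields(cluster)
    if fields is not None:
        updatePolyData(transformPolyData(fields.box, fields.frame), 'oriented bounding box', parent=getDebugFolder(), visible=False)
    return fields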
def makeMovable(obj, initialTransform=None):
'''
Adds a child frame to the given PolyDataItem. If initialTransform is not
given, then an origin frame is computed for the polydata using the
center and orientation of its oriented bounding box. The polydata
is transformed using the inverse of initialTransform and then a child frame
is assigned to the object to reposition it.
'''
pd = obj.polyData
t = initialTransform
if t is None:
origin, edges, wireframe = getOrientedBoundingBox(pd)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
boxCenter = computeCentroid(wireframe)
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(boxCenter)
pd = transformPolyData(pd, t.GetLinearInverse())
obj.setPolyData(pd)
frame = obj.getChildFrame()
if frame:
frame.copyFrame(t)
else:
frame = vis.showFrame(t, obj.getProperty('Name') + ' frame', parent=obj, scale=0.2, visible=False)
obj.actor.SetUserTransform(t)
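# Hedged usage sketch (not part of the original module): turn a previously static segmentation
# result into a repositionable object; the object name below is only an example.
def _exampleMakeMovable():
    obj = om.findObjectByName('valve cluster')
    if obj is not None:
        makeMovable(obj)
    return obj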
def segmentTable(polyData, searchPoint):
'''
Segment a horizontal table surface (perpendicular to +Z) in the given polyData
using the given search point.
Returns (polyData, tablePoints, origin, normal), where the returned polyData is the
input cloud with a new 'dist_to_plane' point attribute.
'''
expectedNormal = np.array([0.0, 0.0, 1.0])
tableNormalEpsilon = 0.4
polyData = applyVoxelGrid(polyData, leafSize=0.01)
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=searchPoint, searchRadius=0.3, angleEpsilon=tableNormalEpsilon, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
tablePoints = labelDistanceToPoint(tablePoints, searchPoint)
tablePointsClusters = extractClusters(tablePoints, minClusterSize=10, clusterTolerance=0.1)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
return polyData, tablePoints, origin, normal
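# Hedged usage sketch (not part of the original module): fit the table under a user pick and keep
# only the points a few centimetres above it, which is the pattern segmentTableSceneClusters uses
# below. The [0.02, 0.5] band is illustrative.
def _exampleSegmentTable(polyData, pickedPoint):
    polyData, tablePoints, origin, normal = segmentTable(polyData, pickedPoint)
    return thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.5])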
def filterClusterObjects(clusters):
result = []
for cluster in clusters:
if np.abs(np.dot(cluster.axes[0], [0,0,1])) < 0.5:
continue
if cluster.dims[0] < 0.1:
continue
result.append(cluster)
return result
def segmentTableThenFindDrills(polyData,pickedPoint):
''' Given a point cloud of a table with drills on it,
find all clusters and fit a drill to each.
Assumes that all clusters are drills;
nothing else is ever on a table ;)
'''
# 1 segment a table and return clusters and the plane normal
clusters, tablePoints, plane_origin, plane_normal = segmentTableSceneClusters(polyData, pickedPoint, True)
# 2 Detect drills within the clusters:
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
forwardDirection = np.array([1.0, 0.0, 0.0])
viewFrame.TransformVector(forwardDirection, forwardDirection)
robotForward = forwardDirection
fitResults=[]
for clusterObj in clusters:
# vis.showPolyData(clusterObj, 'cluster debug')
drillFrame = fitDrillBarrel (clusterObj, robotForward, plane_origin, plane_normal)
if drillFrame is not None:
fitResults.append((clusterObj, drillFrame))
if not fitResults:
return
for i, fitResult in enumerate(fitResults):
cluster, drillFrame = fitResult
drillOrigin = np.array(drillFrame.GetPosition())
drillMesh = getDrillBarrelMesh()
#drill = om.findObjectByName('drill')
name= 'drill %d' % i
name2= 'drill %d frame' % i
drill = showPolyData(drillMesh, name, cls=FrameAffordanceItem, color=[0, 1, 0], visible=True)
drillFrame = updateFrame(drillFrame, name2, parent=drill, scale=0.2, visible=False)
drill.actor.SetUserTransform(drillFrame.transform)
drill.setSolidColor([0, 1, 0])
#cluster.setProperty('Visible', True)
def segmentTableScene(polyData, searchPoint, filterClustering = True):
''' This seems to be unused; deprecated? '''
objectClusters, tablePoints, _, _ = segmentTableSceneClusters(polyData, searchPoint)
clusters = [makePolyDataFields(cluster) for cluster in objectClusters]
clusters = [cluster for cluster in clusters if cluster is not None]
if (filterClustering):
clusters = filterClusterObjects(clusters)
return FieldContainer(table=makePolyDataFields(tablePoints), clusters=clusters)
def segmentTableSceneClusters(polyData, searchPoint, clusterInXY=False):
''' Given a point cloud of a table with some objects on it,
and a point on that table,
determine the plane of the table and
extract the clusters above it.
'''
polyData, tablePoints, plane_origin, plane_normal = segmentTable(polyData, searchPoint)
tableCentroid = computeCentroid(tablePoints)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.5])
# TODO: replace with 'all points above the table':
searchRegion = cropToSphere(searchRegion, tableCentroid, 0.5) # was 1.0
tableCentroidFrame = transformUtils.frameFromPositionAndRPY(tableCentroid, [0,0,0])
showFrame(tableCentroidFrame, 'tableCentroid', visible=False, parent=getDebugFolder(), scale=0.15)
showPolyData(searchRegion, 'searchRegion', color=[1,0,0], visible=False, parent=getDebugFolder())
objectClusters = extractClusters(searchRegion, clusterInXY, clusterTolerance=0.02, minClusterSize=10)
#print 'got %d clusters' % len(objectClusters)
for i,c in enumerate(objectClusters):
name= "cluster %d" % i
showPolyData(c, name, color=getRandomColor(), visible=False, parent=getDebugFolder())
return objectClusters, tablePoints, plane_origin, plane_normal
def segmentTableEdge(polyData, searchPoint, edgePoint):
'''
segment a table using two points:
searchPoint is a point on the table top
edgePoint is a point on the edge facing the robot
'''
polyData, tablePoints, origin, normal = segmentTable(polyData, searchPoint)
tableMesh = computeDelaunay3D(tablePoints)
origin, edges, wireframe = getOrientedBoundingBox(tableMesh)
origin = origin + 0.5*np.sum(edges, axis=0)
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
def findAxis(referenceVector):
refAxis = referenceVector / np.linalg.norm(referenceVector)
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
axisIndex = axisProjections.argmax()
axis = axes[axisIndex]
if np.dot(axis, refAxis) < 0:
axis = -axis
return axis, axisIndex
tableXAxis, tableXAxisIndex = findAxis(searchPoint - edgePoint)
tableZAxis, tableZAxisIndex = findAxis([0,0,1])
tableYAxis, tableYAxisIndex = findAxis(np.cross(tableZAxis, tableXAxis))
assert len(set([tableXAxisIndex, tableYAxisIndex, tableZAxisIndex])) == 3
axes = tableXAxis, tableYAxis, tableZAxis
edgeLengths = edgeLengths[tableXAxisIndex], edgeLengths[tableYAxisIndex], edgeLengths[tableZAxisIndex]
edgeCenter = origin - 0.5 * axes[0]*edgeLengths[0] + 0.5*axes[2]*edgeLengths[2]
edgeLeft = edgeCenter + 0.5 * axes[1]*edgeLengths[1]
edgeRight = edgeCenter - 0.5 * axes[1]*edgeLengths[1]
t = getTransformFromAxes(axes[0], axes[1], axes[2])
t.PostMultiply()
t.Translate(edgeRight)
table_center = [edgeLengths[0]/2, edgeLengths[1]/2, -edgeLengths[2]/2]
t.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY(table_center,[0,0,0])
t.Concatenate(t3)
tablePoints = transformPolyData(tablePoints, t.GetLinearInverse())
wireframe = transformPolyData(wireframe, t.GetLinearInverse())
tableMesh = transformPolyData(tableMesh, t.GetLinearInverse())
return FieldContainer(points=tablePoints, box=wireframe, mesh=tableMesh, frame=t, dims=edgeLengths, axes=axes)
def segmentDrillAuto(point1, polyData=None):
if polyData is None:
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
expectedNormal = np.array([0.0, 0.0, 1.0])
polyData, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, searchOrigin=point1, searchRadius=0.4, angleEpsilon=0.2, returnOrigin=True)
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
tablePoints = labelDistanceToPoint(tablePoints, point1)
tablePointsClusters = extractClusters(tablePoints)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.03, 0.4])
searchRegion = cropToSphere(searchRegion, point1, 0.30)
drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)
# determine drill orientation (rotation about z axis)
centroids = computeCentroids(drillPoints, axis=normal)
centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
d = DebugData()
updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = centroids[0] - centroids[-1]
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
# note this hack to orient the drill correctly:
t = getTransformFromAxes(yaxis, -xaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(centroids[-1])
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False, scale=0.2).addToView(app.getDRCView())
params = getDrillAffordanceParams(origin, xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def segmentDrillButton(point1):
d = DebugData()
d.addSphere([0,0,0], radius=0.005)
obj = updatePolyData(d.getPolyData(), 'sensed drill button', color=[0,0.5,0.5], visible=True)
# there is no orientation, but this allows the XYZ point to be queried
pointerTipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
obj.actor.SetUserTransform(pointerTipFrame)
obj.addToView(app.getDRCView())
frameObj = updateFrame(obj.actor.GetUserTransform(), 'sensed drill button frame', parent=obj, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
def segmentPointerTip(point1):
d = DebugData()
d.addSphere([0,0,0], radius=0.005)
obj = updatePolyData(d.getPolyData(), 'sensed pointer tip', color=[0.5,0.5,0.0], visible=True)
# there is no orientation, but this allows the XYZ point to be queried
pointerTipFrame = transformUtils.frameFromPositionAndRPY(point1, [0,0,0])
obj.actor.SetUserTransform(pointerTipFrame)
obj.addToView(app.getDRCView())
frameObj = updateFrame(obj.actor.GetUserTransform(), 'sensed pointer tip frame', parent=obj, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
def fitGroundObject(polyData=None, expectedDimensionsMin=[0.2, 0.02], expectedDimensionsMax=[1.3, 0.1]):
removeGroundFunc = removeGroundSimple
polyData = polyData or getCurrentRevolutionData()
groundPoints, scenePoints = removeGroundFunc(polyData, groundThickness=0.02, sceneHeightFromGround=0.035)
searchRegion = thresholdPoints(scenePoints, 'dist_to_plane', [0.05, 0.2])
clusters = extractClusters(searchRegion, clusterTolerance=0.07, minClusterSize=4)
candidates = []
for clusterId, cluster in enumerate(clusters):
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]
found = (expectedDimensionsMin[0] <= edgeLengths[0] < expectedDimensionsMax[0]
and expectedDimensionsMin[1] <= edgeLengths[1] < expectedDimensionsMax[1])
if not found:
updatePolyData(cluster, 'candidate cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
continue
updatePolyData(cluster, 'cluster %d' % clusterId, color=[0,1,0], parent=getDebugFolder(), visible=False)
candidates.append(cluster)
if not candidates:
return None
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
viewOrigin = np.array(viewFrame.GetPosition())
dists = [np.linalg.norm(viewOrigin - computeCentroid(cluster)) for cluster in candidates]
candidates = [candidates[i] for i in np.argsort(dists)]
cluster = candidates[0]
obj = makePolyDataFields(cluster)
return vis.showClusterObjects([obj], parent='segmentation')[0]
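# Hedged usage sketch (not part of the original module): fit a plank-sized object lying near the
# ground in the latest sweep and make the resulting cluster object visible.
def _exampleFitGroundObject():
    obj = fitGroundObject()
    if obj is not None:
        obj.setProperty('Visible', True)
    return obj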
def findHorizontalSurfaces(polyData, removeGroundFirst=False, normalEstimationSearchRadius=0.05,
clusterTolerance=0.025, distanceToPlaneThreshold=0.0025, normalsDotUpRange=[0.95, 1.0], showClusters=False):
'''
Find the horizontal surfaces, tuned to work with walking terrain
'''
searchZ = [0.0, 2.0]
voxelGridLeafSize = 0.01
minClusterSize = 150
verboseFlag = False
if (removeGroundFirst):
groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.05)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', searchZ)
updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=verboseFlag)
else:
scenePoints = polyData
if not scenePoints.GetNumberOfPoints():
return
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(scenePoints)
f.SetInput(1, applyVoxelGrid(scenePoints, voxelGridLeafSize))
# Duration 0.2 sec for V1 log:
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotUp = np.abs(np.dot(normals, [0,0,1]))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')
surfaces = thresholdPoints(scenePoints, 'normals_dot_up', normalsDotUpRange)
updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
updatePolyData(surfaces, 'surfaces points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=verboseFlag)
clusters = extractClusters(surfaces, clusterTolerance=clusterTolerance, minClusterSize=minClusterSize)
planeClusters = []
clustersLarge = []
om.removeFromObjectModel(om.findObjectByName('surface clusters'))
folder = om.getOrCreateContainer('surface clusters', parentObj=getDebugFolder())
for i, cluster in enumerate(clusters):
updatePolyData(cluster, 'surface cluster %d' % i, parent=folder, color=getRandomColor(), visible=verboseFlag)
planePoints, _ = applyPlaneFit(cluster, distanceToPlaneThreshold)
planePoints = thresholdPoints(planePoints, 'dist_to_plane', [-distanceToPlaneThreshold, distanceToPlaneThreshold])
if planePoints.GetNumberOfPoints() > minClusterSize:
clustersLarge.append(cluster)
obj = makePolyDataFields(planePoints)
if obj is not None:
planeClusters.append(obj)
folder = om.getOrCreateContainer('surface objects', parentObj=getDebugFolder())
if showClusters:
vis.showClusterObjects(planeClusters, parent=folder)
return clustersLarge
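# Hedged usage sketch (not part of the original module): run the horizontal-surface finder on the
# most recent sweep with ground removal and show the fitted plane clusters.
def _exampleFindHorizontalSurfaces():
    return findHorizontalSurfaces(getCurrentRevolutionData(), removeGroundFirst=True, showClusters=True)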
def fitVerticalPosts(polyData):
groundPoints, scenePoints = removeGround(polyData)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.1, 4.0])
if not scenePoints.GetNumberOfPoints():
return
scenePoints = applyVoxelGrid(scenePoints, leafSize=0.03)
clusters = extractClusters(scenePoints, clusterTolerance=0.15, minClusterSize=10)
def isPostCluster(cluster, lineDirection):
up = [0,0,1]
minPostLength = 1.0
maxRadius = 0.3
angle = math.degrees(math.acos(np.dot(up,lineDirection) / (np.linalg.norm(up) * np.linalg.norm(lineDirection))))
if angle > 15:
return False
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges]
if edgeLengths[0] < minPostLength:
return False
# extract top half
zvalues = vtkNumpy.getNumpyFromVtk(cluster, 'Points')[:,2].copy()
vtkNumpy.addNumpyToVtk(cluster, zvalues, 'z')
minZ = np.min(zvalues)
maxZ = np.max(zvalues)
cluster = thresholdPoints(cluster, 'z', [(minZ + maxZ)/2.0, maxZ])
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges]
if edgeLengths[1] > maxRadius or edgeLengths[2] > maxRadius:
return False
return True
def makeCylinderAffordance(linePoints, lineDirection, lineOrigin, postId):
pts = vtkNumpy.getNumpyFromVtk(linePoints, 'Points')
dists = np.dot(pts-lineOrigin, lineDirection)
p1 = lineOrigin + lineDirection*np.min(dists)
p2 = lineOrigin + lineDirection*np.max(dists)
origin = (p1+p2)/2.0
lineLength = np.linalg.norm(p2-p1)
t = transformUtils.getTransformFromOriginAndNormal(origin, lineDirection)
pose = transformUtils.poseFromTransform(t)
desc = dict(classname='CylinderAffordanceItem', Name='post %d' % postId,
uuid=newUUID(), pose=pose, Radius=0.05, Length=float(lineLength), Color=[0.0, 1.0, 0.0])
desc['Collision Enabled'] = True
return affordanceManager.newAffordanceFromDescription(desc)
rejectFolder = om.getOrCreateContainer('nonpost clusters', parentObj=getDebugFolder())
keepFolder = om.getOrCreateContainer('post clusters', parentObj=getDebugFolder())
for i, cluster in enumerate(clusters):
linePoint, lineDirection, linePoints = applyLineFit(cluster, distanceThreshold=0.1)
if isPostCluster(cluster, lineDirection):
vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=keepFolder)
makeCylinderAffordance(linePoints, lineDirection, linePoint, i)
else:
vis.showPolyData(cluster, 'cluster %d' % i, visible=False, color=getRandomColor(), alpha=0.5, parent=rejectFolder)
def findAndFitDrillBarrel(polyData=None):
''' Find the horizontal surfaces
on the horizontal surfaces, find all the drills
'''
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = polyData or inputObj.polyData
groundPoints, scenePoints = removeGround(polyData, groundThickness=0.02, sceneHeightFromGround=0.50)
scenePoints = thresholdPoints(scenePoints, 'dist_to_plane', [0.5, 1.7])
if not scenePoints.GetNumberOfPoints():
return
normalEstimationSearchRadius = 0.10
f = vtk.vtkPCLNormalEstimation()
f.SetSearchRadius(normalEstimationSearchRadius)
f.SetInput(scenePoints)
f.Update()
scenePoints = shallowCopy(f.GetOutput())
normals = vtkNumpy.getNumpyFromVtk(scenePoints, 'normals')
normalsDotUp = np.abs(np.dot(normals, [0,0,1]))
vtkNumpy.addNumpyToVtk(scenePoints, normalsDotUp, 'normals_dot_up')
surfaces = thresholdPoints(scenePoints, 'normals_dot_up', [0.95, 1.0])
updatePolyData(groundPoints, 'ground points', parent=getDebugFolder(), visible=False)
updatePolyData(scenePoints, 'scene points', parent=getDebugFolder(), colorByName='normals_dot_up', visible=False)
updatePolyData(surfaces, 'surfaces', parent=getDebugFolder(), visible=False)
clusters = extractClusters(surfaces, clusterTolerance=0.15, minClusterSize=50)
fitResults = []
viewFrame = SegmentationContext.getGlobalInstance().getViewFrame()
forwardDirection = np.array([1.0, 0.0, 0.0])
viewFrame.TransformVector(forwardDirection, forwardDirection)
robotOrigin = viewFrame.GetPosition()
robotForward = forwardDirection
#print 'robot origin:', robotOrigin
#print 'robot forward:', robotForward
centroid = []
for clusterId, cluster in enumerate(clusters):
clusterObj = updatePolyData(cluster, 'surface cluster %d' % clusterId, color=[1,1,0], parent=getDebugFolder(), visible=False)
origin, edges, _ = getOrientedBoundingBox(cluster)
edgeLengths = [np.linalg.norm(edge) for edge in edges[:2]]
skipCluster = False
for edgeLength in edgeLengths:
#print 'cluster %d edge length: %f' % (clusterId, edgeLength)
if edgeLength < 0.35 or edgeLength > 0.75:
skipCluster = True
if skipCluster:
continue
clusterObj.setSolidColor([0, 0, 1])
centroid = np.average(vtkNumpy.getNumpyFromVtk(cluster, 'Points'), axis=0)
try:
drillFrame = segmentDrillBarrelFrame(centroid, polyData=scenePoints, forwardDirection=robotForward)
if drillFrame is not None:
fitResults.append((clusterObj, drillFrame))
except:
print traceback.format_exc()
print 'fit drill failed for cluster:', clusterId
if not fitResults:
return
sortFittedDrills(fitResults, robotOrigin, robotForward)
return centroid
def sortFittedDrills(fitResults, robotOrigin, robotForward):
angleToFitResults = []
for fitResult in fitResults:
cluster, drillFrame = fitResult
drillOrigin = np.array(drillFrame.GetPosition())
angleToDrill = np.abs(computeSignedAngleBetweenVectors(robotForward, drillOrigin - robotOrigin, [0,0,1]))
angleToFitResults.append((angleToDrill, cluster, drillFrame))
#print 'angle to candidate drill:', angleToDrill
angleToFitResults.sort(key=lambda x: x[0])
#print 'using drill at angle:', angleToFitResults[0][0]
drillMesh = getDrillBarrelMesh()
for i, fitResult in enumerate(angleToFitResults):
angleToDrill, cluster, drillFrame = fitResult
if i == 0:
drill = om.findObjectByName('drill')
drill = updatePolyData(drillMesh, 'drill', color=[0, 1, 0], cls=FrameAffordanceItem, visible=True)
drillFrame = updateFrame(drillFrame, 'drill frame', parent=drill, visible=False)
drill.actor.SetUserTransform(drillFrame.transform)
drill.setAffordanceParams(dict(otdf_type='dewalt_button', friendly_name='dewalt_button'))
drill.updateParamsFromActorTransform()
drill.setSolidColor([0, 1, 0])
#cluster.setProperty('Visible', True)
else:
drill = showPolyData(drillMesh, 'drill candidate', color=[1,0,0], visible=False, parent=getDebugFolder())
drill.actor.SetUserTransform(drillFrame)
om.addToObjectModel(drill, parentObj=getDebugFolder())
def computeSignedAngleBetweenVectors(v1, v2, perpendicularVector):
'''
Computes the signed angle between two vectors in 3d, given a perpendicular vector
to determine sign. Result returned is radians.
'''
v1 = np.array(v1)
v2 = np.array(v2)
perpendicularVector = np.array(perpendicularVector)
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
perpendicularVector /= np.linalg.norm(perpendicularVector)
return math.atan2(np.dot(perpendicularVector, np.cross(v1, v2)), np.dot(v1, v2))
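# Example (illustrative, not part of the original source; assumes unit axes): the
# signed angle from +x to +y about +z is +pi/2, and swapping the arguments flips it:
#   computeSignedAngleBetweenVectors([1,0,0], [0,1,0], [0,0,1])  # -> ~ +1.5708
#   computeSignedAngleBetweenVectors([0,1,0], [1,0,0], [0,0,1])  # -> ~ -1.5708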
def segmentDrillBarrelFrame(point1, polyData, forwardDirection):
tableClusterSearchRadius = 0.4
drillClusterSearchRadius = 0.5 #0.3
expectedNormal = np.array([0.0, 0.0, 1.0])
if not polyData.GetNumberOfPoints():
return
polyData, plane_origin, plane_normal = applyPlaneFit(polyData, expectedNormal=expectedNormal,
perpendicularAxis=expectedNormal, searchOrigin=point1,
searchRadius=tableClusterSearchRadius, angleEpsilon=0.2, returnOrigin=True)
if not polyData.GetNumberOfPoints():
return
tablePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.01, 0.01])
updatePolyData(tablePoints, 'table plane points', parent=getDebugFolder(), visible=False)
tablePoints = labelDistanceToPoint(tablePoints, point1)
tablePointsClusters = extractClusters(tablePoints)
tablePointsClusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
if not tablePointsClusters:
return
tablePoints = tablePointsClusters[0]
updatePolyData(tablePoints, 'table points', parent=getDebugFolder(), visible=False)
searchRegion = thresholdPoints(polyData, 'dist_to_plane', [0.02, 0.3])
if not searchRegion.GetNumberOfPoints():
return
searchRegion = cropToSphere(searchRegion, point1, drillClusterSearchRadius)
#drillPoints = extractLargestCluster(searchRegion, minClusterSize=1)
t = fitDrillBarrel (searchRegion, forwardDirection, plane_origin, plane_normal)
return t
def segmentDrillBarrel(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
forwardDirection = -np.array(getCurrentView().camera().GetViewPlaneNormal())
t = segmentDrillBarrelFrame(point1, polyData, forwardDirection)
assert t is not None
drillMesh = getDrillBarrelMesh()
aff = showPolyData(drillMesh, 'drill', visible=True)
aff.addToView(app.getDRCView())
aff.actor.SetUserTransform(t)
drillFrame = showFrame(t, 'drill frame', parent=aff, visible=False)
drillFrame.addToView(app.getDRCView())
return aff, drillFrame
def segmentDrillAlignedWithTable(point, polyData = None):
'''
Yet Another Drill Fitting Algorithm [tm]
This one fits the button drill assuming it's on the table
and aligned with the table frame (because the button drill orientation is difficult to find).
The table must have its long side facing the robot.
'''
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = polyData or inputObj.polyData
# segment the table and recover the precise up direction normal:
polyDataOut, tablePoints, origin, normal = segmentTable(polyData,point)
#print origin # this origin is bunk
#tableCentroid = computeCentroid(tablePoints)
# get the bounding box edges
OBBorigin, edges, _ = getOrientedBoundingBox(tablePoints)
#print "OBB out"
#print OBBorigin
#print edges
edgeLengths = np.array([np.linalg.norm(edge) for edge in edges])
axes = [edge / np.linalg.norm(edge) for edge in edges]
#print edgeLengths
#print axes
# check which direction the robot is facing and flip x-axis of table if necessary
viewDirection = SegmentationContext.getGlobalInstance().getViewDirection()
#print "main axes", axes[1]
#print "viewDirection", viewDirection
#dp = np.dot(axes[1], viewDirection)
#print dp
if np.dot(axes[1], viewDirection) < 0:
#print "flip the x-direction"
axes[1] = -axes[1]
# define the x-axis to be along the 2nd largest edge
xaxis = axes[1]
xaxis = np.array(xaxis)
zaxis = np.array( normal )
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
tableOrientation = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
#tableTransform = transformUtils.frameFromPositionAndRPY( tableCentroid , tableOrientation.GetOrientation() )
#updateFrame(tableTransform, 'table frame [z up, x away face]', parent="segmentation", visible=True).addToView(app.getDRCView())
data = segmentTableScene(polyData, point )
#vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')
# crude use of the table frame to determine the frame of the drill on the table
#t2 = transformUtils.frameFromPositionAndRPY([0,0,0], [180, 0 , 90] )
#drillOrientationTransform = transformUtils.copyFrame( om.findObjectByName('object 1 frame').transform )
#drillOrientationTransform.PreMultiply()
#drillOrientationTransform.Concatenate(t2)
#vis.updateFrame(t, 'drillOrientationTransform',visible=True)
#table_xaxis, table_yaxis, table_zaxis = transformUtils.getAxesFromTransform( data.table.frame )
#drillOrientation = transformUtils.orientationFromAxes( table_yaxis, table_xaxis, -1*np.array( table_zaxis) )
drillTransform = transformUtils.frameFromPositionAndRPY( data.clusters[0].frame.GetPosition() , tableOrientation.GetOrientation() )
drillMesh = getDrillMesh()
drill = om.findObjectByName('drill')
om.removeFromObjectModel(drill)
aff = showPolyData(drillMesh, 'drill', color=[0.0, 1.0, 0.0], cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(drillTransform)
aff.addToView(app.getDRCView())
frameObj = updateFrame(drillTransform, 'drill frame', parent=aff, scale=0.2, visible=False)
frameObj.addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(drillTransform.GetPosition()), [1,0,0], [0,1,0], [0,0,1], drillType="dewalt_button")
aff.setAffordanceParams(params)
def segmentDrillInHand(p1, p2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
distanceToLineThreshold = 0.05
polyData = labelDistanceToLine(polyData, p1, p2)
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
lineSegment = p2 - p1
lineLength = np.linalg.norm(lineSegment)
cropped, polyData = cropToPlane(polyData, p1, lineSegment/lineLength, [-0.03, lineLength + 0.03])
updatePolyData(cropped, 'drill cluster', parent=getDebugFolder(), visible=False)
drillPoints = cropped
normal = lineSegment/lineLength
centroids = computeCentroids(drillPoints, axis=normal)
centroidsPolyData = vtkNumpy.getVtkPolyDataFromNumpyPoints(centroids)
d = DebugData()
updatePolyData(centroidsPolyData, 'cluster centroids', parent=getDebugFolder(), visible=False)
drillToTopPoint = np.array([-0.002904, -0.010029, 0.153182])
zaxis = normal
yaxis = centroids[0] - centroids[-1]
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PreMultiply()
t.Translate(-drillToTopPoint)
t.PostMultiply()
t.Translate(p2)
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(t.GetPosition()), xaxis, yaxis, zaxis)
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
def addDrillAffordance():
drillMesh = getDrillMesh()
aff = showPolyData(drillMesh, 'drill', cls=FrameAffordanceItem, visible=True)
t = vtk.vtkTransform()
t.PostMultiply()
aff.actor.SetUserTransform(t)
showFrame(t, 'drill frame', parent=aff, visible=False).addToView(app.getDRCView())
params = getDrillAffordanceParams(np.array(t.GetPosition()), [1,0,0], [0,1,0], [0,0,1])
aff.setAffordanceParams(params)
aff.updateParamsFromActorTransform()
aff.addToView(app.getDRCView())
return aff
def getLinkFrame(linkName):
robotStateModel = om.findObjectByName('robot state model')
robotStateModel = robotStateModel or getVisibleRobotModel()
assert robotStateModel
t = vtk.vtkTransform()
robotStateModel.model.getLinkToWorld(linkName, t)
return t
def getDrillInHandOffset(zRotation=0.0, zTranslation=0.0, xTranslation=0.0, yTranslation=0.0,flip=False):
drillOffset = vtk.vtkTransform()
drillOffset.PostMultiply()
if flip:
drillOffset.RotateY(180)
drillOffset.RotateZ(zRotation)
drillOffset.RotateY(-90)
#drillOffset.Translate(0, 0.09, zTranslation - 0.015)
#drillOffset.Translate(zTranslation - 0.015, 0.035 + xTranslation, 0.0)
drillOffset.Translate(zTranslation, xTranslation, 0.0 + yTranslation)
return drillOffset
def moveDrillToHand(drillOffset, hand='right'):
drill = om.findObjectByName('drill')
if not drill:
drill = addDrillAffordance()
assert hand in ('right', 'left')
drillTransform = drill.actor.GetUserTransform()
rightBaseLink = getLinkFrame('%s_hand_face' % hand[0])
drillTransform.PostMultiply()
drillTransform.Identity()
drillTransform.Concatenate(drillOffset)
drillTransform.Concatenate(rightBaseLink)
drill._renderAllViews()
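# Example usage (illustrative sketch; the offset values below are made-up numbers,
# not calibrated constants from the original source):
#   offset = getDrillInHandOffset(zRotation=90.0, zTranslation=0.01, flip=True)
#   moveDrillToHand(offset, hand='left')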
class PointPicker(TimerCallback):
def __init__(self, numberOfPoints=3):
TimerCallback.__init__(self)
self.targetFps = 30
self.enabled = False
self.numberOfPoints = numberOfPoints
self.annotationObj = None
self.drawLines = True
self.clear()
def clear(self):
self.points = [None for i in xrange(self.numberOfPoints)]
self.hoverPos = None
self.annotationFunc = None
self.lastMovePos = [0, 0]
def onMouseMove(self, displayPoint, modifiers=None):
self.lastMovePos = displayPoint
def onMousePress(self, displayPoint, modifiers=None):
#print 'mouse press:', modifiers
#if not modifiers:
# return
for i in xrange(self.numberOfPoints):
if self.points[i] is None:
self.points[i] = self.hoverPos
break
if self.points[-1] is not None:
self.finish()
def finish(self):
self.enabled = False
om.removeFromObjectModel(self.annotationObj)
points = [p.copy() for p in self.points]
if self.annotationFunc is not None:
self.annotationFunc(*points)
removeViewPicker(self)
def handleRelease(self, displayPoint):
pass
def draw(self):
d = DebugData()
points = [p if p is not None else self.hoverPos for p in self.points]
# draw points
for p in points:
if p is not None:
d.addSphere(p, radius=0.01)
if self.drawLines:
# draw lines
for a, b in zip(points, points[1:]):
if b is not None:
d.addLine(a, b)
# connect end points
if points[-1] is not None:
d.addLine(points[0], points[-1])
self.annotationObj = updatePolyData(d.getPolyData(), 'annotation', parent=getDebugFolder())
self.annotationObj.setProperty('Color', QtGui.QColor(0, 255, 0))
self.annotationObj.actor.SetPickable(False)
def tick(self):
if not self.enabled:
return
if not om.findObjectByName('pointcloud snapshot'):
self.annotationFunc = None
self.finish()
return
self.hoverPos = pickPoint(self.lastMovePos, getSegmentationView(), obj='pointcloud snapshot')
self.draw()
class LineDraw(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.targetFps = 30
self.enabled = False
self.view = view
self.renderer = view.renderer()
self.line = vtk.vtkLeaderActor2D()
self.line.SetArrowPlacementToNone()
self.line.GetPositionCoordinate().SetCoordinateSystemToViewport()
self.line.GetPosition2Coordinate().SetCoordinateSystemToViewport()
self.line.GetProperty().SetLineWidth(4)
self.line.SetPosition(0,0)
self.line.SetPosition2(0,0)
self.clear()
def clear(self):
self.p1 = None
self.p2 = None
self.annotationFunc = None
self.lastMovePos = [0, 0]
self.renderer.RemoveActor2D(self.line)
def onMouseMove(self, displayPoint, modifiers=None):
self.lastMovePos = displayPoint
def onMousePress(self, displayPoint, modifiers=None):
if self.p1 is None:
self.p1 = list(self.lastMovePos)
if self.p1 is not None:
self.renderer.AddActor2D(self.line)
else:
self.p2 = self.lastMovePos
self.finish()
def finish(self):
self.enabled = False
self.renderer.RemoveActor2D(self.line)
if self.annotationFunc is not None:
self.annotationFunc(self.p1, self.p2)
def handleRelease(self, displayPoint):
pass
def tick(self):
if not self.enabled:
return
if self.p1:
self.line.SetPosition(self.p1)
self.line.SetPosition2(self.lastMovePos)
self.view.render()
viewPickers = []
def addViewPicker(picker):
global viewPickers
viewPickers.append(picker)
def removeViewPicker(picker):
global viewPickers
viewPickers.remove(picker)
def distanceToLine(x0, x1, x2):
numerator = np.sqrt(np.sum(np.cross((x0 - x1), (x0-x2))**2))
denom = np.linalg.norm(x2-x1)
return numerator / denom
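# Example (illustrative): the distance from the origin to the line through
# (1, 0, 0) and (1, 1, 0) is 1.0:
#   distanceToLine(np.array([0,0,0]), np.array([1,0,0]), np.array([1,1,0]))  # -> 1.0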
def labelDistanceToLine(polyData, linePoint1, linePoint2, resultArrayName='distance_to_line'):
x0 = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
x1 = np.array(linePoint1)
x2 = np.array(linePoint2)
numerator = np.sqrt(np.sum(np.cross((x0 - x1), (x0-x2))**2, axis=1))
denom = np.linalg.norm(x2-x1)
dists = numerator / denom
polyData = shallowCopy(polyData)
vtkNumpy.addNumpyToVtk(polyData, dists, resultArrayName)
return polyData
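# Typical pattern in this module (illustrative; 'polyData', 'p1' and 'p2' are
# assumed to come from the caller): label each point with its distance to the
# p1-p2 line, then keep only the nearby points:
#   polyData = labelDistanceToLine(polyData, p1, p2)
#   nearby = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.05])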
def labelDistanceToPoint(polyData, point, resultArrayName='distance_to_point'):
assert polyData.GetNumberOfPoints()
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
points = points - point
dists = np.sqrt(np.sum(points**2, axis=1))
polyData = shallowCopy(polyData)
vtkNumpy.addNumpyToVtk(polyData, dists, resultArrayName)
return polyData
def getPlaneEquationFromPolyData(polyData, expectedNormal):
_, origin, normal = applyPlaneFit(polyData, expectedNormal=expectedNormal, returnOrigin=True)
return origin, normal, np.hstack((normal, [np.dot(origin, normal)]))
def computeEdge(polyData, edgeAxis, perpAxis, binWidth=0.03):
polyData = labelPointDistanceAlongAxis(polyData, edgeAxis, resultArrayName='dist_along_edge')
polyData = labelPointDistanceAlongAxis(polyData, perpAxis, resultArrayName='dist_perp_to_edge')
polyData, bins = binByScalar(polyData, 'dist_along_edge', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
distToEdge = vtkNumpy.getNumpyFromVtk(polyData, 'dist_perp_to_edge')
numberOfBins = len(bins) - 1
edgePoints = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
binDists = distToEdge[binLabels == i]
if len(binDists):
edgePoints.append(binPoints[binDists.argmax()])
return np.array(edgePoints)
def computeCentroids(polyData, axis, binWidth=0.025):
polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
numberOfBins = len(bins) - 1
centroids = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
if len(binPoints):
centroids.append(np.average(binPoints, axis=0))
return np.array(centroids)
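# Note (illustrative): given a roughly cylindrical cluster and its long axis,
# computeCentroids returns one centroid per 2.5 cm slice along that axis (the
# default binWidth); segmentDrillInHand above uses these slice centroids to
# estimate the drill's orientation.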
def computePointCountsAlongAxis(polyData, axis, binWidth=0.025):
polyData = labelPointDistanceAlongAxis(polyData, axis, resultArrayName='dist_along_axis')
polyData, bins = binByScalar(polyData, 'dist_along_axis', binWidth)
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
binLabels = vtkNumpy.getNumpyFromVtk(polyData, 'bin_labels')
numberOfBins = len(bins) - 1
binCount = []
for i in xrange(numberOfBins):
binPoints = points[binLabels == i]
binCount.append(len(binPoints))
return np.array(binCount)
def binByScalar(lidarData, scalarArrayName, binWidth, binLabelsArrayName='bin_labels'):
'''
Gets the array with name scalarArrayName from lidarData.
Computes bins by dividing the scalar array into bins of size binWidth.
Adds a new label array to the lidar points identifying which bin the point belongs to,
where the first bin is labeled with 0.
Returns the new, labeled lidar data and the bins.
The bins are an array where each value represents a bin edge.
'''
scalars = vtkNumpy.getNumpyFromVtk(lidarData, scalarArrayName)
bins = np.arange(scalars.min(), scalars.max()+binWidth, binWidth)
binLabels = np.digitize(scalars, bins) - 1
assert(len(binLabels) == len(scalars))
newData = shallowCopy(lidarData)
vtkNumpy.addNumpyToVtk(newData, binLabels, binLabelsArrayName)
return newData, bins
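# Example usage (illustrative; assumes the input already carries a 'z' scalar array):
#   labeled, bins = binByScalar(polyData, 'z', binWidth=0.05)
#   binLabels = vtkNumpy.getNumpyFromVtk(labeled, 'bin_labels')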
def showObbs(polyData):
labelsArrayName = 'cluster_labels'
assert polyData.GetPointData().GetArray(labelsArrayName)
f = vtk.vtkAnnotateOBBs()
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
f.SetInput(polyData)
f.Update()
showPolyData(f.GetOutput(), 'bboxes')
def getOrientedBoundingBox(polyData):
'''
returns origin, edges, and outline wireframe
'''
nPoints = polyData.GetNumberOfPoints()
assert nPoints
polyData = shallowCopy(polyData)
labelsArrayName = 'bbox_labels'
labels = np.ones(nPoints)
vtkNumpy.addNumpyToVtk(polyData, labels, labelsArrayName)
f = vtk.vtkAnnotateOBBs()
f.SetInputArrayToProcess(0,0,0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, labelsArrayName)
f.SetInput(polyData)
f.Update()
assert f.GetNumberOfBoundingBoxes() == 1
origin = np.zeros(3)
edges = [np.zeros(3) for i in xrange(3)]
f.GetBoundingBoxOrigin(0, origin)
for i in xrange(3):
f.GetBoundingBoxEdge(0, i, edges[i])
return origin, edges, shallowCopy(f.GetOutput())
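# Example usage (illustrative): the returned edge vectors encode both orientation
# and extent, so the box dimensions are recovered with numpy, as done elsewhere in
# this module:
#   origin, edges, wireframe = getOrientedBoundingBox(cluster)
#   edgeLengths = [np.linalg.norm(edge) for edge in edges]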
def segmentBlockByAnnotation(blockDimensions, p1, p2, p3):
segmentationObj = om.findObjectByName('pointcloud snapshot')
segmentationObj.mapper.ScalarVisibilityOff()
segmentationObj.setProperty('Point Size', 2)
segmentationObj.setProperty('Alpha', 0.8)
# constrain z to lie in the plane
#p1[2] = p2[2] = p3[2] = max(p1[2], p2[2], p3[2])
zedge = p2 - p1
zaxis = zedge / np.linalg.norm(zedge)
#xwidth = distanceToLine(p3, p1, p2)
# expected dimensions
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(zedge)
yaxis = np.cross(p2 - p1, p3 - p1)
yaxis = yaxis / np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
# reorient axes
viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
if np.dot(yaxis, viewPlaneNormal) < 0:
yaxis *= -1
if np.dot(xaxis, p3 - p1) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
origin = ((p1 + p2) / 2.0) + xaxis*xwidth/2.0 + yaxis*ywidth/2.0
d = DebugData()
d.addSphere(origin, radius=0.01)
d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
obj = updatePolyData(d.getPolyData(), 'block axes')
obj.setProperty('Color', QtGui.QColor(255, 255, 0))
obj.setProperty('Visible', False)
om.findObjectByName('annotation').setProperty('Visible', False)
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
####
# debris task ground frame
def getBoardCorners(params):
axes = [np.array(params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
widths = [np.array(params[axis])/2.0 for axis in ['xwidth', 'ywidth', 'zwidth']]
edges = [axes[i] * widths[i] for i in xrange(3)]
origin = np.array(params['origin'])
return [
origin + edges[0] + edges[1] + edges[2],
origin - edges[0] + edges[1] + edges[2],
origin - edges[0] - edges[1] + edges[2],
origin + edges[0] - edges[1] + edges[2],
origin + edges[0] + edges[1] - edges[2],
origin - edges[0] + edges[1] - edges[2],
origin - edges[0] - edges[1] - edges[2],
origin + edges[0] - edges[1] - edges[2],
]
def getPointDistances(target, points):
return np.array([np.linalg.norm(target - p) for p in points])
def computeClosestCorner(aff, referenceFrame):
corners = getBoardCorners(aff.params)
dists = getPointDistances(np.array(referenceFrame.GetPosition()), corners)
return corners[dists.argmin()]
def computeGroundFrame(aff, referenceFrame):
refAxis = [0.0, -1.0, 0.0]
referenceFrame.TransformVector(refAxis, refAxis)
refAxis = np.array(refAxis)
axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
boardAxis = axes[axisProjections.argmax()]
if np.dot(boardAxis, refAxis) < 0:
boardAxis = -boardAxis
xaxis = boardAxis
zaxis = np.array([0.0, 0.0, 1.0])
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
closestCorner = computeClosestCorner(aff, referenceFrame)
groundFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
groundFrame.PostMultiply()
groundFrame.Translate(closestCorner[0], closestCorner[1], 0.0)
return groundFrame
def computeCornerFrame(aff, referenceFrame):
refAxis = [0.0, -1.0, 0.0]
referenceFrame.TransformVector(refAxis, refAxis)
refAxis = np.array(refAxis)
axes = [np.array(aff.params[axis]) for axis in ['xaxis', 'yaxis', 'zaxis']]
edgeLengths = [aff.params[axis] for axis in ['xwidth', 'ywidth', 'zwidth']]
axisProjections = np.array([np.abs(np.dot(axis, refAxis)) for axis in axes])
boardAxis = axes[axisProjections.argmax()]
if np.dot(boardAxis, refAxis) < 0:
boardAxis = -boardAxis
longAxis = axes[np.argmax(edgeLengths)]
xaxis = boardAxis
yaxis = axes[2]
zaxis = np.cross(xaxis, yaxis)
closestCorner = computeClosestCorner(aff, referenceFrame)
cornerFrame = getTransformFromAxes(xaxis, yaxis, zaxis)
cornerFrame.PostMultiply()
cornerFrame.Translate(closestCorner)
return cornerFrame
def publishTriad(transform, collectionId=1234):
o = lcmvs.obj_t()
xyz = transform.GetPosition()
rpy = transformUtils.rollPitchYawFromTransform(transform)
o.roll, o.pitch, o.yaw = rpy
o.x, o.y, o.z = xyz
o.id = 1
m = lcmvs.obj_collection_t()
m.id = collectionId
m.name = 'stance_triads'
m.type = lcmvs.obj_collection_t.AXIS3D
m.nobjs = 1
m.reset = False
m.objs = [o]
lcmUtils.publish('OBJ_COLLECTION', m)
def createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name, parent='affordances'):
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = BoxAffordanceItem(name, view=app.getCurrentRenderView())
obj.setProperty('Dimensions', [float(v) for v in [xwidth, ywidth, zwidth]])
obj.actor.SetUserTransform(t)
om.addToObjectModel(obj, parentObj=om.getOrCreateContainer(parent))
frameObj = vis.showFrame(t, name + ' frame', scale=0.2, visible=False, parent=obj)
obj.addToView(app.getDRCView())
frameObj.addToView(app.getDRCView())
affordanceManager.registerAffordance(obj)
return obj
def segmentBlockByTopPlane(polyData, blockDimensions, expectedNormal, expectedXAxis, edgeSign=1, name='block affordance'):
polyData, planeOrigin, normal = applyPlaneFit(polyData, distanceThreshold=0.05, expectedNormal=expectedNormal, returnOrigin=True)
_, lineDirection, _ = applyLineFit(polyData)
zaxis = lineDirection
yaxis = normal
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, expectedXAxis) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis /= np.linalg.norm(zaxis)
expectedXAxis = np.array(xaxis)
edgePoints = computeEdge(polyData, zaxis, xaxis*edgeSign)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
d = DebugData()
obj = updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
zaxis = lineDirection
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, expectedXAxis) < 0:
xaxis *= -1
# make right handed
zaxis = np.cross(xaxis, yaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis /= np.linalg.norm(zaxis)
polyData = labelPointDistanceAlongAxis(polyData, xaxis, resultArrayName='dist_along_line')
pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dists = np.dot(pts-linePoint, zaxis)
p1 = linePoint + zaxis*np.min(dists)
p2 = linePoint + zaxis*np.max(dists)
p1 = projectPointToPlane(p1, planeOrigin, normal)
p2 = projectPointToPlane(p2, planeOrigin, normal)
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(p2 - p1)
origin = p1 - edgeSign*xaxis*xwidth/2.0 - yaxis*ywidth/2.0 + zaxis*zwidth/2.0
d = DebugData()
#d.addSphere(linePoint, radius=0.02)
#d.addLine(linePoint, linePoint + yaxis*ywidth)
#d.addLine(linePoint, linePoint + xaxis*xwidth)
#d.addLine(linePoint, linePoint + zaxis*zwidth)
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
d.addSphere(origin, radius=0.01)
#d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
#d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
#d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
d.addLine(origin, origin + xaxis*xwidth/2.0)
d.addLine(origin, origin + yaxis*ywidth/2.0)
d.addLine(origin, origin + zaxis*zwidth/2.0)
#obj = updatePolyData(d.getPolyData(), 'block axes')
#obj.setProperty('Color', QtGui.QColor(255, 255, 0))
#obj.setProperty('Visible', False)
obj = createBlockAffordance(origin, xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, name)
obj.setProperty('Color', [222/255.0, 184/255.0, 135/255.0])
computeDebrisGraspSeed(obj)
t = computeDebrisStanceFrame(obj)
if t:
showFrame(t, 'debris stance frame', parent=obj)
obj.publishCallback = functools.partial(publishDebrisStanceFrame, obj)
return obj
def computeDebrisGraspSeed(aff):
debrisReferenceFrame = om.findObjectByName('debris reference frame')
if debrisReferenceFrame:
debrisReferenceFrame = debrisReferenceFrame.transform
affCornerFrame = computeCornerFrame(aff, debrisReferenceFrame)
showFrame(affCornerFrame, 'board corner frame', parent=aff, visible=False)
def computeDebrisStanceFrame(aff):
debrisReferenceFrame = om.findObjectByName('debris reference frame')
debrisWallEdge = om.findObjectByName('debris plane edge')
if debrisReferenceFrame and debrisWallEdge:
debrisReferenceFrame = debrisReferenceFrame.transform
affGroundFrame = computeGroundFrame(aff, debrisReferenceFrame)
updateFrame(affGroundFrame, 'board ground frame', parent=getDebugFolder(), visible=False)
affWallEdge = computeGroundFrame(aff, debrisReferenceFrame)
framePos = np.array(affGroundFrame.GetPosition())
p1, p2 = debrisWallEdge.points
edgeAxis = p2 - p1
edgeAxis /= np.linalg.norm(edgeAxis)
projectedPos = p1 + edgeAxis * np.dot(framePos - p1, edgeAxis)
affWallFrame = vtk.vtkTransform()
affWallFrame.PostMultiply()
useWallFrameForRotation = True
if useWallFrameForRotation:
affWallFrame.SetMatrix(debrisReferenceFrame.GetMatrix())
affWallFrame.Translate(projectedPos - np.array(debrisReferenceFrame.GetPosition()))
stanceWidth = 0.20
stanceOffsetX = -0.35
stanceOffsetY = 0.45
stanceRotation = 0.0
else:
affWallFrame.SetMatrix(affGroundFrame.GetMatrix())
affWallFrame.Translate(projectedPos - framePos)
stanceWidth = 0.20
stanceOffsetX = -0.35
stanceOffsetY = -0.45
stanceRotation = math.pi/2.0
stanceFrame, _, _ = getFootFramesFromReferenceFrame(affWallFrame, stanceWidth, math.degrees(stanceRotation), [stanceOffsetX, stanceOffsetY, 0.0])
return stanceFrame
def publishDebrisStanceFrame(aff):
frame = computeDebrisStanceFrame(aff)
publishTriad(frame)
def segmentBlockByPlanes(blockDimensions):
planes = om.findObjectByName('selected planes').children()[:2]
viewPlaneNormal = getSegmentationView().camera().GetViewPlaneNormal()
origin1, normal1, plane1 = getPlaneEquationFromPolyData(planes[0].polyData, expectedNormal=viewPlaneNormal)
origin2, normal2, plane2 = getPlaneEquationFromPolyData(planes[1].polyData, expectedNormal=viewPlaneNormal)
xaxis = normal2
yaxis = normal1
zaxis = np.cross(xaxis, yaxis)
xaxis = np.cross(yaxis, zaxis)
pts1 = vtkNumpy.getNumpyFromVtk(planes[0].polyData, 'Points')
pts2 = vtkNumpy.getNumpyFromVtk(planes[1].polyData, 'Points')
linePoint = np.zeros(3)
centroid2 = np.sum(pts2, axis=0)/len(pts2)
vtk.vtkPlane.ProjectPoint(centroid2, origin1, normal1, linePoint)
dists = np.dot(pts1-linePoint, zaxis)
p1 = linePoint + zaxis*np.min(dists)
p2 = linePoint + zaxis*np.max(dists)
xwidth, ywidth = blockDimensions
zwidth = np.linalg.norm(p2 - p1)
origin = p1 + xaxis*xwidth/2.0 + yaxis*ywidth/2.0 + zaxis*zwidth/2.0
d = DebugData()
d.addSphere(linePoint, radius=0.02)
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
d.addSphere(origin, radius=0.01)
d.addLine(origin - xaxis*xwidth/2.0, origin + xaxis*xwidth/2.0)
d.addLine(origin - yaxis*ywidth/2.0, origin + yaxis*ywidth/2.0)
d.addLine(origin - zaxis*zwidth/2.0, origin + zaxis*zwidth/2.0)
obj = updatePolyData(d.getPolyData(), 'block axes')
obj.setProperty('Color', QtGui.QColor(255, 255, 0))
obj.setProperty('Visible', False)
cube = vtk.vtkCubeSource()
cube.SetXLength(xwidth)
cube.SetYLength(ywidth)
cube.SetZLength(zwidth)
cube.Update()
cube = shallowCopy(cube.GetOutput())
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(origin)
obj = updatePolyData(cube, 'block affordance', cls=BlockAffordanceItem, parent='affordances')
obj.actor.SetUserTransform(t)
obj.addToView(app.getDRCView())
params = dict(origin=origin, xwidth=xwidth, ywidth=ywidth, zwidth=zwidth, xaxis=xaxis, yaxis=yaxis, zaxis=zaxis)
obj.setAffordanceParams(params)
obj.updateParamsFromActorTransform()
def estimatePointerTip(robotModel, polyData):
'''
Given a robot model, uses forward kinematics to determine a pointer tip
search region, then does a ransac line fit in the search region to find
points on the pointer, and selects the maximum point along the line fit
as the pointer tip. Returns the pointer tip xyz on success and returns
None on failure.
'''
palmFrame = robotModel.getLinkFrame('r_hand_force_torque')
p1 = [0.0, 0.14, -0.06]
p2 = [0.0, 0.24, -0.06]
palmFrame.TransformPoint(p1, p1)
palmFrame.TransformPoint(p2, p2)
p1 = np.array(p1)
p2 = np.array(p2)
d = DebugData()
d.addSphere(p1, radius=0.005)
d.addSphere(p2, radius=0.005)
d.addLine(p1, p2)
vis.updatePolyData(d.getPolyData(), 'pointer line', color=[1,0,0], parent=getDebugFolder(), visible=False)
polyData = cropToLineSegment(polyData, p1, p2)
if not polyData.GetNumberOfPoints():
#print 'pointer search region is empty'
return None
vis.updatePolyData(polyData, 'cropped to pointer line', parent=getDebugFolder(), visible=False)
polyData = labelDistanceToLine(polyData, p1, p2)
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, 0.07])
if polyData.GetNumberOfPoints() < 2:
#print 'pointer search region is empty'
return None
updatePolyData(polyData, 'distance to pointer line', colorByName='distance_to_line', parent=getDebugFolder(), visible=False)
ransacDistanceThreshold = 0.0075
lineOrigin, lineDirection, polyData = applyLineFit(polyData, distanceThreshold=ransacDistanceThreshold)
updatePolyData(polyData, 'line fit ransac', colorByName='ransac_labels', parent=getDebugFolder(), visible=False)
lineDirection = np.array(lineDirection)
lineDirection /= np.linalg.norm(lineDirection)
if np.dot(lineDirection, (p2 - p1)) < 0:
lineDirection *= -1
polyData = thresholdPoints(polyData, 'ransac_labels', [1.0, 1.0])
if polyData.GetNumberOfPoints() < 2:
#print 'pointer ransac line fit failed to find inliers'
return None
obj = updatePolyData(polyData, 'line fit points', colorByName='dist_along_line', parent=getDebugFolder(), visible=True)
obj.setProperty('Point Size', 5)
pts = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
dists = np.dot(pts-lineOrigin, lineDirection)
p1 = lineOrigin + lineDirection*np.min(dists)
p2 = lineOrigin + lineDirection*np.max(dists)
d = DebugData()
#d.addSphere(p1, radius=0.005)
d.addSphere(p2, radius=0.005)
d.addLine(p1, p2)
vis.updatePolyData(d.getPolyData(), 'fit pointer line', color=[0,1,0], parent=getDebugFolder(), visible=True)
return p2
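# Example usage (illustrative; assumes a visible robot model and a current point
# cloud revolution are available):
#   tip = estimatePointerTip(getVisibleRobotModel(), getCurrentRevolutionData())
#   if tip is None:
#       print 'pointer tip not found'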
def startBoundedPlaneSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentBoundedPlaneByAnnotation)
def startValveSegmentationByWallPlane(expectedValveRadius):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentValveByWallPlane, expectedValveRadius)
def startValveSegmentationManual(expectedValveRadius):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentValve, expectedValveRadius)
def startRefitWall():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = refitWall
def startWyeSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentWye)
def startDoorHandleSegmentation(otdfType):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDoorHandle, otdfType)
def startTrussSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentTruss)
def startHoseNozzleSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentHoseNozzle)
def storePoint(p):
global _pickPoint
_pickPoint = p
def getPickPoint():
global _pickPoint
return _pickPoint
def startPickPoint():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = storePoint
def startSelectToolTip():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = selectToolTip
def startDrillSegmentation():
picker = PointPicker(numberOfPoints=3)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrill)
def startDrillAutoSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillAuto)
def startDrillButtonSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillButton)
def startPointerTipSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentPointerTip)
def startDrillAutoSegmentationAlignedWithTable():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillAlignedWithTable)
def startDrillBarrelSegmentation():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillBarrel)
def startDrillWallSegmentation():
picker = PointPicker(numberOfPoints=3)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentDrillWall)
def startDrillWallSegmentationConstrained(rightAngleLocation):
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = False
picker.start()
picker.annotationFunc = functools.partial(segmentDrillWallConstrained, rightAngleLocation)
def startDrillInHandSegmentation():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.drawLines = True
picker.start()
picker.annotationFunc = functools.partial(segmentDrillInHand)
def startSegmentDebrisWall():
picker = PointPicker(numberOfPoints=1)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentDebrisWall)
def startSegmentDebrisWallManual():
picker = PointPicker(numberOfPoints=2)
addViewPicker(picker)
picker.enabled = True
picker.start()
picker.annotationFunc = functools.partial(segmentDebrisWallManual)
def selectToolTip(point1):
print point1
def segmentDebrisWallManual(point1, point2):
p1, p2 = point1, point2
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', visible=True)
edgeObj.points = [p1, p2]
xaxis = p2 - p1
xaxis /= np.linalg.norm(xaxis)
zaxis = np.array([0.0, 0.0, 1.0])
yaxis = np.cross(zaxis, xaxis)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(p1)
updateFrame(t, 'debris plane frame', parent=edgeObj, visible=False)
refFrame = vtk.vtkTransform()
refFrame.PostMultiply()
refFrame.SetMatrix(t.GetMatrix())
refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
updateFrame(refFrame, 'debris reference frame', parent=edgeObj, visible=False)
def segmentDebrisWall(point1):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.02, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
searchOrigin=point1, searchRadius=0.25, angleEpsilon=0.7, returnOrigin=True)
planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.02, 0.02])
updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=10)
updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
planePoints = labelDistanceToPoint(planePoints, point1)
clusters = extractClusters(planePoints, clusterTolerance=0.10)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
planePoints = clusters[0]
planeObj = updatePolyData(planePoints, 'debris plane points', parent=getDebugFolder(), visible=False)
perpAxis = [0,0,-1]
perpAxis /= np.linalg.norm(perpAxis)
edgeAxis = np.cross(normal, perpAxis)
edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
#binCounts = computePointCountsAlongAxis(planePoints, lineDirection)
xaxis = lineDirection
yaxis = normal
zaxis = np.cross(xaxis, yaxis)
if np.dot(zaxis, [0, 0, 1]) < 0:
zaxis *= -1
xaxis *= -1
pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
dists = np.dot(pts-linePoint, xaxis)
p1 = linePoint + xaxis*np.min(dists)
p2 = linePoint + xaxis*np.max(dists)
p1 = projectPointToPlane(p1, origin, normal)
p2 = projectPointToPlane(p2, origin, normal)
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
edgeObj = updatePolyData(d.getPolyData(), 'debris plane edge', parent=planeObj, visible=True)
edgeObj.points = [p1, p2]
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(p1)
updateFrame(t, 'debris plane frame', parent=planeObj, visible=False)
refFrame = vtk.vtkTransform()
refFrame.PostMultiply()
refFrame.SetMatrix(t.GetMatrix())
refFrame.Translate(-xaxis + yaxis + zaxis*20.0)
updateFrame(refFrame, 'debris reference frame', parent=planeObj, visible=False)
def segmentBoundedPlaneByAnnotation(point1, point2):
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = shallowCopy(inputObj.polyData)
viewPlaneNormal = np.array(getSegmentationView().camera().GetViewPlaneNormal())
polyData, origin, normal = applyPlaneFit(polyData, distanceThreshold=0.015, expectedNormal=viewPlaneNormal, perpendicularAxis=viewPlaneNormal,
searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.7, returnOrigin=True)
planePoints = thresholdPoints(polyData, 'dist_to_plane', [-0.015, 0.015])
updatePolyData(planePoints, 'unbounded plane points', parent=getDebugFolder(), visible=False)
planePoints = applyVoxelGrid(planePoints, leafSize=0.03)
planePoints = labelOutliers(planePoints, searchRadius=0.06, neighborsInSearchRadius=12)
updatePolyData(planePoints, 'voxel plane points', parent=getDebugFolder(), colorByName='is_outlier', visible=False)
planePoints = thresholdPoints(planePoints, 'is_outlier', [0, 0])
planePoints = labelDistanceToPoint(planePoints, point1)
clusters = extractClusters(planePoints, clusterTolerance=0.10)
clusters.sort(key=lambda x: vtkNumpy.getNumpyFromVtk(x, 'distance_to_point').min())
planePoints = clusters[0]
updatePolyData(planePoints, 'plane points', parent=getDebugFolder(), visible=False)
perpAxis = point2 - point1
perpAxis /= np.linalg.norm(perpAxis)
edgeAxis = np.cross(normal, perpAxis)
edgePoints = computeEdge(planePoints, edgeAxis, perpAxis)
edgePoints = vtkNumpy.getVtkPolyDataFromNumpyPoints(edgePoints)
updatePolyData(edgePoints, 'edge points', parent=getDebugFolder(), visible=False)
linePoint, lineDirection, _ = applyLineFit(edgePoints)
zaxis = normal
yaxis = lineDirection
xaxis = np.cross(yaxis, zaxis)
if np.dot(xaxis, perpAxis) < 0:
xaxis *= -1
# make right handed
yaxis = np.cross(zaxis, xaxis)
pts = vtkNumpy.getNumpyFromVtk(planePoints, 'Points')
dists = np.dot(pts-linePoint, yaxis)
p1 = linePoint + yaxis*np.min(dists)
p2 = linePoint + yaxis*np.max(dists)
p1 = projectPointToPlane(p1, origin, normal)
p2 = projectPointToPlane(p2, origin, normal)
d = DebugData()
d.addSphere(p1, radius=0.01)
d.addSphere(p2, radius=0.01)
d.addLine(p1, p2)
updatePolyData(d.getPolyData(), 'plane edge', parent=getDebugFolder(), visible=False)
t = getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate((p1 + p2)/ 2.0)
updateFrame(t, 'plane edge frame', parent=getDebugFolder(), visible=False)
savedCameraParams = None
def perspective():
global savedCameraParams
if savedCameraParams is None:
return
aff = getDefaultAffordanceObject()
if aff:
aff.setProperty('Alpha', 1.0)
obj = om.findObjectByName('pointcloud snapshot')
if obj is not None:
obj.actor.SetPickable(1)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOff()
c.SetPosition(savedCameraParams['Position'])
c.SetFocalPoint(savedCameraParams['FocalPoint'])
c.SetViewUp(savedCameraParams['ViewUp'])
view.setCameraManipulationStyle()
view.render()
def saveCameraParams(overwrite=False):
global savedCameraParams
if overwrite or (savedCameraParams is None):
view = getSegmentationView()
c = view.camera()
savedCameraParams = dict(Position=c.GetPosition(), FocalPoint=c.GetFocalPoint(), ViewUp=c.GetViewUp())
def getDefaultAffordanceObject():
obj = om.getActiveObject()
if isinstance(obj, AffordanceItem):
return obj
for obj in om.getObjects():
if isinstance(obj, AffordanceItem):
return obj
def getVisibleRobotModel():
for obj in om.getObjects():
if isinstance(obj, roboturdf.RobotModelItem) and obj.getProperty('Visible'):
return obj
def orthoX():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['xaxis']
viewUp = -aff.params['yaxis']
viewDistance = aff.params['xwidth']*3
scale = aff.params['zwidth']
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def orthoY():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['yaxis']
viewUp = -aff.params['xaxis']
viewDistance = aff.params['ywidth']*4
scale = aff.params['zwidth']
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def orthoZ():
aff = getDefaultAffordanceObject()
if not aff:
return
saveCameraParams()
aff.updateParamsFromActorTransform()
aff.setProperty('Alpha', 0.3)
om.findObjectByName('pointcloud snapshot').actor.SetPickable(0)
view = getSegmentationView()
c = view.camera()
c.ParallelProjectionOn()
origin = aff.params['origin']
viewDirection = aff.params['zaxis']
viewUp = -aff.params['yaxis']
viewDistance = aff.params['zwidth']
scale = aff.params['ywidth']*6
c.SetFocalPoint(origin)
c.SetPosition(origin - viewDirection*viewDistance)
c.SetViewUp(viewUp)
c.SetParallelScale(scale)
view.setActorManipulationStyle()
view.render()
def zoomToDisplayPoint(displayPoint, boundsRadius=0.5, view=None):
pickedPoint = pickPoint(displayPoint, getSegmentationView(), obj='pointcloud snapshot')
if pickedPoint is None:
return
view = view or app.getCurrentRenderView()
worldPt1, worldPt2 = getRayFromDisplayPoint(getSegmentationView(), displayPoint)
diagonal = np.array([boundsRadius, boundsRadius, boundsRadius])
bounds = np.hstack([pickedPoint - diagonal, pickedPoint + diagonal])
bounds = [bounds[0], bounds[3], bounds[1], bounds[4], bounds[2], bounds[5]]
view.renderer().ResetCamera(bounds)
view.camera().SetFocalPoint(pickedPoint)
view.render()
def extractPointsAlongClickRay(position, ray, polyData=None, distanceToLineThreshold=0.025, nearestToCamera=False):
#segmentationObj = om.findObjectByName('pointcloud snapshot')
if polyData is None:
polyData = getCurrentRevolutionData()
if not polyData or not polyData.GetNumberOfPoints():
return None
polyData = labelDistanceToLine(polyData, position, position + ray)
# extract points near line
polyData = thresholdPoints(polyData, 'distance_to_line', [0.0, distanceToLineThreshold])
if not polyData.GetNumberOfPoints():
return None
polyData = labelPointDistanceAlongAxis(polyData, ray, origin=position, resultArrayName='distance_along_line')
polyData = thresholdPoints(polyData, 'distance_along_line', [0.20, 1e6])
if not polyData.GetNumberOfPoints():
return None
updatePolyData(polyData, 'ray points', colorByName='distance_to_line', visible=False, parent=getDebugFolder())
if nearestToCamera:
dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_along_line')
else:
dists = vtkNumpy.getNumpyFromVtk(polyData, 'distance_to_line')
points = vtkNumpy.getNumpyFromVtk(polyData, 'Points')
intersectionPoint = points[dists.argmin()]
d = DebugData()
d.addSphere( intersectionPoint, radius=0.005)
d.addLine(position, intersectionPoint)
obj = updatePolyData(d.getPolyData(), 'intersecting ray', visible=False, color=[0,1,0], parent=getDebugFolder())
obj.actor.GetProperty().SetLineWidth(2)
d2 = DebugData()
end_of_ray = position + 2*ray
d2.addLine(position, end_of_ray)
obj2 = updatePolyData(d2.getPolyData(), 'camera ray', visible=False, color=[1,0,0], parent=getDebugFolder())
obj2.actor.GetProperty().SetLineWidth(2)
return intersectionPoint
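# Example usage (illustrative; 'cameraPos' and 'ray' are hypothetical values that
# would normally be derived from a clicked camera pixel):
#   hit = extractPointsAlongClickRay(cameraPos, ray, distanceToLineThreshold=0.05)
#   if hit is not None:
#       print 'clicked point in world frame:', hit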
def segmentDrillWallFromTag(position, ray):
'''
Fix the drill wall relative to a ray intersected with the wall.
Given a position and a ray (typically derived from a camera pixel),
use that point to determine a position for the drill wall.
This function uses a hard-coded offset between the position on the wall
and the drill cutting origin.
'''
#inputObj = om.findObjectByName('pointcloud snapshot')
#polyData = shallowCopy(inputObj.polyData)
polyData = getCurrentRevolutionData()
if (polyData is None): # no data yet
print "no LIDAR data yet"
return False
point1 = extractPointsAlongClickRay(position, ray, polyData )
# view direction is out:
viewDirection = -1 * SegmentationContext.getGlobalInstance().getViewDirection()
polyDataOut, origin, normal = applyPlaneFit(polyData, expectedNormal=viewDirection, searchOrigin=point1, searchRadius=0.3, angleEpsilon=0.3, returnOrigin=True)
# project the lidar point onto the plane (older, variance is >1cm with robot 2m away)
#intersection_point = projectPointToPlane(point1, origin, normal)
# intersect the ray with the plane (variance was about 4mm with robot 2m away)
intersection_point = intersectLineWithPlane(position, ray, origin, normal)
# Define a frame:
xaxis = -normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
zaxis = np.cross(xaxis, yaxis)
t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(intersection_point)
t2 = transformUtils.copyFrame(t)
t2.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY( [0,0.6,-0.25] , [0,0,0] )
t2.Concatenate(t3)
rightAngleLocation = 'bottom left'
createDrillWall(rightAngleLocation, t2)
wall= om.findObjectByName('wall')
vis.updateFrame( t ,'wall fit tag', parent=wall, visible=False, scale=0.2)
d = DebugData()
d.addSphere( intersection_point, radius=0.002)
obj = updatePolyData(d.getPolyData(), 'intersection', parent=wall, visible=False, color=[0,1,0]) #
obj.actor.GetProperty().SetLineWidth(1)
return True
def segmentDrillWallFromWallCenter():
'''
Get the drill wall target as an offset from the center of
the full wall
'''
# find the valve wall and its center
inputObj = om.findObjectByName('pointcloud snapshot')
polyData = inputObj.polyData
# hardcoded position to target frame from center of wall
# coincides with the distance from the April tag to this position
wallFrame = transformUtils.copyFrame( findWallCenter(polyData) )
wallFrame.PreMultiply()
t3 = transformUtils.frameFromPositionAndRPY( [-0.07,-0.3276,0] , [180,-90,0] )
wallFrame.Concatenate(t3)
rightAngleLocation = 'bottom left'
createDrillWall(rightAngleLocation, wallFrame)
wall= om.findObjectByName('wall')
vis.updateFrame( wallFrame ,'wall fit lidar', parent=wall, visible=False, scale=0.2)
| bsd-3-clause |
apache/incubator-superset | superset/datasets/commands/importers/v1/utils.py | 1 | 4283 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-branches
import gzip
import json
import logging
import re
from typing import Any, Dict
from urllib import request
import pandas as pd
from sqlalchemy import BigInteger, Date, DateTime, Float, String, Text
from sqlalchemy.orm import Session
from sqlalchemy.sql.visitors import VisitableType
from superset.connectors.sqla.models import SqlaTable
logger = logging.getLogger(__name__)
CHUNKSIZE = 512
VARCHAR = re.compile(r"VARCHAR\((\d+)\)", re.IGNORECASE)
JSON_KEYS = {"params", "template_params", "extra"}
type_map = {
"VARCHAR": String(255),
"STRING": String(255),
"TEXT": Text(),
"BIGINT": BigInteger(),
"FLOAT": Float(),
"FLOAT64": Float(),
"DOUBLE PRECISION": Float(),
"DATE": Date(),
"DATETIME": DateTime(),
"TIMESTAMP WITHOUT TIME ZONE": DateTime(timezone=False),
"TIMESTAMP WITH TIME ZONE": DateTime(timezone=True),
}
def get_sqla_type(native_type: str) -> VisitableType:
if native_type.upper() in type_map:
return type_map[native_type.upper()]
match = VARCHAR.match(native_type)
if match:
size = int(match.group(1))
return String(size)
raise Exception(f"Unknown type: {native_type}")
def get_dtype(df: pd.DataFrame, dataset: SqlaTable) -> Dict[str, VisitableType]:
return {
column.column_name: get_sqla_type(column.type)
for column in dataset.columns
if column.column_name in df.keys()
}
def import_dataset(
session: Session, config: Dict[str, Any], overwrite: bool = False
) -> SqlaTable:
existing = session.query(SqlaTable).filter_by(uuid=config["uuid"]).first()
if existing:
if not overwrite:
return existing
config["id"] = existing.id
# TODO (betodealmeida): move this logic to import_from_dict
config = config.copy()
for key in JSON_KEYS:
if config.get(key):
try:
config[key] = json.dumps(config[key])
except TypeError:
logger.info("Unable to encode `%s` field: %s", key, config[key])
for metric in config.get("metrics", []):
if metric.get("extra"):
try:
metric["extra"] = json.dumps(metric["extra"])
except TypeError:
logger.info("Unable to encode `extra` field: %s", metric["extra"])
# should we delete columns and metrics not present in the current import?
sync = ["columns", "metrics"] if overwrite else []
# should we also load data into the dataset?
data_uri = config.get("data")
# import recursively to include columns and metrics
dataset = SqlaTable.import_from_dict(session, config, recursive=True, sync=sync)
if dataset.id is None:
session.flush()
# load data
if data_uri:
data = request.urlopen(data_uri)
if data_uri.endswith(".gz"):
data = gzip.open(data)
df = pd.read_csv(data, encoding="utf-8")
dtype = get_dtype(df, dataset)
# convert temporal columns
for column_name, sqla_type in dtype.items():
if isinstance(sqla_type, (Date, DateTime)):
df[column_name] = pd.to_datetime(df[column_name])
df.to_sql(
dataset.table_name,
con=session.connection(),
schema=dataset.schema,
if_exists="replace",
chunksize=CHUNKSIZE,
dtype=dtype,
index=False,
method="multi",
)
return dataset
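# Example usage (illustrative sketch; the config below is a minimal, made-up
# example rather than a real dataset export, which carries more fields):
#   config = {
#       "uuid": "00000000-0000-0000-0000-000000000000",  # hypothetical UUID
#       "table_name": "my_table",
#       "columns": [],
#       "metrics": [],
#   }
#   dataset = import_dataset(session, config, overwrite=False)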
| apache-2.0 |
javier-cabezas/figplotter | setup.py | 1 | 3755 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
setup(
name='figplotter',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.2',
description='A figure plotter using matplotlib',
long_description='A figure plotter using matplotlib that has support for rich styling and figure introspection',
# The project's main homepage.
url='git+https://github.com/javier-cabezas/figplotter.git',
# Author details
author='Javier Cabezas',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Visualization',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='data visualization plotting',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['tests']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['matplotlib'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
#'dev': ['check-manifest'],
#'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
#'sample': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
# 'sample=sample:main',
],
},
)
| mit |
map0logo/fourier_henon | henon.py | 1 | 2753 | from __future__ import division
from numba import autojit
import matplotlib.pyplot as plt
import pylab
@autojit
def HenonMap(x, y, a, b, alpha, gamma):
"""
Henon map iteration
:param x: x_n coordinate
:param y: y_n coordinate
:param a: a parameter
:param b: b parameter
:param alpha: alpha parameter
:param gamma: gamma parameter
    :return: x_n+1 and y_n+1 coordinates
"""
return a - alpha * x**2 + b * y, gamma * x
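# Example (illustrative): one step from the origin with the classic parameters
# a=1.4, b=0.3 and alpha=gamma=1 gives (a, 0).
# >>> HenonMap(0., 0., 1.4, 0.3, 1., 1.)
# (1.4, 0.0)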
def HenonIterations(n):
"""
List of the first n iterations of Henon map given x_0=0 y_0=0
:param n: number of iterations
:return: list of iterations
"""
iterations = []
a = 1.4
b = 0.3
alpha = 1.
gamma = 1.
x = 0.
y = 0.
iterations.append((x, y))
for i in xrange(n):
x, y = HenonMap(x, y, a, b, alpha, gamma)
iterations.append((x, y))
return iterations
def HenonPlot():
"""
Plot of Henon Attractor
:return:
"""
iterations = HenonIterations(1000)
x = [i for i, j in iterations]
y = [j for i, j in iterations]
T = range(len(iterations))
pylab.figure()
plt.scatter(x, y, c=T, marker='+', linewidth='1')
@autojit
def H(x, y, gamma):
"""
:param x: x_n coordinate
:param y: y_n coordinate
:param gamma: gamma parameter
:return: Henon map for a = 1, b = 1, and alpha = 0.2
"""
return HenonMap(x, y, 1., 1., 0.2, gamma)
@autojit
def HenonIterate(x0, y0, n, gamma):
"""
Iterates on previous H map starting on x_0, y_0
    :param x0: initial x coordinate
    :param y0: initial y coordinate
:param n: number of iterations
:param gamma: gamma parameter
    :return: number of iterations until x_i^2 + y_i^2 > 100, or n if
    x_i^2 + y_i^2 never exceeds 100
"""
i = 0
x = x0
y = y0
while (x**2 + y**2 <= 100) and (i < n):
x, y = H(x, y, gamma)
i += 1
return i
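# Example usage (illustrative; the exact count depends on gamma):
# >>> HenonIterate(0., 0., 200, 1.03)   # int in [0, 200]: escape time of the orbit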
def HenonPlot2(n, gamma):
"""
    Plot of HenonIterate inside the [-5, 5] x [-5, 5] square
    :param n: number of iterations
    :param gamma: gamma parameter
:return:
"""
lower = -5.
upper = 5.
step = (float(upper) - float(lower)) / float(n)
val = [lower + i * step for i in range(n + 1)]
mat = []
for i in xrange(len(val)):
mat.append([])
for j in xrange(len(val)):
mat[i].append(HenonIterate(val[i], val[j], n, gamma))
# x = len(val) * val
# y = [j for j in val for i in xrange(len(val))]
# T = [mat[i][j] for i in xrange(len(val)) for j in xrange(len(val))]
pylab.figure(2)
pylab.contourf(val, val, mat, 10)
# pylab.scatter(x, y, c=T, alpha=0.10)
pylab.show()
if __name__ == '__main__':
HenonPlot()
HenonPlot2(200, 1.03) | gpl-2.0 |
FluVigilanciaBR/seasonality | methods/data_filter/dbf2csv.py | 1 | 1275 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
'''
Convert DBF file to CSV format
'''
import argparse
import logging
import pandas as pd
from argparse import RawDescriptionHelpFormatter
from dbfread import DBF
module_logger = logging.getLogger('update_system.data_filter.dbf2csv')
def dbf2csv(fin, fout):
    df = None
    for enc in ['utf-8', 'utf-16', 'latin-1']:
        try:
            table = DBF(fin, encoding=enc)
            df = pd.DataFrame(iter(table))
            break
        except UnicodeDecodeError:
            pass
    if df is None:
        # none of the candidate encodings worked; fail with a clear message
        raise ValueError('Unable to decode %s with utf-8, utf-16 or latin-1' % fin)
    df.to_csv(fout, encoding='utf-8')
    return
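# Example usage (illustrative; file names are placeholders):
# >>> dbf2csv('influenza.DBF', 'influenza.csv')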
def main(flist):
for fname in flist:
module_logger.info('DBF2CSV: PROCESSING %s' % fname)
fout = '.'.join(fname.split('.')[:-1]) + '.csv'
dbf2csv(fname, fout)
module_logger.info('DBF2CSV: CONVERTED TO %s' % fout)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert DBF file to CSV.\n" +
"python3 dbf2csv.py --path ../data/influ*.DBF\n",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--path', nargs='*', action='append', help='Path to data file')
args = parser.parse_args()
main(args.path[0])
| gpl-3.0 |
madjelan/scikit-learn | sklearn/decomposition/nmf.py | 30 | 19208 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
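# Usage sketch (illustrative): NNDSVD initialization of a small non-negative
# matrix; shapes follow the docstring above.
# >>> rng = np.random.RandomState(0)
# >>> X = np.abs(rng.randn(6, 4))
# >>> W, H = _initialize_nmf(X, n_components=2, random_state=0)
# >>> W.shape, H.shape
# ((6, 2), (2, 4))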
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
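# Usage sketch (illustrative): solve min ||WH - V||_2 for H on a small problem.
# >>> rng = np.random.RandomState(0)
# >>> W = np.abs(rng.randn(5, 2))
# >>> V = np.dot(W, np.abs(rng.randn(2, 3)))
# >>> H0 = np.abs(rng.randn(2, 3))
# >>> H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)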
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/ticker.py | 10 | 56708 | """
Tick locating and formatting
============================
This module contains classes to support completely configurable tick locating
and formatting. Although the locators know nothing about major or minor
ticks, they are used by the Axis class to support major and minor tick
locating and formatting. Generic tick locators and formatters are provided,
as well as domain-specific custom ones.
Default Formatter
-----------------
The default formatter identifies when the x-data being
plotted is a small range on top of a large offset. To
reduce the chances that the ticklabels overlap, the ticks
are labeled as deltas from a fixed offset. For example::
    ax.plot(np.arange(2000, 2010), range(10))
will have ticks of 0-9 with an offset of +2e3. If this
is not desired, turn off the use of the offset on the default
formatter::
ax.get_xaxis().get_major_formatter().set_useOffset(False)
Alternatively, set the rcParam ``axes.formatter.useoffset=False`` to turn it off globally, or set a different formatter.
globally, or set a different formatter.
Tick locating
-------------
The Locator class is the base class for all tick locators. The locators
handle autoscaling of the view limits based on the data limits, and the
choosing of tick locations. A useful semi-automatic tick locator is
MultipleLocator. You initialize this with a base, e.g., 10, and it picks axis
limits and ticks that are multiples of your base.
The Locator subclasses defined here are
:class:`NullLocator`
No ticks
:class:`FixedLocator`
Tick locations are fixed
:class:`IndexLocator`
locator for index plots (e.g., where x = range(len(y)))
:class:`LinearLocator`
evenly spaced ticks from min to max
:class:`LogLocator`
logarithmically ticks from min to max
:class:`MultipleLocator`
ticks and range are a multiple of base;
either integer or float
:class:`OldAutoLocator`
    choose a MultipleLocator and dynamically reassign it for
intelligent ticking during navigation
:class:`MaxNLocator`
finds up to a max number of ticks at nice locations
:class:`AutoLocator`
:class:`MaxNLocator` with simple defaults. This is the default
tick locator for most plotting.
:class:`AutoMinorLocator`
locator for minor ticks when the axis is linear and the
major ticks are uniformly spaced. It subdivides the major
tick interval into a specified number of minor intervals,
defaulting to 4 or 5 depending on the major interval.
There are a number of locators specialized for date locations - see
the dates module
You can define your own locator by deriving from Locator. You must
override the __call__ method, which returns a sequence of locations,
and you will probably want to override the autoscale method to set the
view limits from the data limits.
If you want to override the default locator, use one of the above or a
custom locator and pass it to the x or y axis instance. The relevant
methods are::
ax.xaxis.set_major_locator( xmajorLocator )
ax.xaxis.set_minor_locator( xminorLocator )
ax.yaxis.set_major_locator( ymajorLocator )
ax.yaxis.set_minor_locator( yminorLocator )
The default minor locator is the NullLocator, e.g., no minor ticks on by
default.
Tick formatting
---------------
Tick formatting is controlled by classes derived from Formatter. The
formatter operates on a single tick value and returns a string to the
axis.
:class:`NullFormatter`
no labels on the ticks
:class:`IndexFormatter`
set the strings from a list of labels
:class:`FixedFormatter`
set the strings manually for the labels
:class:`FuncFormatter`
user defined function sets the labels
:class:`FormatStrFormatter`
use a sprintf format string
:class:`ScalarFormatter`
default formatter for scalars; autopick the fmt string
:class:`LogFormatter`
formatter for log axes
You can derive your own formatter from the Formatter base class by
simply overriding the ``__call__`` method. The formatter class has access
to the axis view and data limits.
To control the major and minor tick label formats, use one of the
following methods::
ax.xaxis.set_major_formatter( xmajorFormatter )
ax.xaxis.set_minor_formatter( xminorFormatter )
ax.yaxis.set_major_formatter( ymajorFormatter )
ax.yaxis.set_minor_formatter( yminorFormatter )
See :ref:`pylab_examples-major_minor_demo1` for an example of setting
major and minor ticks. See the :mod:`matplotlib.dates` module for
more information and examples of using date locators and formatters.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
if six.PY3:
long = int
import decimal
import locale
import math
import numpy as np
from matplotlib import rcParams
from matplotlib import cbook
from matplotlib import transforms as mtransforms
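# Example usage (illustrative; `ax` is an existing Axes instance, and
# MultipleLocator / FormatStrFormatter are defined later in this module):
# >>> import matplotlib.pyplot as plt
# >>> fig, ax = plt.subplots()
# >>> ax.xaxis.set_major_locator(MultipleLocator(0.5))
# >>> ax.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))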
class _DummyAxis(object):
def __init__(self, minpos=0):
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self._minpos = minpos
def get_view_interval(self):
return self.viewLim.intervalx
def set_view_interval(self, vmin, vmax):
self.viewLim.intervalx = vmin, vmax
def get_minpos(self):
return self._minpos
def get_data_interval(self):
return self.dataLim.intervalx
def set_data_interval(self, vmin, vmax):
self.dataLim.intervalx = vmin, vmax
class TickHelper(object):
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
def set_view_interval(self, vmin, vmax):
self.axis.set_view_interval(vmin, vmax)
def set_data_interval(self, vmin, vmax):
self.axis.set_data_interval(vmin, vmax)
def set_bounds(self, vmin, vmax):
self.set_view_interval(vmin, vmax)
self.set_data_interval(vmin, vmax)
class Formatter(TickHelper):
"""
Convert the tick location to a string
"""
# some classes want to see all the locs to help format
# individual ones
locs = []
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
indicated unspecified"""
raise NotImplementedError('Derived must override')
def format_data(self, value):
return self.__call__(value)
def format_data_short(self, value):
"""return a short string version"""
return self.format_data(value)
def get_offset(self):
return ''
def set_locs(self, locs):
self.locs = locs
def fix_minus(self, s):
"""
Some classes may want to replace a hyphen for minus with the
proper unicode symbol (U+2212) for typographical correctness.
The default is to not replace it.
Note, if you use this method, e.g., in :meth:`format_data` or
call, you probably don't want to use it for
:meth:`format_data_short` since the toolbar uses this for
interactive coord reporting and I doubt we can expect GUIs
across platforms will handle the unicode correctly. So for
now the classes that override :meth:`fix_minus` should have an
explicit :meth:`format_data_short` method
"""
return s
class IndexFormatter(Formatter):
"""
format the position x to the nearest i-th label where i=int(x+0.5)
"""
def __init__(self, labels):
self.labels = labels
self.n = len(labels)
def __call__(self, x, pos=None):
"""Return the format for tick val x at position pos; pos=None
indicated unspecified"""
i = int(x + 0.5)
if i < 0:
return ''
elif i >= self.n:
return ''
else:
return self.labels[i]
class NullFormatter(Formatter):
'Always return the empty string'
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return ''
class FixedFormatter(Formatter):
'Return fixed strings for tick labels'
def __init__(self, seq):
"""
*seq* is a sequence of strings. For positions ``i < len(seq)`` return
*seq[i]* regardless of *x*. Otherwise return ''
"""
self.seq = seq
self.offset_string = ''
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if pos is None or pos >= len(self.seq):
return ''
else:
return self.seq[pos]
def get_offset(self):
return self.offset_string
def set_offset_string(self, ofs):
self.offset_string = ofs
class FuncFormatter(Formatter):
"""
User defined function for formatting
"""
def __init__(self, func):
self.func = func
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.func(x, pos)
class FormatStrFormatter(Formatter):
"""
Use an old-style ('%' operator) format string to format the tick
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt % x
class StrMethodFormatter(Formatter):
"""
Use a new-style format string (as used by `str.format()`)
to format the tick. The field formatting must be labeled `x`.
"""
def __init__(self, fmt):
self.fmt = fmt
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
return self.fmt.format(x=x)
class OldScalarFormatter(Formatter):
"""
Tick location is a plain old number.
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
xmin, xmax = self.axis.get_view_interval()
d = abs(xmax - xmin)
return self.pprint_val(x, d)
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class ScalarFormatter(Formatter):
"""
Tick location is a plain old number. If useOffset==True and the data range
is much smaller than the data average, then an offset will be determined
such that the tick labels are meaningful. Scientific notation is used for
data < 10^-n or data >= 10^m, where n and m are the power limits set using
set_powerlimits((n,m)). The defaults for these are controlled by the
axes.formatter.limits rc parameter.
"""
def __init__(self, useOffset=None, useMathText=None, useLocale=None):
# useOffset allows plotting small data ranges with large offsets: for
# example: [1+1e-9,1+2e-9,1+3e-9] useMathText will render the offset
# and scientific notation in mathtext
if useOffset is None:
useOffset = rcParams['axes.formatter.useoffset']
self.set_useOffset(useOffset)
self._usetex = rcParams['text.usetex']
if useMathText is None:
useMathText = rcParams['axes.formatter.use_mathtext']
self._useMathText = useMathText
self.orderOfMagnitude = 0
self.format = ''
self._scientific = True
self._powerlimits = rcParams['axes.formatter.limits']
if useLocale is None:
useLocale = rcParams['axes.formatter.use_locale']
self._useLocale = useLocale
def get_useOffset(self):
return self._useOffset
def set_useOffset(self, val):
if val in [True, False]:
self.offset = 0
self._useOffset = val
else:
self._useOffset = False
self.offset = val
useOffset = property(fget=get_useOffset, fset=set_useOffset)
def get_useLocale(self):
return self._useLocale
def set_useLocale(self, val):
if val is None:
self._useLocale = rcParams['axes.formatter.use_locale']
else:
self._useLocale = val
useLocale = property(fget=get_useLocale, fset=set_useLocale)
def fix_minus(self, s):
"""use a unicode minus rather than hyphen"""
if rcParams['text.usetex'] or not rcParams['axes.unicode_minus']:
return s
else:
return s.replace('-', '\u2212')
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
if len(self.locs) == 0:
return ''
else:
s = self.pprint_val(x)
return self.fix_minus(s)
def set_scientific(self, b):
'''True or False to turn scientific notation on or off
see also :meth:`set_powerlimits`
'''
self._scientific = bool(b)
def set_powerlimits(self, lims):
'''
Sets size thresholds for scientific notation.
e.g., ``formatter.set_powerlimits((-3, 4))`` sets the pre-2007 default
in which scientific notation is used for numbers less than 1e-3 or
greater than 1e4.
See also :meth:`set_scientific`.
'''
assert len(lims) == 2, "argument must be a sequence of length 2"
self._powerlimits = lims
def format_data_short(self, value):
"""return a short formatted string representation of a number"""
if self._useLocale:
return locale.format_string('%-12g', (value,))
else:
return '%-12g' % value
def format_data(self, value):
'return a formatted string representation of a number'
if self._useLocale:
s = locale.format_string('%1.10e', (value,))
else:
s = '%1.10e' % value
s = self._formatSciNotation(s)
return self.fix_minus(s)
def get_offset(self):
"""Return scientific notation, plus offset"""
if len(self.locs) == 0:
return ''
s = ''
if self.orderOfMagnitude or self.offset:
offsetStr = ''
sciNotStr = ''
if self.offset:
offsetStr = self.format_data(self.offset)
if self.offset > 0:
offsetStr = '+' + offsetStr
if self.orderOfMagnitude:
if self._usetex or self._useMathText:
sciNotStr = self.format_data(10 ** self.orderOfMagnitude)
else:
sciNotStr = '1e%d' % self.orderOfMagnitude
if self._useMathText:
if sciNotStr != '':
sciNotStr = r'\times\mathdefault{%s}' % sciNotStr
s = ''.join(('$', sciNotStr,
r'\mathdefault{', offsetStr, '}$'))
elif self._usetex:
if sciNotStr != '':
sciNotStr = r'\times%s' % sciNotStr
s = ''.join(('$', sciNotStr, offsetStr, '$'))
else:
s = ''.join((sciNotStr, offsetStr))
return self.fix_minus(s)
def set_locs(self, locs):
'set the locations of the ticks'
self.locs = locs
if len(self.locs) > 0:
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
if self._useOffset:
self._set_offset(d)
self._set_orderOfMagnitude(d)
self._set_format(vmin, vmax)
def _set_offset(self, range):
# offset of 20,001 is 20,000, for example
locs = self.locs
if locs is None or not len(locs) or range == 0:
self.offset = 0
return
ave_loc = np.mean(locs)
        if ave_loc: # don't want to take log10(0)
ave_oom = math.floor(math.log10(np.mean(np.absolute(locs))))
range_oom = math.floor(math.log10(range))
if np.absolute(ave_oom - range_oom) >= 3: # four sig-figs
p10 = 10 ** range_oom
if ave_loc < 0:
self.offset = (math.ceil(np.max(locs) / p10) * p10)
else:
self.offset = (math.floor(np.min(locs) / p10) * p10)
else:
self.offset = 0
def _set_orderOfMagnitude(self, range):
# if scientific notation is to be used, find the appropriate exponent
# if using an numerical offset, find the exponent after applying the
# offset
if not self._scientific:
self.orderOfMagnitude = 0
return
locs = np.absolute(self.locs)
if self.offset:
oom = math.floor(math.log10(range))
else:
if locs[0] > locs[-1]:
val = locs[0]
else:
val = locs[-1]
if val == 0:
oom = 0
else:
oom = math.floor(math.log10(val))
if oom <= self._powerlimits[0]:
self.orderOfMagnitude = oom
elif oom >= self._powerlimits[1]:
self.orderOfMagnitude = oom
else:
self.orderOfMagnitude = 0
def _set_format(self, vmin, vmax):
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = list(self.locs) + [vmin, vmax]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, 3 - loc_range_oom)
# refined estimate:
thresh = 1e-3 * 10 ** loc_range_oom
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs += 1
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex:
self.format = '$%s$' % self.format
elif self._useMathText:
self.format = '$\mathdefault{%s}$' % self.format
def pprint_val(self, x):
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if np.absolute(xp) < 1e-8:
xp = 0
if self._useLocale:
return locale.format_string(self.format, (xp,))
else:
return self.format % xp
def _formatSciNotation(self, s):
# transform 1e+004 into 1e4, for example
if self._useLocale:
decimal_point = locale.localeconv()['decimal_point']
positive_sign = locale.localeconv()['positive_sign']
else:
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
try:
significand = tup[0].rstrip('0').rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if self._useMathText or self._usetex:
if significand == '1' and exponent != '':
# reformat 1x10^y as 10^y
significand = ''
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
return r'%s{\times}%s' % (significand, exponent)
else:
return r'%s%s' % (significand, exponent)
else:
s = ('%se%s%s' % (significand, sign, exponent)).rstrip('e')
return s
except IndexError:
return s
class LogFormatter(Formatter):
"""
Format values for log axis;
"""
def __init__(self, base=10.0, labelOnlyBase=True):
"""
*base* is used to locate the decade tick,
which will be the only one to be labeled if *labelOnlyBase*
is ``False``
"""
self._base = base + 0.0
self.labelOnlyBase = labelOnlyBase
def base(self, base):
"""change the *base* for labeling - warning: should always match the
base used for :class:`LogLocator`"""
self._base = base
def label_minor(self, labelOnlyBase):
'switch on/off minor ticks labeling'
self.labelOnlyBase = labelOnlyBase
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
d = abs(vmax - vmin)
b = self._base
if x == 0.0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif x > 10000:
s = '%1.0e' % x
elif x < 1:
s = '%1.0e' % x
else:
s = self.pprint_val(x, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
def format_data(self, value):
b = self.labelOnlyBase
self.labelOnlyBase = False
value = cbook.strip_math(self.__call__(value))
self.labelOnlyBase = b
return value
def format_data_short(self, value):
'return a short formatted string representation of a number'
return '%-12g' % value
def pprint_val(self, x, d):
#if the number is not too big and it's an int, format it as an
#int
if abs(x) < 1e4 and x == int(x):
return '%d' % x
if d < 1e-2:
fmt = '%1.3e'
elif d < 1e-1:
fmt = '%1.3f'
elif d > 1e5:
fmt = '%1.1e'
elif d > 10:
fmt = '%1.1f'
elif d > 1:
fmt = '%1.2f'
else:
fmt = '%1.3f'
s = fmt % x
#print d, x, fmt, s
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
sign = tup[1][0].replace('+', '')
exponent = tup[1][1:].lstrip('0')
s = '%se%s%s' % (mantissa, sign, exponent)
else:
s = s.rstrip('0').rstrip('.')
return s
class LogFormatterExponent(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
"""Return the format for tick val *x* at position *pos*"""
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
b = self._base
if x == 0:
return '0'
sign = np.sign(x)
# only label the decades
fx = math.log(abs(x)) / math.log(b)
isDecade = is_close_to_int(fx)
if not isDecade and self.labelOnlyBase:
s = ''
elif abs(fx) > 10000:
s = '%1.0g' % fx
elif abs(fx) < 1:
s = '%1.0g' % fx
else:
s = self.pprint_val(fx, d)
if sign == -1:
s = '-%s' % s
return self.fix_minus(s)
class LogFormatterMathtext(LogFormatter):
"""
Format values for log axis; using ``exponent = log_base(value)``
"""
def __call__(self, x, pos=None):
'Return the format for tick val *x* at position *pos*'
b = self._base
usetex = rcParams['text.usetex']
# only label the decades
if x == 0:
if usetex:
return '$0$'
else:
return '$\mathdefault{0}$'
fx = math.log(abs(x)) / math.log(b)
is_decade = is_close_to_int(fx)
sign_string = '-' if x < 0 else ''
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if not is_decade and self.labelOnlyBase:
return ''
elif not is_decade:
if usetex:
return (r'$%s%s^{%.2f}$') % \
(sign_string, base, fx)
else:
return ('$\mathdefault{%s%s^{%.2f}}$') % \
(sign_string, base, fx)
else:
if usetex:
return (r'$%s%s^{%d}$') % (sign_string,
base,
nearest_long(fx))
else:
return (r'$\mathdefault{%s%s^{%d}}$') % (sign_string,
base,
nearest_long(fx))
class EngFormatter(Formatter):
"""
Formats axis values using engineering prefixes to represent powers of 1000,
plus a specified unit, e.g., 10 MHz instead of 1e7.
"""
# the unicode for -6 is the greek letter mu
    # commented here due to bug in pep8
# (https://github.com/jcrocholl/pep8/issues/271)
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "\u03bc",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, unit="", places=None):
self.unit = unit
self.places = places
def __call__(self, x, pos=None):
s = "%s%s" % (self.format_eng(x), self.unit)
return self.fix_minus(s)
def format_eng(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.places = 0
'0'
>>> format_eng(1000000) # for self.places = 1
'1.0 M'
>>> format_eng("-1e-6") # for self.places = 2
u'-1.00 \u03bc'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
dnum = decimal.Decimal(str(num))
sign = 1
if dnum < 0:
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
prefix = self.ENG_PREFIXES[int(pow10)]
mant = sign * dnum / (10 ** pow10)
if self.places is None:
format_str = "%g %s"
elif self.places == 0:
format_str = "%i %s"
elif self.places > 0:
format_str = ("%%.%if %%s" % self.places)
formatted = format_str % (mant, prefix)
return formatted.strip()
class Locator(TickHelper):
"""
Determine the tick locations;
Note, you should not use the same locator between different
:class:`~matplotlib.axis.Axis` because the locator stores references to
the Axis data and view limits
"""
# Some automatic tick locators can generate so many ticks they
# kill the machine when you try and render them.
# This parameter is set to cause locators to raise an error if too
# many ticks are generated.
MAXTICKS = 1000
def tick_values(self, vmin, vmax):
"""
Return the values of the located ticks given **vmin** and **vmax**.
.. note::
To get tick locations with the vmin and vmax values defined
automatically for the associated :attr:`axis` simply call
the Locator instance::
>>> print((type(loc)))
<type 'Locator'>
>>> print((loc()))
[1, 2, 3, 4]
"""
raise NotImplementedError('Derived must override')
def __call__(self):
"""Return the locations of the ticks"""
# note: some locators return data limits, other return view limits,
# hence there is no *one* interface to call self.tick_values.
raise NotImplementedError('Derived must override')
def raise_if_exceeds(self, locs):
"""raise a RuntimeError if Locator attempts to create more than
MAXTICKS locs"""
if len(locs) >= self.MAXTICKS:
msg = ('Locator attempting to generate %d ticks from %s to %s: ' +
'exceeds Locator.MAXTICKS') % (len(locs), locs[0], locs[-1])
raise RuntimeError(msg)
return locs
def view_limits(self, vmin, vmax):
"""
select a scale for the range from vmin to vmax
Normally this method is overridden by subclasses to
change locator behaviour.
"""
return mtransforms.nonsingular(vmin, vmax)
def autoscale(self):
"""autoscale the view limits"""
return self.view_limits(*self.axis.get_view_interval())
def pan(self, numsteps):
"""Pan numticks (can be positive or negative)"""
ticks = self()
numticks = len(ticks)
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if numticks > 2:
step = numsteps * abs(ticks[0] - ticks[1])
else:
d = abs(vmax - vmin)
step = numsteps * d / 6.
vmin += step
vmax += step
self.axis.set_view_interval(vmin, vmax, ignore=True)
def zoom(self, direction):
"Zoom in/out on axis; if direction is >0 zoom in, else zoom out"
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
interval = abs(vmax - vmin)
step = 0.1 * interval * direction
self.axis.set_view_interval(vmin + step, vmax - step, ignore=True)
def refresh(self):
"""refresh internal information based on current lim"""
pass
class IndexLocator(Locator):
"""
Place a tick on every multiple of some base number of points
plotted, e.g., on every 5th point. It is assumed that you are doing
index plotting; i.e., the axis is 0, len(data). This is mainly
useful for x ticks.
"""
def __init__(self, base, offset):
'place ticks on the i-th data points where (i-offset)%base==0'
self._base = base
self.offset = offset
def __call__(self):
"""Return the locations of the ticks"""
dmin, dmax = self.axis.get_data_interval()
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
return self.raise_if_exceeds(
np.arange(vmin + self.offset, vmax + 1, self._base))
class FixedLocator(Locator):
"""
Tick locations are fixed. If nbins is not None,
the array of possible positions will be subsampled to
keep the number of ticks <= nbins +1.
The subsampling will be done so as to include the smallest
absolute value; for example, if zero is included in the
array of possibilities, then it is guaranteed to be one of
the chosen ticks.
"""
def __init__(self, locs, nbins=None):
self.locs = np.asarray(locs)
self.nbins = nbins
if self.nbins is not None:
self.nbins = max(self.nbins, 2)
def __call__(self):
return self.tick_values(None, None)
    def tick_values(self, vmin, vmax):
        """
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if self.nbins is None:
return self.locs
step = max(int(0.99 + len(self.locs) / float(self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.absolute(ticks1).min() < np.absolute(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
class NullLocator(Locator):
"""
No ticks
"""
def __call__(self):
return self.tick_values(None, None)
    def tick_values(self, vmin, vmax):
        """
Return the locations of the ticks.
.. note::
Because the values are Null, vmin and vmax are not used in this
method.
"""
return []
class LinearLocator(Locator):
"""
Determine the tick locations
The first time this function is called it will try to set the
number of ticks to make a nice tick partitioning. Thereafter the
number of ticks will be fixed so that interactive navigation will
be nice
"""
def __init__(self, numticks=None, presets=None):
"""
Use presets to set locs based on lom. A dict mapping vmin, vmax->locs
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if vmax < vmin:
vmin, vmax = vmax, vmin
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks is None:
self._set_numticks()
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
exponent, remainder = divmod(math.log10(vmax - vmin), 1)
if remainder < 0.5:
exponent -= 1
scale = 10 ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
def closeto(x, y):
if abs(x - y) < 1e-10:
return True
else:
return False
class Base:
'this solution has some hacks to deal with floating point inaccuracies'
def __init__(self, base):
assert(base > 0)
self._base = base
def lt(self, x):
'return the largest multiple of base < x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return (d - 1) * self._base
return d * self._base
def le(self, x):
'return the largest multiple of base <= x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1): # was closeto(m, self._base)
#looks like floating point error
return (d + 1) * self._base
return d * self._base
def gt(self, x):
'return the smallest multiple of base > x'
d, m = divmod(x, self._base)
if closeto(m / self._base, 1):
#looks like floating point error
return (d + 2) * self._base
return (d + 1) * self._base
def ge(self, x):
'return the smallest multiple of base >= x'
d, m = divmod(x, self._base)
if closeto(m, 0) and not closeto(m / self._base, 1):
return d * self._base
return (d + 1) * self._base
def get_base(self):
return self._base
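# Example (illustrative): multiples of 0.5 bracketing 1.3.
# >>> b = Base(0.5)
# >>> b.le(1.3), b.ge(1.3)
# (1.0, 1.5)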
class MultipleLocator(Locator):
"""
Set a tick on every integer that is multiple of base in the
view interval
"""
def __init__(self, base=1.0):
self._base = Base(base)
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self._base.ge(vmin)
base = self._base.get_base()
n = (vmax - vmin + 0.001 * base) // base
locs = vmin - base + np.arange(n + 3) * base
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
"""
Set the view limits to the nearest multiples of base that
contain the data
"""
vmin = self._base.le(dmin)
vmax = self._base.ge(dmax)
if vmin == vmax:
vmin -= 1
vmax += 1
return mtransforms.nonsingular(vmin, vmax)
def scale_range(vmin, vmax, n=1, threshold=100):
dv = abs(vmax - vmin)
if dv == 0: # maxabsv == 0 is a special case of this.
return 1.0, 0.0
# Note: this should never occur because
# vmin, vmax should have been checked by nonsingular(),
# and spread apart if necessary.
meanv = 0.5 * (vmax + vmin)
if abs(meanv) / dv < threshold:
offset = 0
elif meanv > 0:
ex = divmod(math.log10(meanv), 1)[0]
offset = 10 ** ex
else:
ex = divmod(math.log10(-meanv), 1)[0]
offset = -10 ** ex
ex = divmod(math.log10(dv / n), 1)[0]
scale = 10 ** ex
return scale, offset
class MaxNLocator(Locator):
"""
Select no more than N intervals at nice locations.
"""
default_params = dict(nbins=10,
steps=None,
trim=True,
integer=False,
symmetric=False,
prune=None)
def __init__(self, *args, **kwargs):
"""
Keyword args:
*nbins*
Maximum number of intervals; one less than max number of ticks.
*steps*
Sequence of nice numbers starting with 1 and ending with 10;
e.g., [1, 2, 4, 5, 10]
*integer*
If True, ticks will take only integer values.
*symmetric*
If True, autoscaling will result in a range symmetric
about zero.
*prune*
['lower' | 'upper' | 'both' | None]
Remove edge ticks -- useful for stacked or ganged plots
where the upper tick of one axes overlaps with the lower
tick of the axes above it.
If prune=='lower', the smallest tick will
be removed. If prune=='upper', the largest tick will be
removed. If prune=='both', the largest and smallest ticks
will be removed. If prune==None, no ticks will be removed.
"""
# I left "trim" out; it defaults to True, and it is not
# clear that there is any use case for False, so we may
# want to remove that kwarg. EF 2010/04/18
if args:
kwargs['nbins'] = args[0]
if len(args) > 1:
raise ValueError(
"Keywords are required for all arguments except 'nbins'")
self.set_params(**self.default_params)
self.set_params(**kwargs)
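    # Example usage (illustrative): at most 5 nicely placed intervals, dropping
    # the lowest tick so stacked axes do not overlap.
    # >>> ax.yaxis.set_major_locator(MaxNLocator(nbins=5, prune='lower'))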
def set_params(self, **kwargs):
if 'nbins' in kwargs:
self._nbins = int(kwargs['nbins'])
if 'trim' in kwargs:
self._trim = kwargs['trim']
if 'integer' in kwargs:
self._integer = kwargs['integer']
if 'symmetric' in kwargs:
self._symmetric = kwargs['symmetric']
if 'prune' in kwargs:
prune = kwargs['prune']
if prune is not None and prune not in ['upper', 'lower', 'both']:
raise ValueError(
"prune must be 'upper', 'lower', 'both', or None")
self._prune = prune
if 'steps' in kwargs:
steps = kwargs['steps']
if steps is None:
self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]
else:
if int(steps[-1]) != 10:
steps = list(steps)
steps.append(10)
self._steps = steps
if 'integer' in kwargs:
self._integer = kwargs['integer']
if self._integer:
self._steps = [n for n in self._steps if divmod(n, 1)[1] < 0.001]
def bin_boundaries(self, vmin, vmax):
nbins = self._nbins
scale, offset = scale_range(vmin, vmax, nbins)
if self._integer:
scale = max(1, scale)
vmin = vmin - offset
vmax = vmax - offset
raw_step = (vmax - vmin) / nbins
scaled_raw_step = raw_step / scale
best_vmax = vmax
best_vmin = vmin
for step in self._steps:
if step < scaled_raw_step:
continue
step *= scale
best_vmin = step * divmod(vmin, step)[0]
best_vmax = best_vmin + step * nbins
if (best_vmax >= vmax):
break
if self._trim:
extra_bins = int(divmod((best_vmax - vmax), step)[0])
nbins -= extra_bins
return (np.arange(nbins + 1) * step + best_vmin + offset)
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=1e-13,
tiny=1e-14)
locs = self.bin_boundaries(vmin, vmax)
prune = self._prune
if prune == 'lower':
locs = locs[1:]
elif prune == 'upper':
locs = locs[:-1]
elif prune == 'both':
locs = locs[1:-1]
return self.raise_if_exceeds(locs)
def view_limits(self, dmin, dmax):
if self._symmetric:
maxabs = max(abs(dmin), abs(dmax))
dmin = -maxabs
dmax = maxabs
dmin, dmax = mtransforms.nonsingular(dmin, dmax, expander=1e-12,
tiny=1.e-13)
return np.take(self.bin_boundaries(dmin, dmax), [0, -1])
def decade_down(x, base=10):
'floor x to the nearest lower decade'
if x == 0.0:
return -base
lx = np.floor(np.log(x) / np.log(base))
return base ** lx
def decade_up(x, base=10):
'ceil x to the nearest higher decade'
if x == 0.0:
return base
lx = np.ceil(np.log(x) / np.log(base))
return base ** lx
def nearest_long(x):
if x == 0:
return long(0)
elif x > 0:
return long(x + 0.5)
else:
return long(x - 0.5)
def is_decade(x, base=10):
if not np.isfinite(x):
return False
if x == 0.0:
return True
lx = np.log(np.abs(x)) / np.log(base)
return is_close_to_int(lx)
def is_close_to_int(x):
if not np.isfinite(x):
return False
return abs(x - nearest_long(x)) < 1e-10
class LogLocator(Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0], numdecs=4, numticks=15):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = numticks
self.numdecs = numdecs
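    # Example usage (illustrative): decade major ticks plus minor ticks at 2x
    # and 5x of each decade.
    # >>> ax.yaxis.set_major_locator(LogLocator(base=10.0))
    # >>> ax.yaxis.set_minor_locator(LogLocator(base=10.0, subs=[2.0, 5.0]))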
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = base + 0.0
def subs(self, subs):
"""
set the minor ticks the log scaling every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs) + 0.0
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if self._subs is None: # autosub
if numdec > 10:
subs = np.array([1.0])
elif numdec > 6:
subs = np.arange(2.0, b, 2.0)
else:
subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec / stride + 1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin) - stride,
math.ceil(vmax) + 2 * stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
return vmin, vmax
minpos = self.axis.get_minpos()
if minpos <= 0 or not np.isfinite(minpos):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
class SymmetricalLogLocator(Locator):
"""
Determine the tick locations for symmetric log axes
"""
def __init__(self, transform, subs=None):
"""
place ticks at the locations base**i * subs[j]
"""
self._transform = transform
if subs is None:
self._subs = [1.0]
else:
self._subs = subs
self.numticks = 15
def __call__(self):
'Return the locations of the ticks'
# Note, these are untransformed coordinates
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._transform.base
t = self._transform.linthresh
if vmax < vmin:
vmin, vmax = vmax, vmin
# The domain is divided into three sections, only some of
# which may actually be present.
#
# <======== -t ==0== t ========>
# aaaaaaaaa bbbbb ccccccccc
#
# a) and c) will have ticks at integral log positions. The
# number of ticks needs to be reduced if there are more
# than self.numticks of them.
#
# b) has a tick at 0 and only 0 (we assume t is a small
# number, and the linear segment is just an implementation
# detail and not interesting.)
#
# We could also add ticks at t, but that seems to usually be
# uninteresting.
#
# "simple" mode is when the range falls entirely within (-t,
# t) -- it should just display (vmin, 0, vmax)
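# A worked example (illustrative numbers, not from the original source):
# with base b=10, linthresh t=1 and a view interval of (-50, 500), all of
# a), b) and c) are present, and with the default subs=[1.0] the code below
# produces the ticks [-10, -1, 0, 1, 10, 100].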
has_a = has_b = has_c = False
if vmin < -t:
has_a = True
if vmax > -t:
has_b = True
if vmax > t:
has_c = True
elif vmin < 0:
if vmax > 0:
has_b = True
if vmax > t:
has_c = True
else:
return [vmin, vmax]
elif vmin < t:
if vmax > t:
has_b = True
has_c = True
else:
return [vmin, vmax]
else:
has_c = True
def get_log_range(lo, hi):
lo = np.floor(np.log(lo) / np.log(b))
hi = np.ceil(np.log(hi) / np.log(b))
return lo, hi
# First, calculate all the ranges, so we can determine striding
if has_a:
if has_b:
a_range = get_log_range(t, -vmin + 1)
else:
a_range = get_log_range(-vmax, -vmin + 1)
else:
a_range = (0, 0)
if has_c:
if has_b:
c_range = get_log_range(t, vmax + 1)
else:
c_range = get_log_range(vmin, vmax + 1)
else:
c_range = (0, 0)
total_ticks = (a_range[1] - a_range[0]) + (c_range[1] - c_range[0])
if has_b:
total_ticks += 1
stride = max(np.floor(float(total_ticks) / (self.numticks - 1)), 1)
decades = []
if has_a:
decades.extend(-1 * (b ** (np.arange(a_range[0], a_range[1],
stride)[::-1])))
if has_b:
decades.append(0.0)
if has_c:
decades.extend(b ** (np.arange(c_range[0], c_range[1], stride)))
# Add the subticks if requested
if self._subs is None:
subs = np.arange(2.0, b)
else:
subs = np.asarray(self._subs)
if len(subs) > 1 or subs[0] != 1.0:
ticklocs = []
for decade in decades:
ticklocs.extend(subs * decade)
else:
ticklocs = decades
return self.raise_if_exceeds(np.array(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._transform.base
if vmax < vmin:
vmin, vmax = vmax, vmin
if not is_decade(abs(vmin), b):
if vmin < 0:
vmin = -decade_up(-vmin, b)
else:
vmin = decade_down(vmin, b)
if not is_decade(abs(vmax), b):
if vmax < 0:
vmax = -decade_down(-vmax, b)
else:
vmax = decade_up(vmax, b)
if vmin == vmax:
if vmin < 0:
vmin = -decade_up(-vmin, b)
vmax = -decade_down(-vmax, b)
else:
vmin = decade_down(vmin, b)
vmax = decade_up(vmax, b)
result = mtransforms.nonsingular(vmin, vmax)
return result
class AutoLocator(MaxNLocator):
def __init__(self):
MaxNLocator.__init__(self, nbins=9, steps=[1, 2, 5, 10])
class AutoMinorLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks. Assumes the scale is linear and major ticks are
evenly spaced.
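Usage sketch (assumes a matplotlib Axes named ``ax``; illustrative, not part
of the original docstring): ``ax.xaxis.set_minor_locator(AutoMinorLocator(2))``
places a single minor tick midway between successive major ticks.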
"""
def __init__(self, n=None):
"""
*n* is the number of subdivisions of the interval between
major ticks; e.g., n=2 will place a single minor tick midway
between major ticks.
If *n* is omitted or None, it will be set to 5 or 4.
"""
self.ndivs = n
def __call__(self):
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
try:
majorstep = majorlocs[1] - majorlocs[0]
except IndexError:
# Need at least two major ticks to find minor tick locations
# TODO: Figure out a way to still be able to display minor
# ticks without two major ticks visible. For now, just display
# no ticks at all.
majorstep = 0
if self.ndivs is None:
if majorstep == 0:
# TODO: Need a better way to figure out ndivs
ndivs = 1
else:
x = int(round(10 ** (np.log10(majorstep) % 1)))
if x in [1, 5, 10]:
ndivs = 5
else:
ndivs = 4
else:
ndivs = self.ndivs
minorstep = majorstep / ndivs
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
if len(majorlocs) > 0:
t0 = majorlocs[0]
tmin = ((vmin - t0) // minorstep + 1) * minorstep
tmax = ((vmax - t0) // minorstep + 1) * minorstep
locs = np.arange(tmin, tmax, minorstep) + t0
cond = np.abs((locs - t0) % majorstep) > minorstep / 10.0
locs = locs.compress(cond)
else:
locs = []
return self.raise_if_exceeds(np.array(locs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
class OldAutoLocator(Locator):
"""
On autoscale this class picks the best MultipleLocator to set the
view limits and the tick locs.
"""
def __init__(self):
self._locator = LinearLocator()
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self.raise_if_exceeds(self._locator())
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
def refresh(self):
'refresh internal information based on current lim'
vmin, vmax = self.axis.get_view_interval()
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
d = abs(vmax - vmin)
self._locator = self.get_locator(d)
return self._locator.view_limits(vmin, vmax)
def get_locator(self, d):
'pick the best locator based on a distance'
d = abs(d)
if d <= 0:
locator = MultipleLocator(0.2)
else:
try:
ld = math.log10(d)
except OverflowError:
raise RuntimeError('AutoLocator illegal data interval range')
fld = math.floor(ld)
base = 10 ** fld
#if ld==fld: base = 10**(fld-1)
#else: base = 10**fld
if d >= 5 * base:
ticksize = base
elif d >= 2 * base:
ticksize = base / 2.0
else:
ticksize = base / 5.0
locator = MultipleLocator(ticksize)
return locator
__all__ = ('TickHelper', 'Formatter', 'FixedFormatter',
'NullFormatter', 'FuncFormatter', 'FormatStrFormatter',
'ScalarFormatter', 'LogFormatter', 'LogFormatterExponent',
'LogFormatterMathtext', 'Locator', 'IndexLocator',
'FixedLocator', 'NullLocator', 'LinearLocator',
'LogLocator', 'AutoLocator', 'MultipleLocator',
'MaxNLocator', 'AutoMinorLocator',)
| mit |
ekaakurniawan/Bioinformatics-Tools | plot_conservation/plot_conservation.py | 1 | 2539 | # Copyright (C) 2012 by Eka A. Kurniawan
# eka.a.kurniawan(ta)gmail(tod)com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Tested on:
# - Python 2.7.3
# - NumPy 1.6.2
# - MatPlotLib 1.1.1
import numpy as np
import matplotlib.pyplot as plot
files = [['H1N1 - Avian - protein_conservation.txt', 'H1N1 - Avian'],
['H1N1 - Human - protein 1a_conservation.txt', 'H1N1 - Human 1'],
['H1N1 - Human - protein 1b_conservation.txt', 'H1N1 - Human 2'],
['H1N1 - Human - protein 2a_conservation.txt', 'H1N1 - Human 3'],
['H1N1 - Human - protein 2b_conservation.txt', 'H1N1 - Human 4'],
['H1N1 - Human - protein 3a_conservation.txt', 'H1N1 - Human 5'],
['H1N1 - Human - protein 3b_conservation.txt', 'H1N1 - Human 6'],
['H1N1 - Swine - protein_conservation.txt', 'H1N1 - Swine'],
['H3N2 - Avian - protein_conservation.txt', 'H3N2 - Avian'],
['H3N2 - Human - protein 1_conservation.txt', 'H3N2 - Human 1'],
['H3N2 - Human - protein 2_conservation.txt', 'H3N2 - Human 2'],
['H3N2 - Human - protein 3_conservation.txt', 'H3N2 - Human 3'],
['H3N2 - Swine - protein_conservation.txt', 'H3N2 - Swine'],
['H5N1 - Avian - protein_conservation.txt', 'H5N1 - Avian'],
['H5N1 - Human - protein_conservation.txt', 'H5N1 - Human'],
['H5N1 - Swine - protein_conservation.txt', 'H5N1 - Swine']]
conservations = []
totalFile = len(files)
for file in files:
inFile = open(file[0], 'r')
conservations.append(np.array(inFile.read().split(',')[:-1], \
dtype = np.float))
inFile.close()
plot.boxplot([np.asarray(cs) for cs in conservations])
plot.title('Conservation Box Plot of Different Viruses')
plot.ylabel('Score (0 to 11)')
plot.xticks(np.arange(totalFile + 1), [''] + [file[1] for file in files], \
rotation = -90)
plot.show()
| gpl-2.0 |
DiamondLightSource/auto_tomo_calibration-experimental | measure_resolution/lmfit/models.py | 7 | 16554 | import numpy as np
from .model import Model
from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, pearson7,
step, rectangle, breit_wigner, logistic,
students_t, lognormal, damped_oscillator,
expgaussian, skewed_gaussian, donaich,
skewed_voigt, exponential, powerlaw, linear,
parabolic)
from . import lineshapes
from .asteval import Interpreter
from .astutils import get_ast_names
class DimensionalError(Exception):
pass
def _validate_1d(independent_vars):
if len(independent_vars) != 1:
raise DimensionalError(
"This model requires exactly one independent variable.")
def index_of(arr, val):
"""return index of array nearest to a value
"""
if val < min(arr):
return 0
return np.abs(arr-val).argmin()
def fwhm_expr(model):
"return constraint expression for fwhm"
return "%.7f*%ssigma" % (model.fwhm_factor, model.prefix)
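# For example (illustrative, derived from the format string above): a
# GaussianModel with prefix='g1_' has fwhm_factor 2.354820, so this returns
# the constraint expression '2.3548200*g1_sigma'.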
def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
"estimate amp, cen, sigma for a peak, create params"
if x is None:
return 1.0, 0.0, 1.0
maxy, miny = max(y), min(y)
maxx, minx = max(x), min(x)
imaxy = index_of(y, maxy)
cen = x[imaxy]
amp = (maxy - miny)*2.0
sig = (maxx-minx)/6.0
halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
if negative:
imaxy = index_of(y, miny)
amp = -(maxy - miny)*2.0
halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
if len(halfmax_vals) > 2:
sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
cen = x[halfmax_vals].mean()
amp = amp*sig*ampscale
sig = sig*sigscale
pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
pars['%ssigma' % model.prefix].set(min=0.0)
return pars
def update_param_vals(pars, prefix, **kwargs):
"""convenience function to update parameter values
with keyword arguments"""
for key, val in kwargs.items():
pname = "%s%s" % (prefix, key)
if pname in pars:
pars[pname].value = val
return pars
COMMON_DOC = """
Parameters
----------
independent_vars: list of strings to be set as variable names
missing: None, 'drop', or 'raise'
None: Do not check for null or missing values.
'drop': Drop null or missing observations in data.
Use pandas.isnull if pandas is available; otherwise,
silently fall back to numpy.isnan.
'raise': Raise a (more helpful) exception when data contains null
or missing values.
prefix: string to prepend to parameter names, needed to add two Models that
have parameter names in common. None by default.
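For example (an illustrative sketch, not part of the original text), two peak
models can be added without parameter name clashes:
model = GaussianModel(prefix='g1_') + GaussianModel(prefix='g2_')
params = model.make_params(g1_center=0.0, g2_center=5.0)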
"""
class ConstantModel(Model):
__doc__ = "x -> c" + COMMON_DOC
def __init__(self, *args, **kwargs):
def constant(x, c):
return c
super(ConstantModel, self).__init__(constant, *args, **kwargs)
def guess(self, data, **kwargs):
pars = self.make_params()
pars['%sc' % self.prefix].set(value=data.mean())
return update_param_vals(pars, self.prefix, **kwargs)
class LinearModel(Model):
__doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LinearModel, self).__init__(linear, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
sval, oval = 0., 0.
if x is not None:
sval, oval = np.polyfit(x, data, 1)
pars = self.make_params(intercept=oval, slope=sval)
return update_param_vals(pars, self.prefix, **kwargs)
class QuadraticModel(Model):
__doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
def __init__(self, *args, **kwargs):
super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
a, b, c = 0., 0., 0.
if x is not None:
a, b, c = np.polyfit(x, data, 2)
pars = self.make_params(a=a, b=b, c=c)
return update_param_vals(pars, self.prefix, **kwargs)
ParabolicModel = QuadraticModel
class PolynomialModel(Model):
__doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
MAX_DEGREE=7
DEGREE_ERR = "degree must be an integer less than %d."
def __init__(self, degree, *args, **kwargs):
if not isinstance(degree, int) or degree > self.MAX_DEGREE:
raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
self.poly_degree = degree
pnames = ['c%i' % (i) for i in range(degree + 1)]
kwargs['param_names'] = pnames
def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
pars = self.make_params()
if x is not None:
out = np.polyfit(x, data, self.poly_degree)
for i, coef in enumerate(out[::-1]):
pars['%sc%i'% (self.prefix, i)].set(value=coef)
return update_param_vals(pars, self.prefix, **kwargs)
class GaussianModel(Model):
__doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class LorentzianModel(Model):
__doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
return update_param_vals(pars, self.prefix, **kwargs)
class VoigtModel(Model):
__doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
fwhm_factor = 3.60131
def __init__(self, *args, **kwargs):
super(VoigtModel, self).__init__(voigt, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative,
ampscale=1.5, sigscale=0.65)
return update_param_vals(pars, self.prefix, **kwargs)
class PseudoVoigtModel(Model):
__doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
self.set_param_hint('fraction', value=0.5)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
pars['%sfraction' % self.prefix].set(value=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class Pearson7Model(Model):
__doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
def __init__(self, *args, **kwargs):
super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
self.set_param_hint('expon', value=1.5)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sexpon' % self.prefix].set(value=1.5)
return update_param_vals(pars, self.prefix, **kwargs)
class StudentsTModel(Model):
__doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class BreitWignerModel(Model):
__doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
def __init__(self, *args, **kwargs):
super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sq' % self.prefix].set(value=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
class LognormalModel(Model):
__doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
pars['%ssigma' % self.prefix].set(min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class DampedOscillatorModel(Model):
__doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative,
ampscale=0.1, sigscale=0.1)
return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialGaussianModel(Model):
__doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
def __init__(self, *args, **kwargs):
super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class SkewedGaussianModel(Model):
__doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class DonaichModel(Model):
__doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DonaichModel, self).__init__(donaich, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class PowerLawModel(Model):
__doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""
def __init__(self, *args, **kwargs):
super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
try:
expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
except:
expon, amp = 1, np.log(abs(max(data)+1.e-9))
pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialModel(Model):
__doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""
def __init__(self, *args, **kwargs):
super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
try:
sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
except:
sval, oval = 1., np.log(abs(max(data)+1.e-9))
pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
return update_param_vals(pars, self.prefix, **kwargs)
class StepModel(Model):
__doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StepModel, self).__init__(step, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
if x is None:
return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center=(xmax+xmin)/2.0)
pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class RectangleModel(Model):
__doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""
def __init__(self, *args, **kwargs):
super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
self.set_param_hint('midpoint',
expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
self.prefix))
def guess(self, data, x=None, **kwargs):
if x is None:
return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center1=(xmax+xmin)/4.0,
center2=3*(xmax+xmin)/4.0)
pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class ExpressionModel(Model):
"""Model from User-supplied expression
%s
""" % COMMON_DOC
idvar_missing = "No independent variable found in\n %s"
idvar_notfound = "Cannot find independent variables '%s' in\n %s"
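# Usage sketch (illustrative, not from the original source): 'x' is picked up
# automatically as the independent variable because it appears in the
# expression string, e.g.
#   mod = ExpressionModel('off + amp * exp(-x/tau)')
#   params = mod.make_params(off=0.5, amp=5.0, tau=2.0)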
def __init__(self, expr, independent_vars=None, init_script=None,
*args, **kwargs):
# create ast evaluator, load custom functions
self.asteval = Interpreter()
for name in lineshapes.functions:
self.asteval.symtable[name] = getattr(lineshapes, name, None)
if init_script is not None:
self.asteval.eval(init_script)
# save expr as text, parse to ast, save for later use
self.expr = expr
self.astcode = self.asteval.parse(expr)
# find all symbol names found in expression
sym_names = get_ast_names(self.astcode)
if independent_vars is None and 'x' in sym_names:
independent_vars = ['x']
if independent_vars is None:
raise ValueError(self.idvar_missing % (self.expr))
# determine which named symbols are parameter names,
# try to find all independent variables
idvar_found = [False]*len(independent_vars)
param_names = []
for name in sym_names:
if name in independent_vars:
idvar_found[independent_vars.index(name)] = True
elif name not in self.asteval.symtable:
param_names.append(name)
# make sure we have all independent parameters
if not all(idvar_found):
lost = []
for ix, found in enumerate(idvar_found):
if not found:
lost.append(independent_vars[ix])
lost = ', '.join(lost)
raise ValueError(self.idvar_notfound % (lost, self.expr))
kwargs['independent_vars'] = independent_vars
def _eval(**kwargs):
for name, val in kwargs.items():
self.asteval.symtable[name] = val
return self.asteval.run(self.astcode)
super(ExpressionModel, self).__init__(_eval, *args, **kwargs)
# set param names here, and other things normally
# set in _parse_params(), which will be short-circuited.
self.independent_vars = independent_vars
self._func_allargs = independent_vars + param_names
self._param_names = set(param_names)
self._func_haskeywords = True
self.def_vals = {}
def __repr__(self):
return "<lmfit.ExpressionModel('%s')>" % (self.expr)
def _parse_params(self):
"""ExpressionModel._parse_params is over-written (as `pass`)
to prevent normal parsing of function for parameter names
"""
pass
| apache-2.0 |
linebp/pandas | pandas/tests/scalar/test_timedelta.py | 7 | 27311 | """ test the scalar Timedelta """
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat)
from pandas._libs.tslib import iNaT, NaTType
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def setup_method(self, method):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert (Timedelta(days=10, microseconds=10 * 1000 * 1000)
.value == expected)
# gh-8757: test construction with np dtypes
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype(
'm8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH 8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
pytest.raises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
pytest.raises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
pytest.raises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# round-trip both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Second(2)) ==
Timedelta('0 days, 00:00:02'))
# gh-11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
pytest.raises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
pytest.raises(OverflowError, pd.Timedelta, value)
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_repr(self):
assert (repr(Timedelta(10, unit='d')) ==
"Timedelta('10 days 00:00:00')")
assert (repr(Timedelta(10, unit='s')) ==
"Timedelta('0 days 00:00:10')")
assert (repr(Timedelta(10, unit='ms')) ==
"Timedelta('0 days 00:00:00.010000')")
assert (repr(Timedelta(-10, unit='ms')) ==
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
assert td == np.timedelta64(td.value, 'ns')
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, 'ns')
assert td == td64
assert isinstance(td64, np.timedelta64)
# this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
assert td != td.to_pytimedelta()
def test_freq_conversion(self):
# truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, 's')
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, 'ns')
assert result == td.value
# floordiv
td = Timedelta('1 days 2 hours 3 ns')
result = td // np.timedelta64(1, 'D')
assert result == 1
result = td // np.timedelta64(1, 's')
assert result == 93600
result = td // np.timedelta64(1, 'ns')
assert result == td.value
def test_fields(self):
def check(value):
# that we are int/long like
assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
assert abs(td) == Timedelta('13:48:48')
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta('0 days 13:48:48')
assert -Timedelta('-1 days, 10:11:12').value == 49728000000000
assert Timedelta('-1 days, 10:11:12').value == -49728000000000
rng = to_timedelta('-1 days, 10:11:12.100123456')
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_nat_converters(self):
assert to_timedelta('nat', box=False).astype('int64') == iNaT
assert to_timedelta('nan', box=False).astype('int64') == iNaT
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
assert result == expected
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
assert ct(0) == np.timedelta64(0, 'ns')
assert ct(10) == np.timedelta64(10, 'ns')
assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]')
assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]')
assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]')
assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]')
assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]')
def test_timedelta_conversions(self):
assert (ct(timedelta(seconds=1)) ==
np.timedelta64(1, 's').astype('m8[ns]'))
assert (ct(timedelta(microseconds=1)) ==
np.timedelta64(1, 'us').astype('m8[ns]'))
assert (ct(timedelta(days=1)) ==
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
assert not (v in td)
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
assert ct('10') == np.timedelta64(10, 'ns')
assert ct('10ns') == np.timedelta64(10, 'ns')
assert ct('100') == np.timedelta64(100, 'ns')
assert ct('100ns') == np.timedelta64(100, 'ns')
assert ct('1000') == np.timedelta64(1000, 'ns')
assert ct('1000ns') == np.timedelta64(1000, 'ns')
assert ct('1000NS') == np.timedelta64(1000, 'ns')
assert ct('10us') == np.timedelta64(10000, 'ns')
assert ct('100us') == np.timedelta64(100000, 'ns')
assert ct('1000us') == np.timedelta64(1000000, 'ns')
assert ct('1000Us') == np.timedelta64(1000000, 'ns')
assert ct('1000uS') == np.timedelta64(1000000, 'ns')
assert ct('1ms') == np.timedelta64(1000000, 'ns')
assert ct('10ms') == np.timedelta64(10000000, 'ns')
assert ct('100ms') == np.timedelta64(100000000, 'ns')
assert ct('1000ms') == np.timedelta64(1000000000, 'ns')
assert ct('-1s') == -np.timedelta64(1000000000, 'ns')
assert ct('1s') == np.timedelta64(1000000000, 'ns')
assert ct('10s') == np.timedelta64(10000000000, 'ns')
assert ct('100s') == np.timedelta64(100000000000, 'ns')
assert ct('1000s') == np.timedelta64(1000000000000, 'ns')
assert ct('1d') == conv(np.timedelta64(1, 'D'))
assert ct('-1d') == -conv(np.timedelta64(1, 'D'))
assert ct('1D') == conv(np.timedelta64(1, 'D'))
assert ct('10D') == conv(np.timedelta64(10, 'D'))
assert ct('100D') == conv(np.timedelta64(100, 'D'))
assert ct('1000D') == conv(np.timedelta64(1000, 'D'))
assert ct('10000D') == conv(np.timedelta64(10000, 'D'))
# space
assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D'))
assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))
# invalid
pytest.raises(ValueError, ct, '1foo')
pytest.raises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert ct('1days') == conv(d1)
assert ct('1days,') == conv(d1)
assert ct('- 1days,') == -conv(d1)
assert ct('00:00:01') == conv(np.timedelta64(1, 's'))
assert ct('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.01') == conv(np.timedelta64(
1000 * (6 * 3600 + 1) + 10, 'ms'))
assert (ct('- 1days, 00:00:01') ==
conv(-d1 + np.timedelta64(1, 's')))
assert (ct('1days, 06:00:01') ==
conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))
assert (ct('1days, 06:00:01.01') ==
conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
pytest.raises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so
# might be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = timedelta_range('1 second', periods=20)
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
# Beyond lower limit, a NAT before the Overflow
assert isinstance(min_td - Timedelta(1, 'ns'), NaTType)
with pytest.raises(OverflowError):
min_td - Timedelta(2, 'ns')
with pytest.raises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
assert isinstance(td, NaTType)
with pytest.raises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with pytest.raises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isnull().all()
assert result.iloc[1].isnull().all()
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10, nanoseconds=12)
expected = 'P6DT0H50M3.010010012S'
result = td.isoformat()
assert result == expected
td = Timedelta(days=4, hours=12, minutes=30, seconds=5)
result = td.isoformat()
expected = 'P4DT12H30M5S'
assert result == expected
td = Timedelta(nanoseconds=123)
result = td.isoformat()
expected = 'P0DT0H0M0.000000123S'
assert result == expected
# trim nano
td = Timedelta(microseconds=10)
result = td.isoformat()
expected = 'P0DT0H0M0.00001S'
assert result == expected
# trim micro
td = Timedelta(milliseconds=1)
result = td.isoformat()
expected = 'P0DT0H0M0.001S'
assert result == expected
# don't strip every 0
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
assert result == expected
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for l, r in [(td, 'a'), ('a', td)]:
with pytest.raises(TypeError):
l + r
with pytest.raises(TypeError):
l > r
assert not l == r
assert l != r
| bsd-3-clause |
Haddy1/ClusterMDS | lib/libSMACOF.py | 1 | 5343 | #!/usr/bin/python
from __future__ import print_function
from sklearn.decomposition import PCA
import numpy as np
import scipy
from scipy.spatial.distance import squareform
from sklearn.metrics import euclidean_distances
import matplotlib.pyplot as plt
class SMACOF():
"""
Multidimensional Scaling
Multidimensional Scaling using the "Scaling by MAjorizing a COmplicated Function" (SMACOF) algorithm
Parameters
---------
data: array-like
array containing high dimensional data points
n_components: int, optional, default: 2
number of dimensions to which the data should be transformed
maxiter: int, optional, default: 10000
Maximum number of iterations of the SMACOF algorithm
References
---------
Borg, I.; Groenen, P. (1997), Modern Multidimensional Scaling: theory and applications, New York: Springer-Verlag.
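Examples
---------
A minimal usage sketch (the import path is an assumption based on this file's
location; the random data is purely illustrative):
>>> import numpy as np
>>> from libSMACOF import SMACOF
>>> data = np.random.rand(20, 5)
>>> mds = SMACOF(data, n_components=2)
>>> X_low = mds.solve(data)
>>> X_low.shape
(20, 2)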
"""
#setup constants
def __init__(self, data, n_components=2, maxiter = 10000):
self.n_components = n_components
self.maxiter = maxiter
self.delta = self.calcDist(data)
self.size_inv = 1.0/data.shape[0]
def getInitValues(self, data):
"""
Provides initial values
Parameters
----------
data: array-like
high dimensional data
Returns
-------
init_best: array-like
Guess for initial low dimensional data
dist_best: array-like
Initial Distance Matrix for init_best
s_best: double
Initial Result of Cost Function sigma
"""
n_init = 4
#first guess: pca_transform
pca = PCA(self.n_components)
init_best = pca.fit(data).transform(data)
dist_best = self.calcDist(init_best)
s_best = self.sigma(dist_best)
#try random initial values
for k in range(n_init):
init = np.random.uniform(0.000001, 10, (data.shape[0], self.n_components))
dist = self.calcDist(init)
s = self.sigma(dist)
if s < s_best:
init_best = init
dist_best = dist
s_best = s
return init_best, dist_best, s_best
def calcDist(self, X):
"""
Calculates Distance Matrix
Parameters
---------
X: array-like
Input Array
Returns
--------
dist: array-like
square symmetric array containing the pairwise Euclidean distances between the rows of X
"""
XX = np.dot(np.sum(X**2, axis=1)[:, np.newaxis], np.ones((1,X.shape[0])))
#XX = np.sum(X**2, axis=1)
YY = XX.T
dist = np.dot(X, X.T)
dist *= -2
dist += XX
dist += YY
np.maximum(dist, 0, out=dist)
return np.sqrt(dist)
def sigma(self, distX):
"""
Cost Function to be minimized
Parameters
--------
distX: array-like
distance matrix of low dimensional X for current iteration step
Returns
------
s: float
sum of squared differences between the low-dimensional distances distX and the high-dimensional distances delta (raw stress)
"""
s = np.sum( np.square(np.subtract(distX, self.delta)))
return s
def bCalc(self, B, distZ):
"""
Calculates the B(Z) matrix used in the Guttman transform
"""
#Ignore divide by zero errors, we'll fix them later
with np.errstate(divide='ignore', invalid='ignore'):
ratio = np.divide(self.delta, distZ)
#search for invalid values and set them to zero
ratio[ ~ np.isfinite(ratio)] = 0
B = -ratio
B[np.diag_indices(B.shape[0])] += ratio.sum(axis=1)
return B
def guttmanTrans(self, X, Z, distZ, B):
"""
Guttman Transformation: Update Function for X
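Implements the SMACOF majorization update X <- (1/n) * B(Z) Z, where
size_inv holds 1/n and bCalc builds B(Z) from delta and the current
low-dimensional distances distZ.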
"""
X = self.size_inv * np.dot(self.bCalc(B, distZ), Z)
return X
def solve(self, data, initX = None, eps = 1e-6):
"""
Performs Multidimensional Scaling
Parameters
---------
data: array-like
high dimensional data to be transformed in low dimensional form
initX: array-like, optional
Initial Guess for low dimensional data
default is PCA transformed data or random data, depending which has better stress
eps: float
convergence tolerance, w.r.t sigma
Returns
-------
X: array-like
low dimensional data
"""
#check if initial guess was provided
if initX is None:
X, distX, s = self.getInitValues(data)
else:
X = initX
distX = self.calcDist(X)
s = self.sigma(distX)
dist_norm = np.sqrt((X**2).sum(axis=1)).sum()
s = s / dist_norm
Z = X
distZ = distX
B = np.zeros(distZ.shape)
for k in range(1,self.maxiter):
s_old = s
X = self.guttmanTrans(X, Z, distZ, B)
#distX = self.calcDist(distX, X)
distX = self.calcDist(X)
dist_norm = np.sqrt((X**2).sum(axis=1)).sum()
s = self.sigma(distX) / dist_norm
if (s_old - s ) < eps:
break
Z = X
distZ = distX
return (X)
| gpl-3.0 |
hrjn/scikit-learn | examples/mixture/plot_gmm_sin.py | 103 | 6101 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example demonstrates the behavior of Gaussian mixture models fit on data
that was not sampled from a mixture of Gaussian random variables. The dataset
is formed by 100 points loosely spaced following a noisy sine curve. There is
therefore no ground truth value for the number of Gaussian components.
The first model is a classical Gaussian Mixture Model with 10 components fit
with the Expectation-Maximization algorithm.
The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process
prior fit with variational inference. The low value of the concentration prior
makes the model favor a lower number of active components. This models
"decides" to focus its modeling power on the big picture of the structure of
the dataset: groups of points with alternating directions modeled by
non-diagonal covariance matrices. Those alternating directions roughly capture
the alternating nature of the original sine signal.
The third model is also a Bayesian Gaussian mixture model with a Dirichlet
process prior but this time the value of the concentration prior is higher
giving the model more liberty to model the fine-grained structure of the data.
The result is a mixture with a larger number of active components that is
similar to the first model where we arbitrarily decided to fix the number of
components to 10.
Which model is the best is a matter of subjective judgement: do we want to
favor models that only capture the big picture to summarize and explain most of
the structure of the data while ignoring the details or do we prefer models
that closely follow the high density regions of the signal?
The last two panels show how we can sample from the last two models. The
resulting samples distributions do not look exactly like the original data
distribution. The difference primarily stems from the approximation error we
made by using a model that assumes that the data was generated by a finite
number of Gaussian components instead of a continuous noisy sine curve.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y, means, covariances, index, title):
splot = plt.subplot(5, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
def plot_samples(X, Y, n_components, index, title):
plt.subplot(5, 1, 4 + index)
for i, color in zip(range(n_components), color_iter):
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y == i):
continue
plt.scatter(X[Y == i, 0], X[Y == i, 1], .8, color=color)
plt.xlim(-6., 4. * np.pi - 6.)
plt.ylim(-5., 5.)
plt.title(title)
plt.xticks(())
plt.yticks(())
# Parameters
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4. * np.pi / n_samples
for i in range(X.shape[0]):
x = i * step - 6.
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3. * (np.sin(x) + np.random.normal(0, .2))
plt.figure(figsize=(10, 10))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.2, wspace=.05,
left=.03, right=.97)
# Fit a Gaussian mixture with EM using ten components
gmm = mixture.GaussianMixture(n_components=10, covariance_type='full',
max_iter=100).fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Expectation-maximization')
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e-2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="random", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=0.01$.")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 0,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=0.01$ sampled with $2000$ samples.")
dpgmm = mixture.BayesianGaussianMixture(
n_components=10, covariance_type='full', weight_concentration_prior=1e+2,
weight_concentration_prior_type='dirichlet_process',
mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2),
init_params="kmeans", max_iter=100, random_state=2).fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2,
"Bayesian Gaussian mixture models with a Dirichlet process prior "
r"for $\gamma_0=100$")
X_s, y_s = dpgmm.sample(n_samples=2000)
plot_samples(X_s, y_s, dpgmm.n_components, 1,
"Gaussian mixture with a Dirichlet process prior "
r"for $\gamma_0=100$ sampled with $2000$ samples.")
plt.show()
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/glif_psc_neuron.py | 14 | 9617 | # -*- coding: utf-8 -*-
#
# glif_psc_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Current-based generalized leaky integrate and fire (GLIF) neuron example
------------------------------------------------------------------------
Simple example of how to use the ``glif_psc`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and impinging spikes.
Voltage traces, current traces, threshold traces, and spikes are shown.
KEYWORDS: glif_psc
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the NEST kernel and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
##############################################################################
# We also pre-define the synapse time constant array, [2.0, 1.0] ms for
# the two desired synaptic ports of the GLIF neurons. Note that the default
# synapse time constant is [2.0] ms, which is for neuron with one port.
syn_tau = [2.0, 1.0]
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_psc`` node. The node is
# created by setting relative model mechanism parameters and the time constant
# of the 2 synaptic ports as mentioned above. Other neuron parameters are set
# as default. The five ``glif_psc`` node handles are combined into a list.
n_lif = nest.Create("glif_psc",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_asc = nest.Create("glif_psc",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r_asc = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False,
"tau_syn": syn_tau})
n_lif_r_asc_a = nest.Create("glif_psc",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True,
"tau_syn": syn_tau})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
###############################################################################
# For the stimulation input to the glif_psc neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator and a Poisson
# generator, and a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread over three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Each of the excitation and inhibition spike generators generates three spikes
# at different time points. Configuration of the current generator includes the
# definition of the start and stop times and the amplitude of the injected
# current. Configuration of the Poisson generator includes the definition of
# the start and stop times and the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 150000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator, the spike generators, Poisson generator (via
# parrot neuron) to receptor 0, 1, and 2 of the GLIF neurons, respectively.
# Note that the Poisson generator is connected to the parrot neuron to relay the
# spikes to the glif_psc neuron.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 2})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "I_syn", "threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_recorder`` is created and connected to the neurons to record the
# spikes generated by the glif_psc neurons.
sr = nest.Create("spike_recorder")
nest.Connect(neurons, sr)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike recorder.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sr.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong blue),
# the sum of after spike currents (in cyan), and the synaptic currents (in
# magenta) in responding to the spike inputs to the neurons in the third panel.
# We plot all three panels for each level of the GLIF model in a separate
# figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(3, 1, height_ratios=[2, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_psc neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.plot(t, data["I_syn"][senders == node_id], "m")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
multipath-tcp/mptcp-analysis-scripts | common.py | 1 | 33254 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Matthieu Baerts & Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from __future__ import print_function
##################################################
# IMPORTS #
##################################################
import os
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import numpy as np
import pickle
from scipy.stats import gaussian_kde
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
from multiprocessing import Process
##################################################
# COMMON CLASSES #
##################################################
class cd:
""" Context manager to change the current working directory """
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
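# A hedged usage sketch of the cd context manager; the '/tmp' path below is an
# arbitrary illustration, not a directory used elsewhere in these scripts:
#
#     with cd('/tmp'):
#         print(os.getcwd())  # prints '/tmp' (or the path it resolves to)
#     # back in the original working directory once the block exits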
##################################################
# COMMON EXCEPTIONS #
##################################################
class TSharkError(Exception):
pass
##################################################
# COMMON CONSTANTS #
##################################################
# Lines in xpl files that start with one of the words in XPL_ONE_POINT have one point
XPL_ONE_POINT = ['darrow', 'uarrow', 'diamond', 'dot', 'atext', 'dtick', 'utick', 'atext', 'box', 'htick']
# Lines in xpl files that start with one of the words in XPL_TWO_POINTS have two points
XPL_TWO_POINTS = ['line']
# The default stat directory
DEF_STAT_DIR = 'stats'
# The default aggl directory
DEF_AGGL_DIR = 'aggls'
# The default sums directory
DEF_SUMS_DIR = 'sums'
# The default rtt directory
DEF_RTT_DIR = 'rtt'
# Another rtt directory, for mptcp subflows
DEF_RTT_SUBFLOW_DIR = 'rtt_subflow'
# Directory with information about failed establishment of subflows (TCPConnections)
DEF_FAILED_CONNS_DIR = 'failed_conns'
# Directory of acksize info
DEF_ACKSIZE_DIR = 'acksize'
# The default interface to analyse
DEF_IFACE = 'any'
# The time sequence and throughput graphs directory
TSG_THGPT_DIR = 'tsg_thgpt'
# The congestion window graphs directory
CWIN_DIR = 'cwin'
# The agglomerated graphs directory
AGGL_DIR = 'aggl'
# The directory of csv files
CSV_DIR = 'csv'
# Following constants are used to make the code cleaner and more robust (for dictionary)
# Those are mainly determined by the output of mptcptrace
CELL = 'cellular'
WIFI = 'wifi'
# IPv4 or IPv6
TYPE = 'type'
# Interface: CELL or WIFI
IF = 'interface'
# Indicate if the connection has full info or only a subset
TCP_COMPLETE = 'tcp_complete'
# Source IP address
SADDR = 'saddr'
# Destination IP address
DADDR = 'daddr'
# Source port
SPORT = 'sport'
# Destination port
DPORT = 'dport'
# Window scale for source
WSCALESRC = 'wscalesrc'
# Window scale for destination
WSCALEDST = 'wscaledst'
# Start of a connection (first packet)
START = 'start_time'
# Duration of a connection
DURATION = 'duration'
# Number of packets
PACKS = 'packets'
# Number of bytes
BYTES = 'bytes'
# Number of data bytes (according to tcptrace)
BYTES_DATA = 'bytes_data'
# Number of bytes missed by tcptrace (if non-zero, this connection should be taken with care)
MISSED_DATA = 'missed_data'
# Number of packets retransmitted
PACKS_RETRANS = 'packets_retrans'
# Number of bytes retransmitted
BYTES_RETRANS = 'bytes_retrans'
# Timestamp of retransmissions
TIMESTAMP_RETRANS = 'timestamp_retrans'
# tcpcsm information about retransmissions
TCPCSM_RETRANS = 'tcpcsm_retrans'
# Number of packets out of order
PACKS_OOO = 'packets_outoforder'
# Congestion window graph data dictionary
CWIN_DATA = 'congestion_window_data'
# Timestamp of reinjected packets
REINJ_ORIG_TIMESTAMP = 'reinjected_orig_timestamp'
# Reinjected packets
REINJ_ORIG_PACKS = 'reinjected_orig_packets'
# Reinjected bytes
REINJ_ORIG_BYTES = 'reinjected_orig_bytes'
# Reinjected origin
REINJ_ORIG = 'reinjected_orig'
# Is reinjection (timestamp in char + bytes reinjected)
IS_REINJ = 'is_reinjection'
# Number of bytes returned by mptcptrace (unique bytes)
BYTES_MPTCPTRACE = 'bytes_mptcptrace'
# Total number of bytes of frames
BYTES_FRAMES_TOTAL = 'bytes_frames_total'
# Total number of frames
FRAMES_TOTAL = 'frames_total'
# Total number of retransmitted bytes of frames
BYTES_FRAMES_RETRANS = 'bytes_frames_retrans'
# Total number of retransmitted frames
FRAMES_RETRANS = 'frames_retrans'
# Throughput returned by tcptrace
THGPT_TCPTRACE = 'throughput_tcptrace'
# Throughput returned by mptcptrace
THGPT_MPTCPTRACE = 'throughput_mptcptrace'
# MPTCP bursts
BURSTS = 'bursts'
# Flights information
FLIGHT = 'flight'
# RTT info
RTT_SAMPLES = 'rtt_samples'
RTT_MIN = 'rtt_min'
RTT_MAX = 'rtt_max'
RTT_AVG = 'rtt_avg'
RTT_STDEV = 'rtt_stdev'
RTT_3WHS = 'rtt_from_3whs'
RTT_99P = 'rtt_99p'
RTT_98P = 'rtt_98p'
RTT_97P = 'rtt_97p'
RTT_95P = 'rtt_95p'
RTT_90P = 'rtt_90p'
RTT_75P = 'rtt_75p'
RTT_MED = 'rtt_median'
RTT_25P = 'rtt_25p'
# For aggregation
C2S = 'client2server'
S2C = 'server2client'
# Kept for compatibility reasons
S2D = C2S
D2S = S2C
# Number of SYN, FIN, RST and ACK seen on a subflow
NB_SYN = 'nb_syn'
NB_FIN = 'nb_fin'
NB_RST = 'nb_rst'
NB_ACK = 'nb_ack'
# Relative time to the beginning of the connection
TIME_FIRST_PAYLD = 'time_first_payload'
TIME_LAST_PAYLD = 'time_last_payload'
TIME_FIRST_ACK = 'time_first_ack'
# Timestamp (absolute values)
TIME_FIN_ACK_TCP = 'time_fin_ack_tcp'
TIME_LAST_ACK_TCP = 'time_last_ack_tcp'
TIME_LAST_PAYLD_TCP = 'time_last_payload_tcp'
TIME_LAST_PAYLD_WITH_RETRANS_TCP = 'time_last_payload_with_retrans_tcp'
# Time to live
TTL_MIN = 'time_to_live_min'
TTL_MAX = 'time_to_live_max'
# Segment size
SS_MIN = 'segment_size_min'
SS_MAX = 'segment_size_max'
# Congestion window
CWIN_MIN = 'minimum_in_flight_size'
CWIN_MAX = 'maximum_in_flight_size'
# Subflow inefficiencies
NB_RTX_RTO = 'nb_rtx_rto'
NB_RTX_FR = 'nb_rtx_fr'
NB_REORDERING = 'nb_reordering'
NB_NET_DUP = 'nb_network_duplicate'
NB_UNKNOWN = 'nb_unknown'
NB_FLOW_CONTROL = 'nb_flow_control'
NB_UNNECE_RTX_RTO = 'nb_unnecessary_rtx_rto'
NB_UNNECE_RTX_FR = 'nb_unnecessary_rtx_fr'
# Multipath TCP inefficiencies
REINJ_BYTES = 'reinj_bytes'
REINJ_PC = 'reinj_pc'
# To process both directions
DIRECTIONS = [C2S, S2C]
IPv4 = 'IPv4'
IPv6 = 'IPv6'
# IPv4 localhost address
LOCALHOST_IPv4 = '127.0.0.1'
# Port number of RedSocks
PORT_RSOCKS = '8123'
# Prefix of the Wi-Fi interface IP address
PREFIX_WIFI_IF = '192.168.'
# Size of Latin alphabet
SIZE_LAT_ALPH = 26
# IP address of the proxy (has to be overridden)
IP_PROXY = False
# Size of the header of a frame of an MPTCP packet with data (16 + 20 + 52)
FRAME_MPTCP_OVERHEAD = 88
# Those values have to be overridden
PREFIX_IP_WIFI = False
PREFIX_IP_PROXY = False
IP_WIFI = False
IP_CELL = False
TIMESTAMP = 'timestamp'
CONN_ID = 'conn_id'
FLOW_ID = 'flow_id'
# Info from the SOCKS command
SOCKS_PORT = 'socks_port'
SOCKS_DADDR = 'socks_daddr'
# ADD_ADDRs and REMOVE_ADDRs
ADD_ADDRS = 'add_addrs'
RM_ADDRS = 'rm_addrs'
# Backup bit of a subflow
BACKUP = 'backup'
# Retransmission of DSS
RETRANS_DSS = 'retrans_dss'
if os.path.isfile('config.py'):
import config as conf
import collections
if isinstance(conf.IP_PROXY, collections.Iterable) and not isinstance(conf.IP_PROXY, str):
IP_PROXY = list(conf.IP_PROXY)
else:
IP_PROXY = [conf.IP_PROXY]
if isinstance(conf.PREFIX_IP_PROXY, collections.Iterable) and not isinstance(conf.PREFIX_IP_PROXY, str):
PREFIX_IP_PROXY = list(conf.PREFIX_IP_PROXY)
else:
PREFIX_IP_PROXY = [conf.PREFIX_IP_PROXY]
PREFIX_IP_WIFI = conf.PREFIX_IP_WIFI
##################################################
# CONNECTION RELATED #
##################################################
class BasicFlow(object):
""" Represent a flow between two hosts at transport layer """
attr = {C2S: {}, S2C: {}}
def __init__(self):
self.attr = {C2S: {}, S2C: {}}
def indicates_wifi_or_cell(self):
""" Given data of a mptcp connection subflow, indicates if comes from wifi or cell """
if self.attr[SADDR].startswith(PREFIX_WIFI_IF) or self.attr[DADDR].startswith(PREFIX_WIFI_IF) or self.attr[SADDR].startswith(PREFIX_IP_WIFI) \
or self.attr[DADDR].startswith(PREFIX_IP_WIFI) or (IP_WIFI and (self.attr[SADDR] in IP_WIFI)):
self.attr[IF] = WIFI
elif not IP_CELL or (self.attr[SADDR] in IP_CELL):
self.attr[IF] = CELL
else:
self.attr[IF] = "?"
def detect_ipv4(self):
""" Given the dictionary of a TCP connection, add the type IPv4 if it is an IPv4 connection """
saddr = self.attr[SADDR]
daddr = self.attr[DADDR]
num_saddr = saddr.split('.')
num_daddr = daddr.split('.')
if len(num_saddr) == 4 and len(num_daddr) == 4:
self.attr[TYPE] = IPv4
elif ":" in saddr and ":" in daddr:
self.attr[TYPE] = IPv6
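# A minimal sketch of how detect_ipv4 classifies a flow; the addresses below
# are placeholders chosen only for illustration:
#
#     >>> flow = BasicFlow()
#     >>> flow.attr[SADDR] = '10.0.0.1'
#     >>> flow.attr[DADDR] = '10.0.0.2'
#     >>> flow.detect_ipv4()
#     >>> flow.attr[TYPE]
#     'IPv4'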
class BasicConnection(object):
""" Represent a connection between two hosts at high level """
conn_id = ""
attr = {C2S: {}, S2C: {}}
def __init__(self, cid):
self.conn_id = cid
self.attr = {C2S: {}, S2C: {}}
##################################################
# (DE)SERIALIZATION OF OBJECTS #
##################################################
def save_object(obj, fname):
""" Save the object obj in the file with filename fname """
file = open(fname, 'wb')
file.write(pickle.dumps(obj))
file.close()
def load_object(fname):
""" Return the object contained in the file with filename fname """
file = open(fname, 'rb')
obj = pickle.loads(file.read())
file.close()
return obj
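# Round-trip sketch for save_object / load_object; the file name is an
# arbitrary example:
#
#     >>> save_object({'answer': 42}, '/tmp/example.pickle')
#     >>> load_object('/tmp/example.pickle')
#     {'answer': 42}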
##################################################
# COMMON FUNCTIONS #
##################################################
def check_directory_exists(directory):
""" Check if the directory exists, and create it if needed
If directory is a file, exit the program
"""
if os.path.exists(directory):
if not os.path.isdir(directory):
print(directory + " is a file: stop", file=sys.stderr)
sys.exit(1)
else:
os.makedirs(directory)
def get_dir_from_arg(directory, end=''):
""" Get the abspath of the dir given by the user and append 'end' """
if end.endswith('.'):
end = end[:-1]
if directory.endswith('/'):
directory = directory[:-1]
return os.path.abspath(os.path.expanduser(directory)) + end
def is_number(s):
""" Check if the str s is a number """
try:
float(s)
return True
except ValueError:
return False
def move_file(from_path, to_path, print_out=sys.stderr):
""" Move a file, overwrite if needed """
try:
shutil.move(from_path, to_path)
except Exception:
# Destination already exists; remove it
os.remove(os.path.join(to_path, os.path.basename(from_path)))
shutil.move(from_path, to_path)
def tshark_stats(filtering, src_path, print_out=sys.stderr):
""" Filter src_path using the condition and write the result to print_out (open stream)
Raise a TSharkError in case of failure
"""
table = 'conv,tcp'
if filtering:
table += ',' + filtering
cmd = ['tshark', '-n', '-r', src_path, '-z', table, '-q']
if subprocess.call(cmd, stdout=print_out) != 0:
raise TSharkError("Error with filtering " + filtering + " for source " + src_path)
def long_ipv6_address(ip):
""" Return ip in long format, ex. 2001:db8::1 will be 2001:0db8:0000:0000:0000:0000:0000:0001 """
if ":" not in ip or "." in ip:
# IPv4 address, don't do anything (clean possible ':')
return ip.replace(":", "")
# Before ::, after ::
split_ip = []
decomposed_ip = [[], []]
# Compressed 0 in IPv6
split_ip = ip.split("::")
    # Treat the split parts of the ip
for i in range(0, len(split_ip)):
decomposed_ip[i] = split_ip[i].split(":")
for j in range(0, len(decomposed_ip[i])):
while not len(decomposed_ip[i][j]) >= 4:
decomposed_ip[i][j] = "0" + decomposed_ip[i][j]
# Putting everything together
long_ip = ""
for d_ip in decomposed_ip[0]:
long_ip += d_ip + ":"
for i in range(0, 8 - len(decomposed_ip[0]) - len(decomposed_ip[1])):
long_ip += "0000:"
for d_ip in decomposed_ip[1]:
long_ip += d_ip + ":"
# Remove the last :
return long_ip[:-1]
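# Illustrative behaviour of long_ipv6_address (doctest-style sketch):
#
#     >>> long_ipv6_address('2001:db8::1')
#     '2001:0db8:0000:0000:0000:0000:0000:0001'
#     >>> long_ipv6_address('10.0.0.1')
#     '10.0.0.1'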
##################################################
# PCAP #
##################################################
def save_data(filepath, dir_exp, data):
""" Using the name pcap_fname, save data in a file with filename fname in dir dir_exp """
path_name = os.path.join(
dir_exp, os.path.splitext(os.path.basename(filepath))[0])
try:
data_file = open(path_name, 'w')
pickle.dump(data, data_file)
data_file.close()
except IOError as e:
print(str(e) + ': no data file for ' + filepath, file=sys.stderr)
def clean_loopback_pcap(pcap_filepath, print_out=sys.stdout):
""" Remove noisy traffic (port 1984), see netstat """
tmp_pcap = tempfile.mkstemp(suffix='.pcap')[1]
cmd = ['tshark', '-Y', '!(tcp.dstport==1984||tcp.srcport==1984)&&!((ip.src==127.0.0.1)&&(ip.dst==127.0.0.1))', '-r',
pcap_filepath, '-w', tmp_pcap, '-F', 'pcap']
if subprocess.call(cmd, stdout=print_out) != 0:
print("Error in cleaning " + pcap_filepath, file=sys.stderr)
return
cmd = ['mv', tmp_pcap, pcap_filepath]
if subprocess.call(cmd, stdout=print_out) != 0:
print("Error in moving " + tmp_pcap + " to " + pcap_filepath, file=sys.stderr)
def get_date_as_int(pcap_fname):
""" Return the date of the pcap trace in int (like 20141230)
If there is no date, return None
"""
dash_index = pcap_fname.index("-")
start_index = pcap_fname[:dash_index].rindex("_")
try:
return int(pcap_fname[start_index + 1:dash_index])
except ValueError as e:
print(str(e) + ": get date as int for " + pcap_fname, file=sys.stderr)
return None
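# Illustrative behaviour of get_date_as_int; the file name is a made-up example
# following the <prefix>_<date>-<suffix>.pcap pattern this helper assumes:
#
#     >>> get_date_as_int('trace_20141230-120000.pcap')
#     20141230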
##################################################
# GRAPHS #
##################################################
def log_outliers(aggl_res, remove=False, m=3.0, log_file=sys.stdout):
""" Print on stderr outliers (value + filename), remove them from aggl_res if remove is True """
for condition, data_label in aggl_res.iteritems():
for label, data in data_label.iteritems():
num_data = [elem[0] for elem in data]
np_data = np.array(num_data)
d = np.abs(np_data - np.median(np_data))
mdev = np.median(d)
s = d / mdev if mdev else 0.0
if isinstance(s, float) and s == 0.0:
aggl_res[condition][label] = num_data
continue
new_list = []
for index in range(0, len(data)):
if s[index] >= m:
print("Outlier " + str(data[index][0]) + " of file " + data[index][1] + "; median = " +
str(np.median(np_data)) + ", mstd = " + str(mdev) + " and s = " + str(s[index]), file=log_file)
if remove:
continue
new_list.append(data[index][0])
aggl_res[condition][label] = new_list
def sort_and_aggregate(aggr_list):
""" Given a list of elements as returned by prepare_datasets_file, return a sorted and
aggregated list
List is ordered with elem at index 0, aggregated on elem at index 1 and indicates its source
with elem at index 2
"""
offsets = {}
total = 0
# Sort list by time
sorted_list = sorted(aggr_list, key=lambda elem: elem[0])
return_list = []
for elem in sorted_list:
# Manage the case when the flow name is seen for the first time
if elem[2] in offsets.keys():
total += elem[1] - offsets[elem[2]]
else:
total += elem[1]
offsets[elem[2]] = elem[1]
return_list.append([elem[0], total])
return return_list
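# Illustrative behaviour of sort_and_aggregate: totals accumulate across flows,
# counting only each flow's increase since its previous value. The flow names
# 'a' and 'b' are arbitrary examples:
#
#     >>> sort_and_aggregate([[1, 10, 'a'], [2, 5, 'b'], [3, 15, 'a']])
#     [[1, 10], [2, 15], [3, 20]]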
# Initialize lock semaphore for matplotlib
# This is needed to avoid race conditions inside matplotlib
plt_lock = threading.Lock()
TIMEOUT = 60
def critical_plot_line_graph(data, label_names, formatting, xlabel, ylabel, title, graph_filepath, ymin=None, titlesize=20, y_log=False):
""" Critical part to plot a line graph """
count = 0
fig = plt.figure()
plt.clf()
fig, ax = plt.subplots()
# Create plots
try:
for dataset in data:
x_val = [x[0] for x in dataset]
y_val = [x[1] for x in dataset]
ax.plot(x_val, y_val, formatting[count], linewidth=2, label=label_names[count])
count += 1
ax.legend(loc='best', shadow=True, fontsize='x-large')
except ValueError as e:
print(str(e) + ": create plots: skip " + graph_filepath, file=sys.stderr)
return
# try:
# # Put a nicer background color on the legend.
# legend.get_frame().set_facecolor('#00FFCC')
# except AttributeError as e:
# # if we have no frame, it means we have no object...
# print(str(e) + ": change legend: skip " + graph_filepath, file=sys.stderr)
# print('label_names: ' + str(label_names), file=sys.stderr)
# print('formatting: ' + str(formatting), file=sys.stderr)
# print('data: ' + str(data), file=sys.stderr)
# return
fig.suptitle(title, fontsize=titlesize)
plt.xlabel(xlabel, fontsize=24, labelpad=-1)
plt.ylabel(ylabel, fontsize=24)
if y_log:
        ax.set_yscale('symlog', linthreshy=1)
if ymin is not None:
plt.ylim(ymin=ymin)
try:
plt.savefig(graph_filepath)
except:
print('ERROR when creating graph for ' + graph_filepath, file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return
# Don't forget to clean the plot, otherwise previous ones will be there!
try:
plt.clf()
except KeyError as e:
print(str(e) + ": when cleaning graph " + graph_filepath, file=sys.stderr)
plt.close()
def plot_line_graph(data, label_names, formatting, xlabel, ylabel, title, graph_filepath, ymin=None, titlesize=20, y_log=False):
""" Plot a line graph with data """
# no data, skip
pop_index = []
count = 0
for dataset in data:
if not dataset or len(dataset) <= 1:
# If no data, remove it from dataset and manage label name and formatting
# number = "One" if len(dataset) == 1 else "No"
# print(number + " data in dataset; remove it", file=sys.stderr)
pop_index.append(count)
count += 1
for index in reversed(pop_index):
data.pop(index)
label_names.pop(index)
formatting.pop(index)
if not data:
print("No data for " + title + ": skip", file=sys.stderr)
return
plt_lock.acquire()
try:
p = Process(target=critical_plot_line_graph, args=(
data, label_names, formatting, xlabel, ylabel, title, graph_filepath,), kwargs={'ymin': ymin, 'titlesize': titlesize, 'y_log': y_log},)
p.start()
p.join(TIMEOUT)
if p.is_alive():
print("A process must be terminated", file=sys.stderr)
p.terminate()
except Exception as e:
print("UNCATCHED EXCEPTION IN critical_plot_line_graph for " + graph_filepath, file=sys.stderr)
print(str(e), file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
plt_lock.release()
def plot_bar_chart(aggl_res, label_names, color, ecolor, ylabel, title, graph_fname):
""" Plot a bar chart with aggl_res """
plt_lock.acquire()
matplotlib.rcParams.update({'font.size': 8})
# Convert Python arrays to numpy arrays (easier for mean and std)
for cond, elements in aggl_res.iteritems():
for label, array in elements.iteritems():
elements[label] = np.array(array)
N = len(aggl_res)
nb_subbars = len(label_names)
ind = np.arange(N)
labels = []
values = {}
for label_name in label_names:
values[label_name] = ([], [])
width = (1.00 / nb_subbars) - (0.1 / nb_subbars) # the width of the bars
fig, ax = plt.subplots()
# So far, simply count the number of connections
for cond, elements in aggl_res.iteritems():
labels.append(cond)
for label_name in label_names:
values[label_name][0].append(elements[label_name].mean())
values[label_name][1].append(elements[label_name].std())
bars = []
labels_names = []
zero_bars = []
count = 0
for label_name in label_names:
(mean, std) = values[label_name]
bar = ax.bar(ind + (count * width), mean, width, color=color[count], yerr=std, ecolor=ecolor[count])
bars.append(bar)
zero_bars.append(bar[0])
labels_names.append(label_name)
count += 1
# add some text for labels, title and axes ticks
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xticks(ind + width)
ax.set_xticklabels(labels)
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
ax.legend(zero_bars, label_names, loc='upper center', bbox_to_anchor=(0.5, -0.05), fancybox=True, shadow=True,
ncol=len(zero_bars))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, '%d' % int(height),
ha='center', va='bottom')
for bar in bars:
autolabel(bar)
plt.savefig(graph_fname)
plt.close()
plt_lock.release()
def plot_cdfs(aggl_res, color, xlabel, base_graph_fname, ylim=None, xlim=None):
""" Plot all possible CDFs based on aggl_res.
aggl_res is a dictionary with the structure aggl_res[condition][element] = list of data
base_graph_fname does not have any extension
WARNING: this function assumes that the list of elements will remain the same for all conditions
"""
if len(aggl_res) < 1:
return
cond_init = aggl_res.keys()[0]
for element in aggl_res[cond_init].keys():
plt.figure()
plt.clf()
fig, ax = plt.subplots()
graph_fname = os.path.splitext(base_graph_fname)[0] + "_cdf_" + element + ".pdf"
for cond in aggl_res.keys():
try:
sample = np.array(sorted(aggl_res[cond][element]))
sorted_array = np.sort(sample)
yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
if len(sorted_array) > 0:
# Add a last point
sorted_array = np.append(sorted_array, sorted_array[-1])
yvals = np.append(yvals, 1.0)
plt.plot(sorted_array, yvals, linewidth=2, color=color[aggl_res[cond].keys().index(element)], label=element)
except ZeroDivisionError as e:
print(str(e))
# Shrink current axis's height by 10% on the top
box = ax.get_position()
ax.set_position([box.x0, box.y0,
box.width, box.height * 0.9])
if ylim:
plt.ylim(ylim, 1.0)
if xlim:
plt.xlim(0.0, xlim)
# Put a legend above current axis
ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=len(aggl_res))
plt.xlabel(xlabel, fontsize=18)
plt.ylabel("CDF", fontsize=18)
plt.savefig(graph_fname)
plt.close('all')
def plot_cdfs_natural(aggl_res, color, xlabel, base_graph_fname, xlim=None, ylim=None, ncol=None, label_order=None, xlog=False, ylog=False, ccdf=False):
""" Plot all possible CDFs based on aggl_res.
aggl_res is a dictionary with the structure aggl_res[condition][element] = list of data
base_graph_fname does not have any extension
WARNING: this function assumes that the list of elements will remain the same for all conditions
"""
if len(aggl_res) < 1:
return
for cond in aggl_res.keys():
plt.figure()
plt.clf()
fig, ax = plt.subplots()
graph_fname = os.path.splitext(base_graph_fname)[0] + "_cdf_" + cond + ".pdf"
cond_list = aggl_res[cond].keys()
if label_order:
cond_list = label_order
for element in cond_list:
try:
sample = np.array(sorted(aggl_res[cond][element]))
# f = open(os.path.splitext(base_graph_fname)[0] + '_' + cond + '_' + element, 'w')
# for i in range(len(sample)):
# f.write(str(sample[i]) + "\n")
# f.close()
sorted_array = np.sort(sample)
yvals = np.arange(len(sorted_array)) / float(len(sorted_array))
if len(sorted_array) > 0:
# Add a last point
sorted_array = np.append(sorted_array, sorted_array[-1])
yvals = np.append(yvals, 1.0)
if ccdf:
yvals = 1.0 - yvals
ax.plot(sorted_array, yvals, color=color[aggl_res[cond].keys().index(element)], label=element)
except ZeroDivisionError as e:
print(str(e))
# Shrink current axis's height by 10% on the top
# box = ax.get_position()
# ax.set_position([box.x0, box.y0,
# box.width, box.height * 0.9])
if xlim:
if xlog:
plt.xlim(0.1, xlim)
else:
plt.xlim(0.0, xlim)
if ylim:
plt.ylim(ylim, 1.0)
if not ncol:
ncol = len(aggl_res[cond])
if xlog:
ax.set_xscale('log')
if ylog:
ax.set_yscale('symlog', linthreshy=0.0000001)
# Put a legend above current axis
# ax.legend(loc='lower center', bbox_to_anchor=(0.5, 1.05), fancybox=True, shadow=True, ncol=ncol)
ax.legend(loc='lower right')
plt.xlabel(xlabel, fontsize=18)
if ccdf:
plt.ylabel("1 - CDF", fontsize=18)
else:
plt.ylabel("CDF", fontsize=18)
plt.savefig(graph_fname)
plt.close('all')
def plot_cdfs_with_direction(aggl_res, color, xlabel, base_graph_fname, natural=False, ylim=None, xlim=None, xlog=False, ylog=False, ccdf=False, label_order=None):
""" Plot all possible CDFs based on aggl_res.
aggl_res is a dictionary with the structure aggl_res[direction][condition][element] = list of data
WARNING: this function assumes that the list of elements will remain the same for all conditions
"""
if len(aggl_res) < 1:
return
for direction in aggl_res.keys():
if natural:
plot_cdfs_natural(aggl_res[direction], color, xlabel, os.path.splitext(base_graph_fname)[0] + '_' + direction, ylim=ylim, xlim=xlim, xlog=xlog, ylog=ylog, ccdf=ccdf, label_order=label_order)
else:
plot_cdfs(aggl_res[direction], color, xlabel, os.path.splitext(base_graph_fname)[0] + '_' + direction, ylim=ylim, xlim=xlim)
def scatter_plot(data, xlabel, ylabel, color, sums_dir_exp, base_graph_name, plot_identity=True, s=None, log_scale_x=True, log_scale_y=True, y_to_one=False, label_order=None):
""" Plot a scatter plot for each condition inside data (points are for apps)
base_graph_name is given without extension
"""
for condition, data_cond in data.iteritems():
plt.figure()
plt.clf()
fig, ax = plt.subplots()
scatters = []
apps = []
labels = data_cond.keys()
if label_order:
labels = label_order
for app_name in labels:
if app_name not in data_cond:
continue
x_val = [x[0] for x in data_cond[app_name]]
y_val = [x[1] for x in data_cond[app_name]]
if s:
scatters.append(ax.scatter(x_val, y_val, s=s[condition][app_name], label=app_name, color=color[app_name], alpha=1.))
else:
scatters.append(ax.scatter(x_val, y_val, label=app_name, color=color[app_name], alpha=1.))
apps.append(app_name)
if plot_identity:
            identity = np.arange(0, 10001, 1000)
ax.plot(identity, identity, 'k--')
plt.xlim(0.0, 10000)
plt.ylim(0.0, 10000)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(scatters, apps, loc='center left', bbox_to_anchor=(1, 0.5), fontsize='large', scatterpoints=1)
plt.xlabel(xlabel, fontsize=18)
plt.ylabel(ylabel, fontsize=16)
if log_scale_y:
ax.set_yscale('symlog', linthreshy=1)
if log_scale_x:
ax.set_xscale('symlog', linthreshx=1)
plt.grid()
plt.xlim(0.0, plt.xlim()[1])
if y_to_one:
plt.ylim(0.0, 1.02)
else:
plt.ylim(0.0, max(plt.ylim()[1], 1))
# plt.annotate('1', xy=(0.57, 0.96), xycoords="axes fraction",
# xytext=(0.85, 0.85), textcoords='axes fraction',
# arrowprops=dict(facecolor='black', shrink=0.05),
# horizontalalignment='right', verticalalignment='bottom', size='large'
# )
#
# plt.annotate('2', xy=(0.38, 0.04), xycoords="axes fraction",
# xytext=(0.125, 0.2), textcoords='axes fraction',
# arrowprops=dict(facecolor='black', shrink=0.05),
# horizontalalignment='left', verticalalignment='top', size='large'
# )
graph_fname = base_graph_name + "_" + condition + ".pdf"
graph_full_path = os.path.join(sums_dir_exp, graph_fname)
plt.savefig(graph_full_path)
plt.clf()
plt.close('all')
def scatter_plot_with_direction(data, xlabel, ylabel, color, sums_dir_exp, base_graph_name, plot_identity=True, s=None, log_scale_x=True, log_scale_y=True, y_to_one=False, label_order=None):
""" Plot a scatter plot for each direction and condition inside data (points are for apps)
"""
for direction, data_dir in data.iteritems():
if s:
scatter_plot(data_dir, xlabel, ylabel, color, sums_dir_exp, os.path.splitext(base_graph_name)[0] + "_" + direction, plot_identity=plot_identity, s=s[direction], log_scale_x=log_scale_x, log_scale_y=log_scale_y, y_to_one=y_to_one, label_order=label_order)
else:
scatter_plot(data_dir, xlabel, ylabel, color, sums_dir_exp, os.path.splitext(base_graph_name)[0] + "_" + direction, plot_identity=plot_identity, log_scale_x=log_scale_x, log_scale_y=log_scale_y, y_to_one=y_to_one, label_order=label_order)
def density_plot(data, xlabel, color, graph_fname, xlim=None):
plt.figure()
plt.clf()
max_value = 0
# First find the max value
for condition, cond_data in data.iteritems():
if cond_data:
max_value = max(max_value, max(cond_data))
# Then do the plot work
for condition, cond_data in data.iteritems():
if cond_data:
density = gaussian_kde(cond_data)
xs = np.linspace(0, max_value, 1500)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs), color=color[condition], label=condition)
plt.legend(loc='upper right')
if xlim:
plt.xlim([0.0, xlim])
plt.xlabel(xlabel, fontsize=18)
plt.ylabel("Density function", fontsize=18)
plt.savefig(graph_fname)
plt.close('all')
| gpl-3.0 |
thunder-project/thunder-factorization | factorization/algorithms/SVD.py | 1 | 3881 | from ..base import Algorithm
class SVD(Algorithm):
"""
Algorithm for singular value decomposition
"""
def __init__(self, k=3, method="auto", max_iter=20, tol=0.00001, seed=None):
self.k = k
self.method = method
self.max_iter = max_iter
self.tol = tol
self.seed = seed
def _fit_local(self, mat):
from sklearn.utils.extmath import randomized_svd
U, S, V = randomized_svd(mat, n_components=self.k, n_iter=self.max_iter, random_state=self.seed)
return U, S, V
def _fit_spark(self, mat):
from numpy import argsort, dot, outer, sqrt, sum, zeros, random
from scipy.linalg import inv, orth
from numpy.linalg import eigh
from thunder.series import Series
mat = Series(mat)
nrows = mat.shape[0]
ncols = mat.shape[1]
if self.method == 'auto':
if ncols < 750:
method = 'direct'
else:
method = 'em'
else:
method = self.method
if method == 'direct':
# get the normalized gramian matrix
cov = mat.gramian().toarray() / nrows
# do a local eigendecomposition
eigw, eigv = eigh(cov)
inds = argsort(eigw)[::-1]
s = sqrt(eigw[inds[0:self.k]]) * sqrt(nrows)
v = eigv[:, inds[0:self.k]].T
# project back into data, normalize by singular values
u = mat.times(v.T / s)
if method == 'em':
# initialize random matrix
random.seed(self.seed)
c = random.rand(self.k, ncols)
niter = 0
error = 100
# define an accumulator
from pyspark.accumulators import AccumulatorParam
class MatrixAccumulatorParam(AccumulatorParam):
def zero(self, value):
return zeros(value.shape)
def addInPlace(self, val1, val2):
val1 += val2
return val1
# define an accumulator function
global run_sum
def outer_sum_other(x, y):
global run_sum
run_sum += outer(x, dot(x, y))
# iterative update subspace using expectation maximization
# e-step: x = (c'c)^-1 c' y
# m-step: c = y x' (xx')^-1
while (niter < self.max_iter) & (error > self.tol):
c_old = c
# pre compute (c'c)^-1 c'
c_inv = dot(c.T, inv(dot(c, c.T)))
# compute (xx')^-1 through a map reduce
xx = mat.times(c_inv).gramian().toarray()
xx_inv = inv(xx)
# pre compute (c'c)^-1 c' (xx')^-1
pre_mult_2 = mat.tordd().context.broadcast(dot(c_inv, xx_inv))
# compute the new c using an accumulator
# direct approach: c = mat.rows().map(lambda x: outer(x, dot(x, pre_mult_2.value))).sum()
run_sum = mat.tordd().context.accumulator(zeros((ncols, self.k)), MatrixAccumulatorParam())
mat.tordd().values().foreach(lambda x: outer_sum_other(x, pre_mult_2.value))
c = run_sum.value
# transpose result
c = c.T
error = sum(sum((c - c_old) ** 2))
niter += 1
# project data into subspace spanned by columns of c
# use standard eigendecomposition to recover an orthonormal basis
c = orth(c.T)
cov = mat.times(c).gramian().toarray() / nrows
eigw, eigv = eigh(cov)
inds = argsort(eigw)[::-1]
s = sqrt(eigw[inds[0:self.k]]) * sqrt(nrows)
v = dot(eigv[:, inds[0:self.k]].T, c.T)
u = mat.times(v.T / s)
return u.values, s, v
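# Hedged usage sketch. It assumes the package's Algorithm base class exposes a
# fit() method that dispatches to _fit_local for plain NumPy arrays and returns
# the (U, S, V) tuple produced above; check the base class before relying on this.
#
#     >>> import numpy as np
#     >>> X = np.random.randn(100, 20)
#     >>> U, S, V = SVD(k=3).fit(X)  # expected shapes: (100, 3), (3,), (3, 20)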
| mit |
iulian787/spack | var/spack/repos/builtin/packages/py-biom-format/package.py | 5 | 1318 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBiomFormat(PythonPackage):
"""The BIOM file format (canonically pronounced biome) is designed to be
a general-use format for representing biological sample by observation
contingency tables."""
homepage = "https://pypi.python.org/pypi/biom-format/2.1.6"
url = "https://pypi.io/packages/source/b/biom-format/biom-format-2.1.6.tar.gz"
version('2.1.6', sha256='8eefc275a85cc937f6d6f408d91b7b45eae854cd5d1cbda411a3af51f5b49b0d')
variant('h5py', default=True, description='For use with BIOM 2.0+ files')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-h5py', type=('build', 'run'), when='+h5py')
depends_on('py-click', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pyqi', type=('build', 'run'))
| lgpl-2.1 |
abhishekgahlot/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 10 | 22918 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics.scorer import SCORERS
from sklearn.metrics import make_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
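# Note on the identity exercised above (added for clarity): the primal ridge
# solution w = (X^T X + alpha*I)^{-1} X^T y and the kernel (dual) solution
# d = (X X^T + alpha*I)^{-1} y satisfy w = X^T d, which is exactly what the
# comparison of coef and coef2 checks.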
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
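# Note on the rescaling trick used above (added for clarity): minimizing
# sum_i s_i * (y_i - x_i.w)^2 + alpha * ||w||^2 is equivalent to ordinary ridge
# on the rescaled data (sqrt(s_i) * x_i, sqrt(s_i) * y_i); and because the
# weighted objective is quadratic, a single Newton step from zero solves it
# exactly, so the hand-computed coef_ must match the fitted estimator.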
def test_ridge_shapes():
"""Test shape of coef_ and intercept_
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
"""Test intercept with multiple targets GH issue #708
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
"""Tests the ridge object using individual penalties"""
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = SCORERS['mean_squared_error']
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'auto' can handle negative labels.
clf = RidgeClassifier(class_weight='auto')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'auto', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='auto')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weights_cv():
"""
Test class weights for cross validated ridge classifier.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
"""
Test _RidgeCV's store_cv_values attribute.
"""
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridge_sample_weights_in_feature_space():
"""Check that Cholesky solver in feature space applies sample_weights
correctly.
"""
rng = np.random.RandomState(42)
n_samples_list = [5, 6, 7] * 2
n_features_list = [7, 6, 5] * 2
n_targets_list = [1, 1, 1, 2, 2, 2]
noise = 1.
alpha = 2.
alpha = np.atleast_1d(alpha)
for n_samples, n_features, n_targets in zip(n_samples_list,
n_features_list,
n_targets_list):
X = rng.randn(n_samples, n_features)
beta = rng.randn(n_features, n_targets)
Y = X.dot(beta)
Y_noisy = Y + rng.randn(*Y.shape) * np.sqrt((Y ** 2).sum(0)) * noise
K = X.dot(X.T)
sample_weights = 1. + (rng.randn(n_samples) ** 2) * 10
coef_sample_space = _solve_cholesky_kernel(K, Y_noisy, alpha,
sample_weight=sample_weights)
coef_feature_space = _solve_cholesky(X, Y_noisy, alpha,
sample_weight=sample_weights)
assert_array_almost_equal(X.T.dot(coef_sample_space),
coef_feature_space.T)
def test_raises_value_error_if_sample_weights_greater_than_1d():
"""Sample weights must be either scalar or 1D"""
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
"""Sample weights must work with sparse matrices"""
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_deprecation_warning_dense_cholesky():
"""Tests if DeprecationWarning is raised at instantiation of estimators
and when ridge_regression is called"""
warning_class = DeprecationWarning
warning_message = ("The name 'dense_cholesky' is deprecated."
" Using 'cholesky' instead")
X = np.ones([2, 3])
y = np.ones(2)
func1 = lambda: Ridge(solver='dense_cholesky').fit(X, y)
func2 = lambda: RidgeClassifier(solver='dense_cholesky').fit(X, y)
X = np.ones([3, 2])
y = np.zeros(3)
func3 = lambda: ridge_regression(X, y, alpha=1, solver='dense_cholesky')
for func in [func1, func2, func3]:
assert_warns_message(warning_class, warning_message, func)
def test_raises_value_error_if_solver_not_supported():
"""Tests whether a ValueError is raised if a non-identified solver
is passed to ridge_regression"""
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
AIML/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
Should be between [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
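# Illustrative sketch (editorial addition, not part of the original module):
# a minimal, hypothetical way to obtain a bandwidth for mean shift on numpy
# toy data. Subsampling via n_samples keeps the pairwise-distance work small.
def _example_estimate_bandwidth():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    # use 60 of the 100 points and the closest 20% of them per point
    return estimate_bandwidth(X, quantile=0.2, n_samples=60)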
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
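# Illustrative sketch (editorial addition, not part of the original module):
# running the per-seed climb by hand, mirroring what mean_shift() dispatches
# in parallel for every seed. The toy data and radius are hypothetical.
def _example_single_seed_climb():
    rng = np.random.RandomState(1)
    X = rng.randn(100, 2)
    nbrs = NearestNeighbors(radius=1.0).fit(X)
    # returns the converged mode (as a tuple) and the number of points
    # that fell inside the final kernel
    return _mean_shift_single_seed(X[0], X, nbrs, max_iter=300)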
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
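# Illustrative sketch (editorial addition, not part of the original module):
# the functional interface on two hypothetical Gaussian blobs; bin_seeding
# keeps the number of seeds (and hence the parallel work) small.
def _example_mean_shift_function():
    rng = np.random.RandomState(42)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 10])
    cluster_centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True)
    return cluster_centers, labels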
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
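# Illustrative sketch (editorial addition, not part of the original module):
# with bin_size=1.0 the four hypothetical points below fall onto two grid
# nodes, so only two seeds (roughly [1, 1] and [4, 4]) are returned.
def _example_get_bin_seeds():
    X = np.array([[1.0, 1.1], [1.1, 0.9], [4.0, 3.9], [3.9, 4.1]])
    return get_bin_seeds(X, bin_size=1.0, min_bin_freq=1)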
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ : array, [n_samples]
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity tends
to O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
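# Illustrative sketch (editorial addition, not part of the original module):
# the estimator interface end to end on hypothetical toy data, combining
# estimate_bandwidth with MeanShift as the class docstring describes.
def _example_mean_shift_estimator():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 8])
    bandwidth = estimate_bandwidth(X, quantile=0.2)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
    return ms.cluster_centers_, ms.labels_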
| bsd-3-clause |
Djabbz/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 41 | 8901 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b,
max_n_classes=1e4), 0.0)
assert_equal(v_measure_score(labels_a, labels_b,
max_n_classes=1e4), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b,
max_n_classes=1e4), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b,
max_n_classes=1e4), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
def test_max_n_classes():
rng = np.random.RandomState(seed=0)
labels_true = rng.rand(53)
labels_pred = rng.rand(53)
labels_zero = np.zeros(53)
labels_true[:2] = 0
labels_zero[:3] = 1
labels_pred[:2] = 0
for score_func in score_funcs:
expected = ("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
assert_raise_message(ValueError, expected, score_func,
labels_true, labels_pred,
max_n_classes=50)
expected = ("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
assert_raise_message(ValueError, expected, score_func,
labels_zero, labels_pred,
max_n_classes=50)
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/frame/test_block_internals.py | 1 | 21652 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import itertools
import numpy as np
import pytest
from pandas.compat import StringIO
import pandas as pd
from pandas import (
Categorical, DataFrame, Series, Timestamp, compat, date_range,
option_context)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.core.internals.blocks import IntBlock
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz column inplace invalidates the
# `freq` attribute on the underlying DatetimeIndex
dti = date_range('20130101', periods=3, tz='US/Eastern')
ts = dti[1]
df = DataFrame({'B': dti})
assert df['B']._values.freq == 'D'
df.iloc[1, 0] = pd.NaT
assert df['B']._values.freq is None
# check that the DatetimeIndex was not altered in place
assert dti.freq == 'D'
assert dti[1] == ts
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise NotImplementedError
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
msg = ("compound dtypes are not implemented in the DataFrame"
" constructor")
with pytest.raises(NotImplementedError, match=msg):
f([("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self, float_frame):
# API/ENH 9607
df = DataFrame(float_frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self, float_frame, float_string_frame):
cop = float_frame.copy()
cop['E'] = cop['A']
assert 'E' not in float_frame
# copy objects
copy = float_string_frame.copy()
assert copy._data is not float_string_frame._data
def test_pickle(self, float_string_frame, timezone_frame):
empty_frame = DataFrame()
unpickled = tm.round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
assert_frame_equal(result, expected)
only_obj = df.loc[:, ['c', 'g']]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict(
{'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_get_numeric_data_extension_dtype(self):
# GH 22290
df = DataFrame({
'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
'B': Categorical(list('abcabc')),
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
'D': IntervalArray.from_breaks(range(7))})
result = df._get_numeric_data()
expected = df.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
def test_convert_objects(self, float_string_frame):
oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
float_string_frame['H'] = '1.'
float_string_frame['I'] = '1'
# add in some items that will be nan
length = len(float_string_frame)
float_string_frame['J'] = '1.'
float_string_frame['K'] = '1'
float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
assert converted['K'].dtype == 'float64'
assert len(converted['J'].dropna()) == length - 5
assert len(converted['K'].dropna()) == length - 5
# via astype
converted = float_string_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
converted = float_string_frame.copy()
with pytest.raises(ValueError, match='invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
result = df._convert(datetime=True, numeric=True)
expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
df = DataFrame({'a': ['a', 1, 2, 3],
'b': ['b', 2.0, 3.0, 4.1],
'c': ['c', datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [1, 2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
df = df.iloc[1:].infer_objects()
assert df['a'].dtype == 'int64'
assert df['b'].dtype == 'float64'
assert df['c'].dtype == 'M8[ns]'
assert df['d'].dtype == 'object'
expected = DataFrame({'a': [1, 2, 3],
'b': [2.0, 3.0, 4.1],
'c': [datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
# reconstruct frame to verify inference is same
tm.assert_frame_equal(df.reset_index(drop=True), expected)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment', None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.sum() # noqa
exp = Y['g'].sum() # noqa
assert pd.isna(Y['g']['c'])
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame({'a': [1, 2, 3],
'b': [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
tm.assert_index_equal(df._get_numeric_data().columns,
pd.Index(['a', 'b', 'e']))
def test_strange_column_corruption_issue(self):
# (wesm) Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = np.nan
wasCol = {}
# uncommenting these makes the results match
# for col in xrange(100, 200):
# wasCol[col] = 1
# df[col] = np.nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
if col not in wasCol:
wasCol[col] = 1
df[col] = np.nan
df[col][dt] = i
myid = 100
first = len(df.loc[pd.isna(df[myid]), [myid]])
second = len(df.loc[pd.isna(df[myid]), [myid]])
assert first == second == 0
def test_constructor_no_pandas_array(self):
# Ensure that PandasArray isn't allowed inside Series
# See https://github.com/pandas-dev/pandas/issues/23995 for more.
arr = pd.Series([1, 2, 3]).array
result = pd.DataFrame({"A": arr})
expected = pd.DataFrame({"A": [1, 2, 3]})
tm.assert_frame_equal(result, expected)
assert isinstance(result._data.blocks[0], IntBlock)
| bsd-3-clause |
kjung/scikit-learn | sklearn/utils/__init__.py | 8 | 11976 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as _ConvergenceWarning
from ..exceptions import DataConversionWarning
@deprecated("ConvergenceWarning has been moved into the sklearn.exceptions "
"module. It will not be available here from version 0.19")
class ConvergenceWarning(_ConvergenceWarning):
pass
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
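# Illustrative sketch (editorial addition, not part of the original module):
# the boolean mask is kept for dense arrays but converted to integer indices
# for sparse input, which older scipy sparse matrices require for row masking.
def _example_safe_mask():
    from scipy.sparse import csr_matrix
    X_dense = np.arange(12).reshape(4, 3)
    X_sparse = csr_matrix(X_dense)
    mask = np.array([True, False, True, False])
    dense_rows = X_dense[safe_mask(X_dense, mask)]     # boolean mask unchanged
    sparse_rows = X_sparse[safe_mask(X_sparse, mask)]  # converted to [0, 2]
    return dense_rows, sparse_rows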
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
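# Illustrative sketch (editorial addition, not part of the original module):
# the same integer indices applied to a plain list and to a NumPy array,
# exercising the list branch and the fast ndarray.take branch respectively.
def _example_safe_indexing():
    indices = np.array([2, 0])
    from_list = safe_indexing(['a', 'b', 'c'], indices)          # ['c', 'a']
    from_array = safe_indexing(np.array([10, 20, 30]), indices)  # [30, 10]
    return from_list, from_array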
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
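# Illustrative sketch (editorial addition, not part of the original module):
# squaring a sparse matrix only touches its .data attribute, so the sparsity
# pattern is preserved; with copy=True (the default) the input is untouched.
def _example_safe_sqr():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.array([[0.0, -2.0], [3.0, 0.0]]))
    return safe_sqr(X).toarray()  # [[0., 4.], [9., 0.]]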
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
| bsd-3-clause |
carrillo/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 169 | 8809 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
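# Illustrative sketch (editorial addition, not part of the original test
# module): one PA-I hinge update worked by hand. With w=0, b=0, x=[1, 0],
# y=+1 and C=1: p=0, loss=max(1-0, 0)=1, ||x||^2=1, so the step is
# min(C, loss/||x||^2)*y=1, giving w=[1, 0] and b=1.
def _example_single_pa_update():
    clf = MyPassiveAggressive(C=1.0, loss="hinge", n_iter=1)
    clf.fit(np.array([[1.0, 0.0]]), np.array([1]))
    return clf.w, clf.b  # (array([1., 0.]), 1.0)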
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_class_weights():
# Test class weights.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100, class_weight=None,
random_state=100)
clf.fit(X2, y2)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = PassiveAggressiveClassifier(C=0.1, n_iter=100,
class_weight={1: 0.001},
random_state=100)
clf.fit(X2, y2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
# partial_fit with class_weight='balanced' not supported
clf = PassiveAggressiveClassifier(class_weight="balanced")
assert_raises(ValueError, clf.partial_fit, X, y, classes=np.unique(y))
def test_equal_class_weight():
X2 = [[1, 0], [1, 0], [0, 1], [0, 1]]
y2 = [0, 0, 1, 1]
clf = PassiveAggressiveClassifier(C=0.1, n_iter=1000, class_weight=None)
clf.fit(X2, y2)
# Already balanced, so "balanced" weights should have no effect
clf_balanced = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight="balanced")
clf_balanced.fit(X2, y2)
clf_weighted = PassiveAggressiveClassifier(C=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X2, y2)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
assert_almost_equal(clf.coef_, clf_balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
# ValueError due to wrong class_weight label.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight={0: 0.5})
assert_raises(ValueError, clf.fit, X2, y2)
def test_wrong_class_weight_format():
# ValueError due to wrong class_weight argument type.
X2 = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y2 = [1, 1, 1, -1, -1]
clf = PassiveAggressiveClassifier(class_weight=[0.5])
assert_raises(ValueError, clf.fit, X2, y2)
clf = PassiveAggressiveClassifier(class_weight="the larch")
assert_raises(ValueError, clf.fit, X2, y2)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | sklearn/tests/test_dummy.py | 27 | 17468 | from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
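# Added note: class 0 carries total sample weight 0.1 + 0.1 = 0.2 and class 1
# carries 1.0, out of 1.2 overall -- hence the expected class priors above.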
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1/3, decimal=1)
assert_almost_equal(p[2], 1/3, decimal=1)
assert_almost_equal(p[4], 1/3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
hdmetor/scikit-learn | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit one model with the modified sample weights and, for reference, one
# model without sample weights
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
jmmease/pandas | pandas/tests/series/test_internals.py | 17 | 12814 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime
from numpy import nan
import numpy as np
from pandas import Series
from pandas.core.indexes.datetimes import Timestamp
import pandas._libs.lib as lib
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
class TestSeriesInternals(object):
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
with tm.assert_produces_warning(FutureWarning):
result = r.convert_objects(convert_dates=False,
convert_numeric=True)
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'],
dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates=True,
convert_numeric=False)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
Timestamp('20010105')], dtype='M8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = s2.convert_objects(convert_dates='coerce',
convert_numeric=True)
assert_series_equal(result, expected)
# preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
# preserve if non-object
s = Series([1], dtype='float32')
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce',
convert_numeric=False)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r.convert_objects(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
with tm.assert_produces_warning(FutureWarning):
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
with tm.assert_produces_warning(FutureWarning):
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
# GH 10265
def test_convert(self):
# Tests: All to nans, coerce, true
# Test coercion returns correct type
s = Series(['a', 'b', 'c'])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 3)
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([np.nan] * 3)
assert_series_equal(results, expected)
expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
results = s._convert(timedelta=True, coerce=True)
assert_series_equal(results, expected)
dt = datetime(2001, 1, 1, 0, 0)
td = dt - datetime(2000, 1, 1, 0, 0)
# Test coercion with mixed types
s = Series(['a', '3.1415', dt, td])
results = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
assert_series_equal(results, expected)
results = s._convert(numeric=True, coerce=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True, coerce=True)
expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
dtype=np.dtype('m8[ns]'))
assert_series_equal(results, expected)
# Test standard conversion returns original
results = s._convert(datetime=True)
assert_series_equal(results, s)
results = s._convert(numeric=True)
expected = Series([nan, 3.1415, nan, nan])
assert_series_equal(results, expected)
results = s._convert(timedelta=True)
assert_series_equal(results, s)
# test pass-through and non-conversion when other types selected
s = Series(['1.0', '2.0', '3.0'])
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([1.0, 2.0, 3.0])
assert_series_equal(results, expected)
results = s._convert(True, False, True)
assert_series_equal(results, s)
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0,
0)])
assert_series_equal(results, expected)
results = s._convert(datetime=False, numeric=True, timedelta=True)
assert_series_equal(results, s)
td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
s = Series([td, td], dtype='O')
results = s._convert(datetime=True, numeric=True, timedelta=True)
expected = Series([td, td])
assert_series_equal(results, expected)
results = s._convert(True, True, False)
assert_series_equal(results, s)
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s._convert(numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r._convert(numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
result = r._convert(numeric=True)
expected = s.copy()
expected['a'] = nan
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g. floats and object)
s = Series([1, 'na', 3, 4])
result = s._convert(datetime=True, numeric=True)
expected = Series([1, nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s._convert(datetime=True, numeric=True)
assert_series_equal(result, expected)
# dates
s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
Timestamp('20010104'), '20010105'], dtype='O')
result = s._convert(datetime=True)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
expected = Series([Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20010103'), lib.NaT, lib.NaT, lib.NaT,
Timestamp('20010104'), Timestamp('20010105')],
dtype='M8[ns]')
result = s2._convert(datetime=True, numeric=False, timedelta=False,
coerce=True)
assert_series_equal(result, expected)
result = s2._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s._convert(datetime=True, coerce=True)
expected = Series([lib.NaT] * 2 + [Timestamp(1)] * 2)
assert_series_equal(result, expected)
# preserve if non-object
s = Series([1], dtype='float32')
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, s)
# r = s.copy()
# r[0] = np.nan
# result = r._convert(convert_dates=True,convert_numeric=False)
# assert result.dtype == 'M8[ns]'
# dateutil parses some single letters into today's value as a date
expected = Series([lib.NaT])
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
s = Series([x.upper()])
result = s._convert(datetime=True, coerce=True)
assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
s = Series(['1.0', '2'])
pytest.raises(ValueError, s._convert)
def test_convert_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s._convert(datetime=True, numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 25 | 3492 | from nose.tools import assert_equal
import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
testing.assert_array_equal(F.transform(X),
np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3))
testing.assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3))
| bsd-3-clause |
f3r/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Hence, only adjusted measures can safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None, the first labeling is considered a
ground-truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
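# Small illustrative check (added sketch, not in the original example; it
# relies only on numpy and sklearn.metrics imported above): for two
# independent random labelings, the adjusted Rand index stays close to 0
# while the non-adjusted V-measure can be substantially positive -- the
# effect the plots below show at scale.
#
#   rng = np.random.RandomState(0)
#   labels_a = rng.randint(0, 10, size=100)
#   labels_b = rng.randint(0, 10, size=100)
#   print(metrics.adjusted_rand_score(labels_a, labels_b))  # close to 0.0
#   print(metrics.v_measure_score(labels_a, labels_b))      # clearly above 0.0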
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
abimannans/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes the Lasso path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
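# Added clarifying comment: the x axis below is the l1 norm of the coefficient
# vector at each step of the path, normalised by its final (largest) value so
# that it runs from 0 to 1.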
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
MediffRobotics/DeepRobotics | DeepLearnMaterials/tutorials/Reinforcement_learning_TUT/4_Sarsa_lambda_maze/RL_brain.py | 1 | 5275 | """
This part of the code is the Q-learning brain, i.e. the decision-making part of the agent.
All decisions are made in here.
View more on 莫烦Python: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import pandas as pd
class RL(object):
def __init__(self, action_space, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
self.actions = action_space # a list
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon = e_greedy
def choose_action(self, observation):
pass
def learn(self, *args):
pass
# off-policy
class QTable(RL):
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
super(QTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)
self.q_table = pd.DataFrame(columns=self.actions)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series(
[0]*len(self.actions),
index=self.q_table.columns,
name=state,
)
)
def choose_action(self, observation):
self.check_state_exist(observation)
# action selection
if np.random.uniform() < self.epsilon:
# choose best action
state_action = self.q_table.ix[observation, :]
state_action = state_action.reindex(np.random.permutation(state_action.index)) # some actions have same value
action = state_action.argmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_):
self.check_state_exist(s_)
q_predict = self.q_table.ix[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.ix[s_, :].max() # next state is not terminal
else:
q_target = r # next state is terminal
self.q_table.ix[s, a] += self.lr * (q_target - q_predict) # update
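# Added note: the update above implements the standard off-policy Q-learning
# rule, Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a)),
# i.e. it bootstraps from the greedy value of the next state regardless of
# which action the agent will actually take next.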
# on-policy
class SarsaTable(RL):
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):
super(SarsaTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)
self.q_table = pd.DataFrame(columns=self.actions)
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
self.q_table = self.q_table.append(
pd.Series(
[0]*len(self.actions),
index=self.q_table.columns,
name=state,
)
)
def choose_action(self, observation):
self.check_state_exist(observation)
# action selection
if np.random.rand() < self.epsilon:
# choose best action
state_action = self.q_table.ix[observation, :]
state_action = state_action.reindex(np.random.permutation(state_action.index)) # some actions have same value
action = state_action.argmax()
else:
# choose random action
action = np.random.choice(self.actions)
return action
def learn(self, s, a, r, s_, a_):
self.check_state_exist(s_)
q_predict = self.q_table.ix[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.ix[s_, a_] # next state is not terminal
else:
q_target = r # next state is terminal
self.q_table.ix[s, a] += self.lr * (q_target - q_predict) # update
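# Added note: unlike Q-learning above, Sarsa is on-policy -- the target uses
# Q(s', a') for the next action a_ the agent actually chose:
# Q(s, a) <- Q(s, a) + lr * (r + gamma * Q(s', a') - Q(s, a)).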
# backward eligibility traces
class SarsaLambdaTable(SarsaTable):
def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, trace_decay=0.9):
super(SarsaLambdaTable, self).__init__(actions, learning_rate, reward_decay, e_greedy)
# backward view, eligibility trace.
self.lambda_ = trace_decay
def initialize_trace(self):
self.eligibility_trace = self.q_table * 0
def check_state_exist(self, state):
if state not in self.q_table.index:
# append new state to q table
to_be_append = pd.Series(
[0] * len(self.actions),
index=self.q_table.columns,
name=state,
)
self.q_table = self.q_table.append(to_be_append)
# also update eligibility trace
self.eligibility_trace = self.eligibility_trace.append(to_be_append)
def learn(self, s, a, r, s_, a_):
self.check_state_exist(s_)
q_predict = self.q_table.ix[s, a]
if s_ != 'terminal':
q_target = r + self.gamma * self.q_table.ix[s_, a_] # next state is not terminal
else:
q_target = r # next state is terminal
error = q_target - q_predict
# replacing trace: reset the traces for this state, then mark the visited state-action pair with 1
self.eligibility_trace.ix[s, :] *= 0
self.eligibility_trace.ix[s, a] = 1
# Q update
self.q_table += self.lr * error * self.eligibility_trace
# decay eligibility trace after update
self.eligibility_trace *= self.gamma*self.lambda_
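# Added usage sketch (not part of the original module): how these tables are
# typically driven by an environment loop. `env` and `n_actions` are
# assumptions here -- a maze environment with reset()/step(action) as in the
# accompanying tutorial -- so the loop is kept commented out.
#
#   RL = SarsaLambdaTable(actions=list(range(n_actions)))
#   for episode in range(100):
#       observation = env.reset()
#       action = RL.choose_action(str(observation))
#       RL.initialize_trace()                 # reset eligibility traces each episode
#       while True:
#           observation_, reward, done = env.step(action)
#           action_ = RL.choose_action(str(observation_))
#           RL.learn(str(observation), action, reward, str(observation_), action_)
#           observation, action = observation_, action_
#           if done:
#               break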
| gpl-3.0 |
aabadie/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
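# Illustrative sketch (not one of the tests above): typical ParameterSampler usage,
# mirroring the replacement behaviour exercised in test_parameters_sampler_replacement.
# The helper name below is made up; `uniform` and `ParameterSampler` are the objects
# already imported at the top of this module.
def _example_parameter_sampler_usage(n_iter=5, seed=0):
    param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions, n_iter=n_iter, random_state=seed)
    # each sample is a plain dict of parameter values, e.g. {'kernel': 'rbf', 'C': 0.54...}
    return list(sampler)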
| bsd-3-clause |
fatfishcc/justatest | get_all_equ.py | 1 | 1889 | #!/usr/bin/env python
import pandas as pd
import tushare as ts
import sys
sys.path.append('/Users/tiacui/aws/justatest/lib')
import key_file
from termcolor import colored
from pymongo import MongoClient
import pymongo
import time_conv
import calc
def main():
mg_client = MongoClient('localhost', 27017)
db = mg_client.stock
prices = db.prices
equ = db.equ
cursor = prices.find({"tradeDate":"2016-03-30"})
all = []
for row in cursor:
ticker = row['ticker']
all.append(ticker)
print(len(all))
ts.set_token(key_file.ts_key)
for t in ['A', 'B']:
print(t)
mt = ts.Equity()
df = mt.Equ(equTypeCD=t)
df['ticker'] = df['ticker'].map(lambda x: str(x).zfill(6))
print(str(len(df.index)) + ' rows in the df...')
for i, row in df.iterrows():
if row['ticker'] in all:
try:
r = equ.insert_one({
"ticker":row["ticker"],
"secShortName":row["secShortName"],
"exchangeCD":row["exchangeCD"],
"ListSectorCD":row["ListSectorCD"],
"ListSector":row["ListSector"],
"listDate":row["listDate"],
"equTypeCD":row["equTypeCD"],
"equType":row["equType"],
"exCountryCD":row["exCountryCD"],
"totalShares":row["totalShares"],
"officeAddr":row["officeAddr"],
"primeOperating":row["primeOperating"],
"endDate":row["endDate"]
})
except pymongo.errors.DuplicateKeyError as Ex:
print('existing data...')
return
if __name__ == '__main__':
main()
| gpl-2.0 |
jart/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 21 | 5221 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), axis=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Evaluate.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
hoanghaiphan/acados | experimental/rien/rk4_example/test.py | 7 | 3142 | import erk_integrator as erk
import sparse_erk_integrator as serk
import rk4_integrator as rk4
import numpy as np
import matplotlib.pyplot as plt
sim_in = rk4.sim_in()
sim_out = rk4.sim_out()
sim_in2 = erk.sim_in()
sim_out2 = erk.sim_out()
sim_in3 = serk.sim_in()
sim_out3 = serk.sim_out()
sim_in.step = 0.1
sim_in2.step = 0.1
sim_in.nSteps = 1
sim_in2.nSteps = 1
Ts = sim_in.step*sim_in.nSteps
sim_in.x = [1.0, -0.5]
sim_in.u = [0.1]
sim_in2.x = [1.0, -0.5]
sim_in2.u = [0.1]
print "x0 = ", sim_in.x
print "u = ", sim_in.u
# Perform a numerical simulation
N = 10;
xs = np.zeros((N+1,2))
xs[0,:] = sim_in.x
xs2 = np.zeros((N+1,2))
xs2[0,:] = sim_in.x
Sxs = []
Sus = []
for i in range(N):
sim_in.x = xs[i,:]
rk4.integrate(sim_in, sim_out)
xs[i+1,:] = sim_out.xn
Sxs.append(sim_out.Sx)
Sus.append(sim_out.Su)
sim_in2.x = xs[i,:]
erk.integrate(sim_in2, sim_out2)
xs2[i+1,:] = sim_out2.xn
print "xnext = ", xs[-1]
print "Sx = ", Sxs[-1]
print "Su = ", Sus[-1]
time = [Ts*i for i in range(N+1)]
plt.figure(1)
plt.clf()
plt.plot(time, xs[:,0], 'o-', label="x1 (RK4)")
plt.plot(time, xs[:,1], 'o-', label="x2 (RK4)")
plt.plot(time, xs2[:,0], 'o-', label="x1 (ERK)")
plt.plot(time, xs2[:,1], 'o-', label="x2 (ERK)")
plt.legend(loc='upper left')
plt.xlabel("time (s)")
plt.ylabel("state")
plt.show()
# Compute relative errors for varying step sizes
stepSize = 0.00001
numSteps = 100000
sim_in.step = stepSize
sim_in.nSteps = numSteps
sim_in.x = [1.0, -0.5]
sim_in.u = [0.1]
rk4.integrate(sim_in, sim_out);
x_ex = sim_out.xn # 'exact' values
sim_in2.step = sim_in.step
sim_in2.nSteps = sim_in.nSteps
sim_in2.x = sim_in.x
sim_in2.u = sim_in.u
sim_in3.step = sim_in.step
sim_in3.nSteps = sim_in.nSteps
sim_in3.x = sim_in.x
sim_in3.u = sim_in.u
stepSize = 0.0001
numSteps = 10000
print "Ts = ", stepSize*numSteps
steps = [1, 0.5, 0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.0025, 0.001]
nSteps = [1, 2, 4, 10, 20, 40, 100, 200, 400, 1000]
xs3 = np.zeros((len(steps),2))
err = np.zeros(len(steps))
timings = np.zeros(len(steps))
timings2 = np.zeros(len(steps))
timings3 = np.zeros(len(steps))
for i in range(len(steps)):
sim_in.step = steps[i]
sim_in.nSteps = nSteps[i]
rk4.integrate(sim_in, sim_out);
xs3[i,:] = sim_out.xn
err[i] = max(abs(xs3[i,:]-x_ex))
timings[i] = 1e6*sim_out.cpuTime
sim_in2.step = steps[i]
sim_in2.nSteps = nSteps[i]
erk.integrate(sim_in2, sim_out2);
timings2[i] = 1e6*sim_out2.cpuTime
sim_in3.step = steps[i]
sim_in3.nSteps = nSteps[i]
serk.integrate(sim_in3, sim_out3);
timings3[i] = 1e6*sim_out3.cpuTime
print "abs err = ", err
plt.figure(2)
plt.clf()
plt.plot(nSteps, err[:], 'ro--')
plt.xlabel("number of steps")
plt.ylabel("max abs error")
plt.yscale('log')
plt.xscale('log')
plt.show()
plt.figure(3)
plt.clf()
plt.plot(nSteps[1:8], timings[1:8], 'go-', label="RK4 implementation")
plt.plot(nSteps[1:8], timings2[1:8], 'ro-', label="ERK implementation")
plt.plot(nSteps[1:8], timings3[1:8], 'bo-', label="'SPARSE' ERK implementation")
plt.legend(loc='upper left')
plt.xlabel("number of steps")
plt.ylabel("cpuTime/step ($\mu$s)")
plt.show()
| lgpl-3.0 |
krafczyk/spack | var/spack/repos/builtin/packages/cosmomc/package.py | 5 | 7786 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import fnmatch
import os
class Cosmomc(Package):
"""CosmoMC is a Fortran 2008 Markov-Chain Monte-Carlo (MCMC) engine
for exploring cosmological parameter space, together with
Fortran and python code for analysing Monte-Carlo samples and
importance sampling (plus a suite of scripts for building grids
of runs, plotting and presenting results)."""
homepage = "http://cosmologist.info/cosmomc/"
url = "https://github.com/cmbant/CosmoMC/archive/Nov2016.tar.gz"
version('2016.11', '98620cb746352f68fb0c1196e9a070ac')
version('2016.06', '92dc651d1407cca6ea9228992165f5cb')
def url_for_version(self, version):
names = {'2016.11': "Nov2016",
'2016.06': "June2016"}
return ("https://github.com/cmbant/CosmoMC/archive/%s.tar.gz" %
names[str(version)])
variant('mpi', default=True, description='Enable MPI support')
variant('planck', default=False,
description='Enable Planck Likelihood code and baseline data')
variant('python', default=True, description='Enable Python bindings')
extends('python', when='+python')
depends_on('mpi', when='+mpi')
depends_on('planck-likelihood', when='+planck')
depends_on('py-matplotlib', type=('build', 'run'), when='+python')
depends_on('py-numpy', type=('build', 'run'), when='+python')
depends_on('py-pandas', type=('build', 'run'), when='+python')
depends_on('py-scipy', type=('build', 'run'), when='+python')
depends_on('py-six', type=('build', 'run'), when='+python')
depends_on('python @2.7:2.999,3.4:', type=('build', 'run'), when='+python')
patch('Makefile.patch')
patch('errorstop.patch')
parallel = False
def install(self, spec, prefix):
# Clean up environment to avoid configure problems
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
# Set up Planck data if requested
clikdir = join_path('data', 'clik')
try:
os.remove(clikdir)
except OSError:
pass
if '+planck' in spec:
os.symlink(join_path(os.environ['CLIK_DATA'], 'plc_2.0'), clikdir)
else:
os.environ.pop('CLIK_DATA', '')
os.environ.pop('CLIK_PATH', '')
os.environ.pop('CLIK_PLUGIN', '')
# Choose compiler
# Note: Instead of checking the compiler vendor, we should
# rewrite the Makefile to use Spack's options all the time
if spec.satisfies('%gcc'):
if not spec.satisfies('%gcc@6:'):
raise InstallError(
"When using GCC, "
"CosmoMC requires version gcc@6: for building")
choosecomp = 'ifortErr=1' # choose gfortran
elif spec.satisfies('%intel'):
            if not spec.satisfies('%intel@14:'):
raise InstallError(
"When using the Intel compiler, "
"CosmoMC requires version intel@14: for building")
choosecomp = 'ifortErr=0' # choose ifort
else:
raise InstallError("Only GCC and Intel compilers are supported")
# Configure MPI
if '+mpi' in spec:
wantmpi = 'BUILD=MPI'
mpif90 = 'MPIF90C=%s' % spec['mpi'].mpifc
else:
wantmpi = 'BUILD=NOMPI'
mpif90 = 'MPIF90C='
# Choose BLAS and LAPACK
lapack = ("LAPACKL=%s" %
(spec['lapack'].libs + spec['blas'].libs).ld_flags)
# Build
make(choosecomp, wantmpi, mpif90, lapack)
# Install
mkdirp(prefix.bin)
install('cosmomc', prefix.bin)
root = join_path(prefix.share, 'cosmomc')
mkdirp(root)
entries = [
'batch1',
'batch2',
'batch3',
'camb',
'chains',
'clik_latex.paramnames',
'clik_units.paramnames',
'cosmomc.cbp',
'data',
'distgeneric.ini',
'distparams.ini',
'disttest.ini',
'docs',
'job_script',
'job_script_MOAB',
'job_script_SLURM',
'paramnames',
'params_generic.ini',
'planck_covmats',
'scripts',
# don't copy 'source'
'test.ini',
'test_pico.ini',
'test_planck.ini',
'tests',
]
if '+python' in spec:
entries += ['python']
for entry in entries:
if os.path.isfile(entry):
install(entry, root)
else:
install_tree(entry, join_path(root, entry))
for dirpath, dirnames, filenames in os.walk(prefix):
for filename in fnmatch.filter(filenames, '*~'):
os.remove(os.path.join(dirpath, filename))
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
prefix = self.prefix
spec = self.spec
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
os.environ.pop('COSMOMC_LOCATION', '')
os.environ.pop('PLC_LOCATION', '')
os.environ.pop('CLIKPATH', '')
os.environ.pop('PLANCKLIKE', '')
exe = spec['cosmomc'].command.path
args = []
if '+mpi' in spec:
# Add mpirun prefix
args = ['-np', '1', exe]
exe = join_path(spec['mpi'].prefix.bin, 'mpiexec')
cosmomc = Executable(exe)
with working_dir('spack-check', create=True):
for entry in [
'camb',
'chains',
'data',
'paramnames',
'planck_covmats',
]:
os.symlink(join_path(prefix.share, 'cosmomc', entry), entry)
inifile = join_path(prefix.share, 'cosmomc', 'test.ini')
cosmomc(*(args + [inifile]))
if '+planck' in spec:
inifile = join_path(prefix.share, 'cosmomc', 'test_planck.ini')
cosmomc(*(args + [inifile]))
| lgpl-2.1 |
Planelles20/ReactorMultitubular | main.py | 1 | 5913 | import numpy as np
import matplotlib.pyplot as plt
import representar as rep
import integrateODE as intODE
import datos
import kinetic as kn
if __name__ == "__main__":
    # initial conditions
n0 = [75.0, 3.75, 0, 0, 0] # kmol/h
P0 = 2 # atm
T0 = 270 # C
Ts0 = 270 # C
    L = datos.L # meters
    N = datos.Ntub # initial number of tubes
    Dint = datos.Dint # internal diameter of the tubes (m)
    adi = datos.adi
    nl = datos.nl # points along the length L
    nt = datos.nt # points in time
    tf = datos.tf # hours
    # What this solves:
    # initial state n0, T0, Ts0, P0, a0  ->  y = [n0, T0, Ts0, P0, a0]
    # coupled system, integrated along the catalyst mass W (and time t):
    #   dnj/dW = f(nj, T, P, a)
    #   dT/dW  = h(nj, T, P, a)
    #   dTs/dW = i(nj, T, P, a)
    #   dP/dW  = j(nj, T, P)
    #   da/dt  = k(nj, T, P, a)
    # i.e. dy/dW collects [dnj/dW, dT/dW, dTs/dW, dP/dW], while the catalyst
    # activity a is updated in time through da/dt.
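    # Illustrative sketch only (not part of this project): a system with the same
    # structure could be integrated with scipy's solve_ivp. The names below
    # (_sketch_integrate_along_W, rhs, W_span) are hypothetical; the real
    # right-hand sides are implemented in intODE/kinetic.
    def _sketch_integrate_along_W(y0, W_span):
        from scipy.integrate import solve_ivp  # assumes scipy is available
        def rhs(W, y):
            # y packs [n1..n5, T, Ts, P]; placeholder derivatives (all zero)
            return [0.0] * len(y)
        return solve_ivp(rhs, W_span, y0, method="RK45")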
    # Solve
if(True):
SOL = np.zeros((nt,nl,9))
tlong = np.linspace(0,tf,nt)
dt = tlong[1]-tlong[0]
a = np.ones((nl))
for i in range(nt):
print (nt-i)
sol = intODE.ODE(n0, T0, Ts0, P0, a, Adiabatico=adi, n=nl)
ylong = sol.solutionLong2()
SOL[i,:,:8] = ylong
SOL[i,:,8] = a
# nj0 T P a
a = sol.aaa(SOL[i,:,:5], SOL[i,:,5], SOL[i,:,7], SOL[i,:,8], dt)
xlong = sol.abcisasLongReactor()
np.savetxt('./data/desactivaciont.dat', tlong, fmt='%.5e')
np.savetxt('./data/desactivacionX.dat', xlong, fmt='%.5e')
np.savetxt('./data/desactivacion0.dat', SOL[:,:,0], fmt='%.5e')
np.savetxt('./data/desactivacion1.dat', SOL[:,:,1], fmt='%.5e')
np.savetxt('./data/desactivacion2.dat', SOL[:,:,2], fmt='%.5e')
np.savetxt('./data/desactivacion3.dat', SOL[:,:,3], fmt='%.5e')
np.savetxt('./data/desactivacion4.dat', SOL[:,:,4], fmt='%.5e')
np.savetxt('./data/desactivacion5.dat', SOL[:,:,5], fmt='%.5e')
np.savetxt('./data/desactivacion6.dat', SOL[:,:,6], fmt='%.15e')
np.savetxt('./data/desactivacion7.dat', SOL[:,:,7], fmt='%.5e')
np.savetxt('./data/desactivacion8.dat', SOL[:,:,8], fmt='%.5e')
    # Plot
if (True):
tlong = np.linspace(0,tf,nt)
xlong = np.zeros((nl))
xlong = np.loadtxt('./data/desactivacionX.dat')
SOL = np.zeros((nt,nl,9))
SOL[:,:,0] = np.loadtxt('./data/desactivacion0.dat')
SOL[:,:,1] = np.loadtxt('./data/desactivacion1.dat')
SOL[:,:,2] = np.loadtxt('./data/desactivacion2.dat')
SOL[:,:,3] = np.loadtxt('./data/desactivacion3.dat')
SOL[:,:,4] = np.loadtxt('./data/desactivacion4.dat')
SOL[:,:,5] = np.loadtxt('./data/desactivacion5.dat')
SOL[:,:,6] = np.loadtxt('./data/desactivacion6.dat')
SOL[:,:,7] = np.loadtxt('./data/desactivacion7.dat')
SOL[:,:,8] = np.loadtxt('./data/desactivacion8.dat')
rep1 = rep.plotearLong(SOL,xlong,tlong)
rep1.componentes()
rep1.T_and_P()
rep1.T_and_Ts()
rep1.actividad()
rep1.general_plot(SOL[:,:,0], xlong, tlong, title="ciclohexanol (kmol/h)")
rep1.general_plot(SOL[:,:,1], xlong, tlong, title="ciclohexanona (kmol/h)")
rep1.general_plot(SOL[:,:,5], xlong, tlong, title="Temperatura (C)")
rep1.general_plot(SOL[:,:,7], xlong, tlong, title="Presion (atm)")
rep1.moduloThiele()
rep1.fraccionMolar(n=0,title="ciclohexanol")
rep1.fraccionMolar(n=1,title="ciclohexanona")
rep1.conversion()
X1 = np.zeros((nl))
X2 = np.zeros((nl))
X3 = np.zeros((nl))
X4 = np.zeros((nl))
X5 = np.zeros((nl))
for i in range(nl):
X1[i] = SOL[0,i,0]/sum(SOL[0,i,0:5])
X2[i] = SOL[0,i,1]/sum(SOL[0,i,0:5])
X3[i] = SOL[0,i,2]/sum(SOL[0,i,0:5])
X4[i] = SOL[0,i,3]/sum(SOL[0,i,0:5])
X5[i] = SOL[0,i,4]/sum(SOL[0,i,0:5])
np.savetxt('./data/fraccionMolarCiclohexanonaTiempo0.dat', X2, fmt='%.5e')
np.savetxt('./data/TemperaturaAtiempo0.dat', SOL[0,:,5], fmt='%.5e')
fig = plt.figure(1)
plt.title("Fraccion molar")
plt.xlabel("longitud (m)")
plt.ylabel("Fraccion molar")
plt.plot(xlong, X1)
plt.plot(xlong, X2)
plt.plot(xlong, X3)
plt.plot(xlong, X4)
plt.plot(xlong, X5)
plt.show()
for i in range(nl):
X1[i] = SOL[-1,i,0]/sum(SOL[-1,i,0:5])
X2[i] = SOL[-1,i,1]/sum(SOL[-1,i,0:5])
X3[i] = SOL[-1,i,2]/sum(SOL[-1,i,0:5])
X4[i] = SOL[-1,i,3]/sum(SOL[-1,i,0:5])
X5[i] = SOL[-1,i,4]/sum(SOL[-1,i,0:5])
np.savetxt('./data/fraccionMolarCiclohexanonaTiempoFinal.dat', X2, fmt='%.5e')
np.savetxt('./data/TemperaturaAtiempoFinal.dat', SOL[-1,:,5], fmt='%.5e')
fig = plt.figure(1)
plt.title("Fraccion molar")
plt.xlabel("longitud (m)")
plt.ylabel("Fraccion molar")
plt.plot(xlong, X1)
plt.plot(xlong, X2)
plt.plot(xlong, X3)
plt.plot(xlong, X4)
plt.plot(xlong, X5)
plt.show()
fig = plt.figure(1)
plt.title("Temperatura")
plt.xlabel("longitud (m)")
plt.ylabel("Temperatura (C)")
plt.plot(xlong, SOL[0,:,5])
plt.plot(xlong, SOL[0,:,6])
plt.ylim([263,271])
plt.show()
fig = plt.figure(1)
plt.title("Flujo molar")
plt.xlabel("longitud (m)")
plt.ylabel("Flujo molar (kmol/h)")
plt.plot(xlong, SOL[0,:,:5])
plt.show()
fig = plt.figure(1)
plt.title("Presion")
plt.xlabel("longitud (m)")
plt.ylabel("Presion (atm)")
plt.plot(xlong, SOL[0,:,7])
plt.show()
| gpl-3.0 |
highfei2011/spark | python/pyspark/sql/types.py | 4 | 65368 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
__all__ = [
"DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
"TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
"LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
"""Base class for data types."""
def __repr__(self):
return self.__class__.__name__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def typeName(cls):
return cls.__name__[:-4].lower()
def simpleString(self):
return self.typeName()
def jsonValue(self):
return self.typeName()
def json(self):
return json.dumps(self.jsonValue(),
separators=(',', ':'),
sort_keys=True)
def needConversion(self):
"""
        Does this type need conversion between Python object and internal SQL object?
This is used to avoid the unnecessary conversion for ArrayType/MapType/StructType.
"""
return False
def toInternal(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def fromInternal(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
# This singleton pattern does not work with pickle; you will get
# another object after pickling and unpickling
class DataTypeSingleton(type):
"""Metaclass for DataType"""
_instances = {}
def __call__(cls):
if cls not in cls._instances:
cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
return cls._instances[cls]
class NullType(DataType):
"""Null type.
The data type representing None, used for the types that cannot be inferred.
"""
__metaclass__ = DataTypeSingleton
class AtomicType(DataType):
"""An internal type used to represent everything that is not
null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
"""Numeric data types.
"""
class IntegralType(NumericType):
"""Integral data types.
"""
__metaclass__ = DataTypeSingleton
class FractionalType(NumericType):
"""Fractional data types.
"""
class StringType(AtomicType):
"""String data type.
"""
__metaclass__ = DataTypeSingleton
class BinaryType(AtomicType):
"""Binary (byte array) data type.
"""
__metaclass__ = DataTypeSingleton
class BooleanType(AtomicType):
"""Boolean data type.
"""
__metaclass__ = DataTypeSingleton
class DateType(AtomicType):
"""Date (datetime.date) data type.
"""
__metaclass__ = DataTypeSingleton
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def needConversion(self):
return True
def toInternal(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def fromInternal(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimestampType(AtomicType):
"""Timestamp (datetime.datetime) data type.
"""
__metaclass__ = DataTypeSingleton
def needConversion(self):
return True
def toInternal(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 1000000 + dt.microsecond
def fromInternal(self, ts):
if ts is not None:
# using int to avoid precision loss in float
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
"""Decimal (decimal.Decimal) data type.
    The DecimalType must have a fixed precision (the maximum total number of digits)
    and scale (the number of digits to the right of the decimal point). For example,
    (5, 2) can support values from -999.99 to 999.99.
    The precision can be up to 38; the scale must be less than or equal to the precision.
    When creating a DecimalType, the default precision and scale is (10, 0). When inferring
    a schema from decimal.Decimal objects, it will be DecimalType(38, 18).
    :param precision: the maximum total number of digits (default: 10)
    :param scale: the number of digits to the right of the decimal point (default: 0)
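    A short illustrative example:
    >>> DecimalType(5, 2).simpleString()
    'decimal(5,2)'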
"""
def __init__(self, precision=10, scale=0):
self.precision = precision
self.scale = scale
self.hasPrecisionInfo = True # this is public API
def simpleString(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def jsonValue(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def __repr__(self):
return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
"""Double data type, representing double precision floats.
"""
__metaclass__ = DataTypeSingleton
class FloatType(FractionalType):
"""Float data type, representing single precision floats.
"""
__metaclass__ = DataTypeSingleton
class ByteType(IntegralType):
"""Byte data type, i.e. a signed integer in a single byte.
"""
def simpleString(self):
return 'tinyint'
class IntegerType(IntegralType):
"""Int data type, i.e. a signed 32-bit integer.
"""
def simpleString(self):
return 'int'
class LongType(IntegralType):
"""Long data type, i.e. a signed 64-bit integer.
If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
please use :class:`DecimalType`.
"""
def simpleString(self):
return 'bigint'
class ShortType(IntegralType):
"""Short data type, i.e. a signed 16-bit integer.
"""
def simpleString(self):
return 'smallint'
class ArrayType(DataType):
"""Array data type.
:param elementType: :class:`DataType` of each element in the array.
:param containsNull: boolean, whether the array can contain null (None) values.
"""
def __init__(self, elementType, containsNull=True):
"""
>>> ArrayType(StringType()) == ArrayType(StringType(), True)
True
>>> ArrayType(StringType(), False) == ArrayType(StringType())
False
"""
assert isinstance(elementType, DataType),\
"elementType %s should be an instance of %s" % (elementType, DataType)
self.elementType = elementType
self.containsNull = containsNull
def simpleString(self):
return 'array<%s>' % self.elementType.simpleString()
def __repr__(self):
return "ArrayType(%s,%s)" % (self.elementType,
str(self.containsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"elementType": self.elementType.jsonValue(),
"containsNull": self.containsNull}
@classmethod
def fromJson(cls, json):
return ArrayType(_parse_datatype_json_value(json["elementType"]),
json["containsNull"])
def needConversion(self):
return self.elementType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.toInternal(v) for v in obj]
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
"""Map data type.
:param keyType: :class:`DataType` of the keys in the map.
:param valueType: :class:`DataType` of the values in the map.
:param valueContainsNull: indicates whether values can contain null (None) values.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, keyType, valueType, valueContainsNull=True):
"""
>>> (MapType(StringType(), IntegerType())
... == MapType(StringType(), IntegerType(), True))
True
>>> (MapType(StringType(), IntegerType(), False)
... == MapType(StringType(), FloatType()))
False
"""
assert isinstance(keyType, DataType),\
"keyType %s should be an instance of %s" % (keyType, DataType)
assert isinstance(valueType, DataType),\
"valueType %s should be an instance of %s" % (valueType, DataType)
self.keyType = keyType
self.valueType = valueType
self.valueContainsNull = valueContainsNull
def simpleString(self):
return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())
def __repr__(self):
return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
str(self.valueContainsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"keyType": self.keyType.jsonValue(),
"valueType": self.valueType.jsonValue(),
"valueContainsNull": self.valueContainsNull}
@classmethod
def fromJson(cls, json):
return MapType(_parse_datatype_json_value(json["keyType"]),
_parse_datatype_json_value(json["valueType"]),
json["valueContainsNull"])
def needConversion(self):
return self.keyType.needConversion() or self.valueType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
for k, v in obj.items())
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
for k, v in obj.items())
class StructField(DataType):
"""A field in :class:`StructType`.
:param name: string, name of the field.
:param dataType: :class:`DataType` of the field.
:param nullable: boolean, whether the field can be null (None) or not.
    :param metadata: a dict from string to simple type that can be automatically converted to JSON
"""
def __init__(self, name, dataType, nullable=True, metadata=None):
"""
>>> (StructField("f1", StringType(), True)
... == StructField("f1", StringType(), True))
True
>>> (StructField("f1", StringType(), True)
... == StructField("f2", StringType(), True))
False
"""
assert isinstance(dataType, DataType),\
"dataType %s should be an instance of %s" % (dataType, DataType)
assert isinstance(name, basestring), "field name %s should be string" % (name)
if not isinstance(name, str):
name = name.encode('utf-8')
self.name = name
self.dataType = dataType
self.nullable = nullable
self.metadata = metadata or {}
def simpleString(self):
return '%s:%s' % (self.name, self.dataType.simpleString())
def __repr__(self):
return "StructField(%s,%s,%s)" % (self.name, self.dataType,
str(self.nullable).lower())
def jsonValue(self):
return {"name": self.name,
"type": self.dataType.jsonValue(),
"nullable": self.nullable,
"metadata": self.metadata}
@classmethod
def fromJson(cls, json):
return StructField(json["name"],
_parse_datatype_json_value(json["type"]),
json["nullable"],
json["metadata"])
def needConversion(self):
return self.dataType.needConversion()
def toInternal(self, obj):
return self.dataType.toInternal(obj)
def fromInternal(self, obj):
return self.dataType.fromInternal(obj)
def typeName(self):
raise TypeError(
"StructField does not have typeName. "
"Use typeName on its type explicitly instead.")
class StructType(DataType):
"""Struct type, consisting of a list of :class:`StructField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
A contained :class:`StructField` can be accessed by name or position.
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct1["f1"]
StructField(f1,StringType,true)
>>> struct1[0]
StructField(f1,StringType,true)
"""
def __init__(self, fields=None):
"""
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True),
... StructField("f2", IntegerType(), False)])
>>> struct1 == struct2
False
"""
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, StructField) for f in fields),\
"fields should be a list of StructField"
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
            b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
               metadata (optional)). The data_type parameter may be either a String or a
               DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self
def __iter__(self):
"""Iterate the fields"""
return iter(self.fields)
def __len__(self):
"""Return the number of fields."""
return len(self.fields)
def __getitem__(self, key):
"""Access fields by name or slice."""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No StructField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('StructType index out of range')
elif isinstance(key, slice):
return StructType(self.fields[key])
else:
raise TypeError('StructType keys should be strings, integers or slices')
def simpleString(self):
return 'struct<%s>' % (','.join(f.simpleString() for f in self))
def __repr__(self):
return ("StructType(List(%s))" %
",".join(str(field) for field in self))
def jsonValue(self):
return {"type": self.typeName(),
"fields": [f.jsonValue() for f in self]}
@classmethod
def fromJson(cls, json):
return StructType([StructField.fromJson(f) for f in json["fields"]])
def fieldNames(self):
"""
Returns all field names in a list.
>>> struct = StructType([StructField("f1", StringType(), True)])
>>> struct.fieldNames()
['f1']
"""
return list(self.names)
def needConversion(self):
# We need convert Row()/namedtuple into tuple()
return True
def toInternal(self, obj):
if obj is None:
return
if self._needSerializeAnyField:
# Only calling toInternal function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.toInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.toInternal(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(obj[n] for n in self.names)
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
def fromInternal(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._needSerializeAnyField:
# Only calling fromInternal function for fields that need conversion
values = [f.fromInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion)]
else:
values = obj
return _create_row(self.names, values)
class UserDefinedType(DataType):
"""User-defined type (UDT).
.. note:: WARN: Spark Internal Use Only
"""
@classmethod
def typeName(cls):
return cls.__name__.lower()
@classmethod
def sqlType(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sqlType().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def scalaUDT(cls):
"""
The class name of the paired Scala UDT (could be '', if there
is no corresponding one).
"""
return ''
def needConversion(self):
return True
@classmethod
def _cachedSqlType(cls):
"""
        Cache the sqlType() on the class, because it's heavily used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type
def toInternal(self, obj):
if obj is not None:
return self._cachedSqlType().toInternal(self.serialize(obj))
def fromInternal(self, obj):
v = self._cachedSqlType().fromInternal(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
        Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement toInternal().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement fromInternal().")
def simpleString(self):
return 'udt'
def json(self):
return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
def jsonValue(self):
if self.scalaUDT():
assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
schema = {
"type": "udt",
"class": self.scalaUDT(),
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"sqlType": self.sqlType().jsonValue()
}
else:
ser = CloudPickleSerializer()
b = ser.dumps(type(self))
schema = {
"type": "udt",
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"serializedClass": base64.b64encode(b).decode('utf8'),
"sqlType": self.sqlType().jsonValue()
}
return schema
@classmethod
def fromJson(cls, json):
pyUDT = str(json["pyClass"]) # convert unicode to str
split = pyUDT.rfind(".")
pyModule = pyUDT[:split]
pyClass = pyUDT[split+1:]
m = __import__(pyModule, globals(), locals(), [pyClass])
if not hasattr(m, pyClass):
s = base64.b64decode(json['serializedClass'].encode('utf-8'))
UDT = CloudPickleSerializer().loads(s)
else:
UDT = getattr(m, pyClass)
return UDT()
def __eq__(self, other):
return type(self) == type(other)
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
for v in [ArrayType, MapType, StructType])
_FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
string and case-insensitive strings.
>>> _parse_datatype_string("int ")
IntegerType
>>> _parse_datatype_string("INT ")
IntegerType
>>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ")
StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
>>> _parse_datatype_string("a DOUBLE, b STRING")
StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
>>> _parse_datatype_string("a: array< short>")
StructType(List(StructField(a,ArrayType(ShortType,true),true)))
>>> _parse_datatype_string(" map<string , string > ")
MapType(StringType,StringType,true)
>>> # Error cases
>>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
"""
sc = SparkContext._active_spark_context
def from_ddl_schema(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())
def from_ddl_datatype(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())
try:
# DDL format, "fieldname datatype, fieldname datatype".
return from_ddl_schema(s)
except Exception as e:
try:
# For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc.
return from_ddl_datatype(s)
except:
try:
# For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
return from_ddl_datatype("struct<%s>" % s.strip())
except:
raise e
def _parse_datatype_json_string(json_string):
"""Parses the given data type JSON string.
>>> import pickle
>>> def check_datatype(datatype):
... pickled = pickle.loads(pickle.dumps(datatype))
... assert datatype == pickled
... scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
... python_datatype = _parse_datatype_json_string(scala_datatype.json())
... assert datatype == python_datatype
>>> for cls in _all_atomic_types.values():
... check_datatype(cls())
>>> # Simple ArrayType.
>>> simple_arraytype = ArrayType(StringType(), True)
>>> check_datatype(simple_arraytype)
>>> # Simple MapType.
>>> simple_maptype = MapType(StringType(), LongType())
>>> check_datatype(simple_maptype)
>>> # Simple StructType.
>>> simple_structtype = StructType([
... StructField("a", DecimalType(), False),
... StructField("b", BooleanType(), True),
... StructField("c", LongType(), True),
... StructField("d", BinaryType(), False)])
>>> check_datatype(simple_structtype)
>>> # Complex StructType.
>>> complex_structtype = StructType([
... StructField("simpleArray", simple_arraytype, True),
... StructField("simpleMap", simple_maptype, True),
... StructField("simpleStruct", simple_structtype, True),
... StructField("boolean", BooleanType(), False),
... StructField("withMeta", DoubleType(), False, {"name": "age"})])
>>> check_datatype(complex_structtype)
>>> # Complex ArrayType.
>>> complex_arraytype = ArrayType(complex_structtype, True)
>>> check_datatype(complex_arraytype)
>>> # Complex MapType.
>>> complex_maptype = MapType(complex_structtype,
... complex_arraytype, False)
>>> check_datatype(complex_maptype)
>>> # Decimal with negative scale.
>>> check_datatype(DecimalType(1,-1))
"""
return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
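    """Parses a JSON value (a type-name string or a dict) into a :class:`DataType`.
    Illustrative examples:
    >>> _parse_datatype_json_value("string")
    StringType
    >>> _parse_datatype_json_value("decimal(10,2)")
    DecimalType(10,2)
    """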
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on the C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned
# types, keep in mind that one more bit is required when they are stored as
# signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
# The list of all supported array typecodes is stored here
_array_type_mappings = {
    # Warning: the exact properties of C's float and double are not specified by the C standard.
# On almost every system supported by both python and JVM, they are IEEE 754
# single-precision binary floating-point format and IEEE 754 double-precision
# binary floating-point format. And we do assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
    # The JVM does not have unsigned types, so use signed types that are at least
    # 1 bit larger for storage
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' are only available at python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In python2, array of 'L' happened to be mistakenly partially supported. To
# avoid breaking users' code, we should keep this partial support. Below is a
# dirty hack to keep this partial support and make the unit tests pass
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
                # infer the element type from the first non-None element rather than obj[0]
                return ArrayType(_infer_type(v), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
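# Illustrative sketch (not part of the upstream module): schema inference on a
# plain dict sorts the keys and marks every field nullable; a Python int maps
# to LongType and str to StringType.
# >>> schema = _infer_schema({"name": "Alice", "age": 11})
# >>> [(f.name, type(f.dataType).__name__, f.nullable) for f in schema.fields]
# [('age', 'LongType', True), ('name', 'StringType', True)]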
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
        return _has_nulltype(dt.elementType)
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
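# Illustrative sketch (not part of the upstream module): merging two struct
# types resolves NullType placeholders and unions the field sets.
# >>> a = StructType([StructField("x", NullType()), StructField("y", LongType())])
# >>> b = StructType([StructField("x", StringType()), StructField("z", DoubleType())])
# >>> [(f.name, type(f.dataType).__name__) for f in _merge_type(a, b).fields]
# [('x', 'StringType'), ('y', 'LongType'), ('z', 'DoubleType')]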
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
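# Illustrative sketch (not part of the upstream module): the converter turns a
# dict keyed by field name into a plain tuple ordered by the schema's fields.
# >>> schema = StructType([StructField("a", LongType()), StructField("b", StringType())])
# >>> _create_converter(schema)({"b": "x", "a": 1})
# (1, 'x')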
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
    This verifier also checks the value of obj against the datatype and raises a ValueError if
    it's not within the allowed range, e.g. using 128 as ByteType will overflow. Note that
    Python float is not checked, so it will become infinity when cast to Java float if it overflows.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
        # subclasses of these types cannot be converted by fromInternal in the JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
# StringType can work with any types
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in L{DataFrame}.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
    Row can be used to create a row object by using named arguments;
    the fields will be sorted by names. It is not allowed to omit
    a named argument to represent that the value is None or missing. Such a
    value should be set to None explicitly.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row('name', 'age')>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
This form can also be used to create rows as tuple values, i.e. with unnamed
fields. Beware that such Row objects have different equality semantics:
>>> row1 = Row("Alice", 11)
>>> row2 = Row(name="Alice", age=11)
>>> row1 == row2
False
>>> row3 = Row(a="Alice", b=11)
>>> row1 == row3
True
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
        Return as a dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
    # let the object act like a class
def __call__(self, *args):
"""create new Row object"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join("%r" % field for field in self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
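# Illustrative sketch (not part of the upstream module, requires pyarrow):
# LongType maps to Arrow int64 and StringType to Arrow's utf8 string type.
# >>> spark_schema = StructType([StructField("id", LongType()), StructField("name", StringType())])
# >>> [str(f.type) for f in to_arrow_schema(spark_schema)]
# ['int64', 'string']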
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
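# Illustrative sketch (not part of the upstream module, requires pyarrow):
# for types supported in both directions the conversion round-trips.
# >>> schema = StructType([StructField("ts", TimestampType()), StructField("v", DoubleType())])
# >>> from_arrow_schema(to_arrow_schema(schema)) == schema
# True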
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
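# Illustrative sketch (not part of the upstream module, requires pandas):
# a tz-aware series becomes tz-naive wall-clock time in the requested timezone.
# >>> import pandas as pd
# >>> s = pd.Series(pd.to_datetime(['2018-01-01 00:00:00'])).dt.tz_localize('UTC')
# >>> _check_series_localize_timestamps(s, 'US/Eastern')[0]
# Timestamp('2017-12-31 19:00:00')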
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
        # When tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp is during the hour when the clock is adjusted backward due to
        # daylight saving time (dst).
        # E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 from 2:00 to
        # 1:00 when switching from dst to standard time, and therefore, when tz_localize
        # a tz-naive timestamp 2015-11-01 1:30 with America/New_York timezone, it can be either
        # dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
#
        # Here we explicitly choose to use standard time. This matches the default behavior of
        # pytz.
        #
        # Here is some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
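# Illustrative sketch (not part of the upstream module, requires pandas):
# a tz-naive series is interpreted in the given timezone (standard time for
# ambiguous DST values, as explained above) and normalized to UTC.
# >>> import pandas as pd
# >>> s = pd.Series(pd.to_datetime(['2015-11-01 01:30:00']))
# >>> _check_series_convert_timestamps_internal(s, 'America/New_York')[0]
# Timestamp('2015-11-01 06:30:00+0000', tz='UTC')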
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/zh/ensemble/plot_adaboost_regression.py | 1 | 1600 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1]_ algorithm on a 1D sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree regressor. As the number of boosts is increased, the regressor can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]> Joy yx <[email protected]>
#
# License: BSD 3 clause
# Import the necessary modules
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression models
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the fitted models and their predictions
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| gpl-3.0 |
evgchz/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi partition
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
YuliaRubanova/latent_ode | lib/latent_ode.py | 1 | 4826 | ###########################
# Latent ODEs for Irregularly-Sampled Time Series
# Author: Yulia Rubanova
###########################
import numpy as np
import sklearn as sk
#import gc
import torch
import torch.nn as nn
from torch.nn.functional import relu
import lib.utils as utils
from lib.utils import get_device
from lib.encoder_decoder import *
from lib.likelihood_eval import *
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.distributions import kl_divergence, Independent
from lib.base_models import VAE_Baseline
class LatentODE(VAE_Baseline):
def __init__(self, input_dim, latent_dim, encoder_z0, decoder, diffeq_solver,
z0_prior, device, obsrv_std = None,
use_binary_classif = False, use_poisson_proc = False,
linear_classifier = False,
classif_per_tp = False,
n_labels = 1,
train_classif_w_reconstr = False):
super(LatentODE, self).__init__(
input_dim = input_dim, latent_dim = latent_dim,
z0_prior = z0_prior,
device = device, obsrv_std = obsrv_std,
use_binary_classif = use_binary_classif,
classif_per_tp = classif_per_tp,
linear_classifier = linear_classifier,
use_poisson_proc = use_poisson_proc,
n_labels = n_labels,
train_classif_w_reconstr = train_classif_w_reconstr)
self.encoder_z0 = encoder_z0
self.diffeq_solver = diffeq_solver
self.decoder = decoder
self.use_poisson_proc = use_poisson_proc
def get_reconstruction(self, time_steps_to_predict, truth, truth_time_steps,
mask = None, n_traj_samples = 1, run_backwards = True, mode = None):
if isinstance(self.encoder_z0, Encoder_z0_ODE_RNN) or \
isinstance(self.encoder_z0, Encoder_z0_RNN):
truth_w_mask = truth
if mask is not None:
truth_w_mask = torch.cat((truth, mask), -1)
first_point_mu, first_point_std = self.encoder_z0(
truth_w_mask, truth_time_steps, run_backwards = run_backwards)
means_z0 = first_point_mu.repeat(n_traj_samples, 1, 1)
sigma_z0 = first_point_std.repeat(n_traj_samples, 1, 1)
first_point_enc = utils.sample_standard_gaussian(means_z0, sigma_z0)
else:
raise Exception("Unknown encoder type {}".format(type(self.encoder_z0).__name__))
first_point_std = first_point_std.abs()
assert(torch.sum(first_point_std < 0) == 0.)
if self.use_poisson_proc:
n_traj_samples, n_traj, n_dims = first_point_enc.size()
# append a vector of zeros to compute the integral of lambda
zeros = torch.zeros([n_traj_samples, n_traj,self.input_dim]).to(get_device(truth))
first_point_enc_aug = torch.cat((first_point_enc, zeros), -1)
means_z0_aug = torch.cat((means_z0, zeros), -1)
else:
first_point_enc_aug = first_point_enc
means_z0_aug = means_z0
assert(not torch.isnan(time_steps_to_predict).any())
assert(not torch.isnan(first_point_enc).any())
assert(not torch.isnan(first_point_enc_aug).any())
# Shape of sol_y [n_traj_samples, n_samples, n_timepoints, n_latents]
sol_y = self.diffeq_solver(first_point_enc_aug, time_steps_to_predict)
if self.use_poisson_proc:
sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
assert(torch.sum(int_lambda[:,:,0,:]) == 0.)
assert(torch.sum(int_lambda[0,0,-1,:] <= 0) == 0.)
pred_x = self.decoder(sol_y)
all_extra_info = {
"first_point": (first_point_mu, first_point_std, first_point_enc),
"latent_traj": sol_y.detach()
}
if self.use_poisson_proc:
			# integral of lambda from the last step of the ODE solver
all_extra_info["int_lambda"] = int_lambda[:,:,-1,:]
all_extra_info["log_lambda_y"] = log_lambda_y
if self.use_binary_classif:
if self.classif_per_tp:
all_extra_info["label_predictions"] = self.classifier(sol_y)
else:
all_extra_info["label_predictions"] = self.classifier(first_point_enc).squeeze(-1)
return pred_x, all_extra_info
def sample_traj_from_prior(self, time_steps_to_predict, n_traj_samples = 1):
# input_dim = starting_point.size()[-1]
# starting_point = starting_point.view(1,1,input_dim)
# Sample z0 from prior
starting_point_enc = self.z0_prior.sample([n_traj_samples, 1, self.latent_dim]).squeeze(-1)
starting_point_enc_aug = starting_point_enc
if self.use_poisson_proc:
n_traj_samples, n_traj, n_dims = starting_point_enc.size()
# append a vector of zeros to compute the integral of lambda
zeros = torch.zeros(n_traj_samples, n_traj,self.input_dim).to(self.device)
starting_point_enc_aug = torch.cat((starting_point_enc, zeros), -1)
sol_y = self.diffeq_solver.sample_traj_from_prior(starting_point_enc_aug, time_steps_to_predict,
n_traj_samples = 3)
if self.use_poisson_proc:
sol_y, log_lambda_y, int_lambda, _ = self.diffeq_solver.ode_func.extract_poisson_rate(sol_y)
return self.decoder(sol_y)
| mit |
rhshah/Miscellaneous | MakeMutationListForDownstreamAnalysis/makeComprehensiveMutationList.py | 1 | 16519 | '''
Created On : 06/01/2015
@author: Ronak H Shah
'''
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
from collections import defaultdict
import sys
from collections import Counter
from tabulate import tabulate
from cyordereddict import OrderedDict
from numpy import nan
import matplotlib.gridspec as gridspec
import scipy.spatial.distance as distance
import scipy.cluster.hierarchy as sch
import copy
import brewer2mpl
def main():
parser = argparse.ArgumentParser(
prog='makeComprehensiveMutationList.py',
description='Write Per Patient Mutation in all samples and write heatmap files',
        usage='%(prog)s [options]')
parser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
default=True,
help="make lots of noise [default]")
parser.add_argument(
"-m",
"--mutationFile",
action="store",
dest="mutationFile",
required=True,
metavar='ImpactVaraints.txt',
help="Location of mutation File")
parser.add_argument(
"-t",
"--titleFile",
action="store",
dest="infoFile",
required=True,
metavar='title_file.txt',
help="Location of the title File")
parser.add_argument(
"-o",
"--outputFilePrefix",
action="store",
dest="outFilePrefix",
required=True,
metavar='AnnotatedSV',
help="Full path with prefix name for the output file")
args = parser.parse_args()
(mutationsDF) = readExlFile(args.mutationFile)
(sampleinfoDF) = readTxtFile(args.infoFile)
(patientTosampleDF, sampleDF) = processSampleDF(sampleinfoDF)
(newMutationDF) = modifyMutationDF(mutationsDF.copy(), sampleinfoDF, patientTosampleDF, args)
WriteOutput(newMutationDF, "newMutations", args)
(sampleinfoDF) = readTxtFile(args.infoFile)
(patientTosampleDF, sampleDF) = processSampleDF(sampleinfoDF)
(geneLevelDF) = makeGeneLevelDF(mutationsDF.copy(), sampleinfoDF, patientTosampleDF, args)
WriteOutput(geneLevelDF, "GeneLevel", args)
(sampleinfoDF) = readTxtFile(args.infoFile)
(patientTosampleDF, sampleDF) = processSampleDF(sampleinfoDF)
makePateintLevelDF(mutationsDF.copy(), sampleinfoDF, patientTosampleDF, args)
(sampleinfoDF) = readTxtFile(args.infoFile)
(patientTosampleDF, sampleDF) = processSampleDF(sampleinfoDF)
#makePateintLevelDFForLichee(mutationsDF.copy(), sampleinfoDF, patientTosampleDF, args)
#geneLevelDFcopy = geneLevelDF.copy()
# makeHeatMaps(geneLevelDFcopy)
def readTxtFile(inFile):
df = pd.read_csv(inFile, sep='\t', header=0, keep_default_na='True')
print "Finished Reading Text File\n"
return(df)
def readExlFile(inFile):
df = pd.io.excel.read_excel(inFile, sheetname=0, keep_default_na='True')
print "Finished Reading Excel File\n"
return(df)
def processSampleDF(df):
groupsOfPatients = df.groupby('Patient_ID')
# Remove Normal Records
gDF = groupsOfPatients.apply(lambda g: g[g['Class'] == 'Tumor'])
# Remove Patients with only 1 sample
gDF = gDF.groupby('Patient_ID').filter(lambda g: len(g) > 1)
groupOfMultiSamplePatientsDF = gDF.groupby('Patient_ID')
groupOfSamplesDF = df.groupby('Sample_ID')
return(groupOfMultiSamplePatientsDF, groupOfSamplesDF)
def modifyMutationDF(mutationsDF, sampleDF, patientTosampleDF, args):
sampleuniqdict = OrderedDict()
names = mutationsDF.columns.values
newMutationDF = pd.DataFrame(columns=names)
indexForNewDF = 0
for count, row in mutationsDF.iterrows():
sampleName = str(row.loc['Sample'])
sampleIndex = int(sampleDF[sampleDF['Sample_ID'] == sampleName].index)
patientID = (sampleDF.iloc[sampleIndex]['Patient_ID'])
# print sampleIndex, type(sampleDF), str(patientID)
try:
patientTosampleGrp = patientTosampleDF.get_group(patientID)
except KeyError:
if(args.verbose):
print "The patient ID for the sample is not in the group:", patientID, "\n"
continue
NormalName = str(row.loc['NormalUsed'])
chr = str(row.loc['Chrom'])
start = str(row.loc['Start'])
ref = str(row.loc['Ref'])
alt = str(row.loc['Alt'])
key = patientID + ";" + chr + ";" + start + ";" + ref + ";" + alt
# print key,"\n"
if(key in sampleuniqdict):
if(args.verbose):
print "Data Already Processed for the mutation:", key, "\n"
continue
else:
for icount, irow in patientTosampleGrp.iterrows():
newrecord = row.copy()
gSampleID = str(irow.loc['Sample_ID'])
(dp, rd, ad, vf) = (str(row.loc[gSampleID])).split(";")
(n, dp) = dp.split("=")
(n, rd) = rd.split("=")
(n, ad) = ad.split("=")
(n, vf) = vf.split("=")
# print gSampleID,dp,rd,ad,vf,"\n"
newrecord.loc['Sample'] = gSampleID
newrecord.loc['T_TotalDepth'] = dp
newrecord.loc['T_RefCount'] = rd
newrecord.loc['T_AltCount'] = ad
newrecord.loc['T_AltFreq'] = vf
# print
# newrecord.loc['Sample'],newrecord.loc['T_TotalDepth'],newrecord.loc['T_AltFreq']
newMutationDF.loc[indexForNewDF] = newrecord
indexForNewDF = indexForNewDF + 1
# print newMutationDF
sampleuniqdict[key] = NormalName
return(newMutationDF)
def makeGeneLevelDF(mutationsDF, sampleDF, patientTosampleDF, args):
if(args.verbose):
print "Now running gene-level analysis\n"
sampleuniqdict = OrderedDict()
samplesToTraverse = sorted(sampleDF['Sample_ID'].tolist())
colsForDF = copy.copy(samplesToTraverse)
colsForDF.insert(0, "Gene-AminoAcid")
geneLevelDF = pd.DataFrame(columns=colsForDF)
gene_aa_dict = OrderedDict()
# print geneLevelDF,"\n"
geneIndex = 0
for count, row in mutationsDF.iterrows():
sampleName = str(row.loc['Sample'])
#if((sampleName == "s-FFPE-Pooled-Tumor") or (sampleName == "s-EV-crc-043-P2") or (sampleName == "s-EV-crc-070-M3") or (sampleName == "s-EV-crc-039-P3") or (sampleName == "s-EV-crc-036-P3") or (sampleName == "s-EV-crc-058-M6") or (sampleName == "s-EV-crc-034-P3")):
# print "Skipping:", sampleName,"\n"
# continue
geneName = str(row.loc['Gene'])
if((geneName == "PDGFRA") or (geneName == "MEN1")):
print "Skipping:", sampleName,"\n"
continue
callConfidence = str(row.loc['Call_Confidence'])
if(callConfidence != 'HIGH'):
print "Skipping:", sampleName,"\n"
continue
aachange = (str(row.loc['AAchange']))
cdnaChange = (str(row.loc['cDNAchange']))
NormalName = str(row.loc['NormalUsed'])
chr = str(row.loc['Chrom'])
start = str(row.loc['Start'])
ref = str(row.loc['Ref'])
alt = str(row.loc['Alt'])
comment = str(row.loc['Comments'])
if(aachange != "nan"):
(p, aachange) = (str(row.loc['AAchange'])).split(".")
else:
aachange = "NA"
if(cdnaChange != "nan"):
(p, cdnaChange) = (str(row.loc['cDNAchange'])).split(".")
else:
cdnaChange = "NA"
if(aachange == "NA"):
gene_aa = geneName + "-" + cdnaChange
else:
gene_aa = geneName + "-" + aachange
gene_aa_dict[gene_aa] = 0
key = gene_aa + ";" + chr + ";" + start + ";" + ref + ";" + alt
# print key,"\n"
        if((key in sampleuniqdict) or ("Germline" in comment)):
if(args.verbose):
print "Data Already Processed for the mutation:", key, "\n"
continue
else:
valList = []
if(gene_aa in gene_aa_dict):
new_gene_aa = gene_aa + "-" + str(geneIndex)
gene_aa_dict[new_gene_aa] = gene_aa_dict[gene_aa]
valList.append(new_gene_aa)
for sampleName in samplesToTraverse:
(dp, rd, ad, vf) = (str(row.loc[sampleName])).split(";")
(n, dp) = dp.split("=")
(n, rd) = rd.split("=")
(n, ad) = ad.split("=")
(n, vf) = vf.split("=")
valList.append(vf)
sampleuniqdict[key] = NormalName
geneLevelDF.loc[geneIndex] = valList
geneIndex = geneIndex + 1
return(geneLevelDF)
def makePateintLevelDF(mutationsDF, sampleDF, patientTosampleDF, args):
if(args.verbose):
print "Making per patient heatmap Files\n"
#sampleuniqdict = OrderedDict()
for index,items in enumerate(patientTosampleDF):
pid = items[0]
df = items[1]
samplesToTraverse = sorted(df['Sample_ID'].tolist())
outsuffix = pid + "-" + "heatmapData"
colsForDF = copy.copy(samplesToTraverse)
colsForDF.insert(0, "Gene-AminoAcid")
geneLevelDF = pd.DataFrame(columns=colsForDF)
gene_aa_dict = OrderedDict()
geneIndex = 0
sampleuniqdict = OrderedDict()
for count, row in mutationsDF.iterrows():
sampleName = str(row.loc['Sample'])
if(sampleName in samplesToTraverse):
geneName = str(row.loc['Gene'])
if((geneName == "PDGFRA") or (geneName == "MEN1")):
print "Skipping:", sampleName,"\n"
continue
callConfidence = str(row.loc['Call_Confidence'])
if(callConfidence != 'HIGH'):
print "Skipping:", sampleName,"\n"
continue
aachange = (str(row.loc['AAchange']))
cdnaChange = (str(row.loc['cDNAchange']))
NormalName = str(row.loc['NormalUsed'])
comment = str(row.loc['Comments'])
chr = str(row.loc['Chrom'])
start = str(row.loc['Start'])
ref = str(row.loc['Ref'])
alt = str(row.loc['Alt'])
if(aachange != "nan"):
(p, aachange) = (str(row.loc['AAchange'])).split(".")
else:
aachange = "NA"
if(cdnaChange != "nan"):
(p, cdnaChange) = (str(row.loc['cDNAchange'])).split(".")
else:
cdnaChange = "NA"
if(aachange == "NA"):
gene_aa = geneName + "-" + cdnaChange
else:
gene_aa = geneName + "-" + aachange
gene_aa_dict[gene_aa] = 0
key = gene_aa + ";" + chr + ";" + start + ";" + ref + ";" + alt
# print key,"\n"
if((key in sampleuniqdict) or ("Germline" in comment)):
if(args.verbose):
print "Data Already Processed for the mutation:", key, "\n"
continue
else:
valList = []
if(gene_aa in gene_aa_dict):
new_gene_aa = gene_aa + "-" + str(geneIndex)
gene_aa_dict[new_gene_aa] = gene_aa_dict[gene_aa]
valList.append(new_gene_aa)
for sampleName in samplesToTraverse:
(dp, rd, ad, vf) = (str(row.loc[sampleName])).split(";")
(n, dp) = dp.split("=")
(n, rd) = rd.split("=")
(n, ad) = ad.split("=")
(n, vf) = vf.split("=")
valList.append(vf)
sampleuniqdict[key] = NormalName
geneLevelDF.loc[geneIndex] = valList
geneIndex = geneIndex + 1
WriteOutput(geneLevelDF, outsuffix, args)
return
def makePateintLevelDFForLichee(mutationsDF, sampleDF, patientTosampleDF, args):
if(args.verbose):
print "Making per patient Lichee Files\n"
#sampleuniqdict = OrderedDict()
for index,items in enumerate(patientTosampleDF):
pid = items[0]
df = items[1]
samplesToTraverse = sorted(df['Sample_ID'].tolist())
outsuffix = pid + "-" + "Lichee"
colsForDF = copy.copy(samplesToTraverse)
colsForDF.insert(0, "Normal")
colsForDF.insert(0, "EOA")
colsForDF.insert(0, "alt")
colsForDF.insert(0, "ref")
colsForDF.insert(0, "pos")
colsForDF.insert(0, "#chr")
geneLevelDF = pd.DataFrame(columns=colsForDF)
gene_aa_dict = OrderedDict()
geneIndex = 0
sampleuniqdict = OrderedDict()
for count, row in mutationsDF.iterrows():
sampleName = str(row.loc['Sample'])
if(sampleName in samplesToTraverse):
valList = []
geneName = str(row.loc['Gene'])
aachange = (str(row.loc['AAchange']))
cdnaChange = (str(row.loc['cDNAchange']))
NormalName = str(row.loc['NormalUsed'])
comment = str(row.loc['Comments'])
chr = str(row.loc['Chrom'])
start = str(row.loc['Start'])
ref = str(row.loc['Ref'])
alt = str(row.loc['Alt'])
valList.append(chr)
valList.append(start)
valList.append(ref)
valList.append(alt)
valList.append(geneName)
valList.append("0")
'''
if(aachange != "nan"):
(p, aachange) = (str(row.loc['AAchange'])).split(".")
else:
aachange = "NA"
if(cdnaChange != "nan"):
(p, cdnaChange) = (str(row.loc['cDNAchange'])).split(".")
else:
cdnaChange = "NA"
if(aachange == "NA"):
gene_aa = geneName + "-" + cdnaChange
else:
gene_aa = geneName + "-" + aachange
gene_aa_dict[gene_aa] = 0
'''
key = geneName + ";" + chr + ";" + start + ";" + ref + ";" + alt
# print key,"\n"
if((key in sampleuniqdict) or ("Germline" in comment)):
if(args.verbose):
print "Data Already Processed for the mutation:", key, "\n"
continue
else:
'''
if(gene_aa in gene_aa_dict):
new_gene_aa = gene_aa + "-" + str(geneIndex)
gene_aa_dict[new_gene_aa] = gene_aa_dict[gene_aa]
'''
for sampleName in samplesToTraverse:
(dp, rd, ad, vf) = (str(row.loc[sampleName])).split(";")
(n, dp) = dp.split("=")
(n, rd) = rd.split("=")
(n, ad) = ad.split("=")
(n, vf) = vf.split("=")
valList.append(vf)
sampleuniqdict[key] = NormalName
geneLevelDF.loc[geneIndex] = valList
geneIndex = geneIndex + 1
WriteOutput(geneLevelDF, outsuffix, args)
return
def makeHeatMaps(geneLevelDF):
axi = plt.imshow(geneLevelDF, interpolation='nearest', cmap=plt.cm.RdBu)
ax = axi.get_axes()
clean_axis(ax)
def WriteOutput(df, type, args):
outputTxt = args.outFilePrefix + "-" + type + ".txt"
outputExl = args.outFilePrefix + "-" + type + ".xlsx"
# Print to TSV file
df.to_csv(outputTxt, sep='\t', index=False)
# Print to Excel
df.to_excel(outputExl, sheet_name='Annotated_SVs', index=False)
return
# helper for cleaning up axes by removing ticks, tick labels, frame, etc.
def clean_axis(ax):
"""Remove ticks, tick labels, and frame from axis."""
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
for sp in ax.spines.values():
sp.set_visible(False)
if __name__ == "__main__":
start_time = time.time()
main()
end_time = time.time()
print("Elapsed time was %g seconds" % (end_time - start_time))
| apache-2.0 |
loli/semisupervisedforests | sklearn/neighbors/tests/test_nearest_centroid.py | 21 | 4207 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset, including sparse versions."""
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
"""Check consistency on dataset iris."""
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
"""Check consistency on dataset iris, when using shrinkage."""
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
"""Test that NearestCentroid gives same results on translated data"""
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
"""Test the manhattan metric."""
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
lttng/lttng-ci | lava/rootfs/vmdeboostrap/generate-root.py | 2 | 3606 | #!/usr/bin/python3
# Copyright (C) 2018 - Jonathan Rajotte-Julien <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import gzip
import os
import shutil
import subprocess
from datetime import datetime
def compress(filename):
with open(filename, 'rb') as f_in:
with gzip.open('{}.gz'.format(filename), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(filename)
packages = [
'autoconf',
'automake',
'bash-completion',
'bison',
'bsdtar',
'build-essential',
'chrpath',
'clang',
'cloc',
'cppcheck',
'curl',
'elfutils',
'flex',
'gettext',
'git',
'htop',
'jq',
'libdw-dev',
'libelf-dev',
'libffi-dev',
'libglib2.0-dev',
'libmount-dev',
'libnuma-dev',
'libpfm4-dev',
'libpopt-dev',
'libtap-harness-archive-perl',
'libtool',
'libxml2',
'libxml2-dev',
'netcat-traditional',
'openssh-server',
'psmisc',
'python-virtualenv',
'python3',
'python3-dev',
'python3-numpy',
'python3-pandas',
'python3-pip',
'python3-setuptools',
'python3-sphinx',
'stress',
'swig',
'texinfo',
'tree',
'uuid-dev',
'vim',
'wget',
]
def main():
parser = argparse.ArgumentParser(description='Generate lava lttng rootfs')
parser.add_argument("--arch", default='amd64')
    # We are using xenial instead of bionic and later since some syscall tests
    # depend on cat and the libc using the open syscall; recent libc versions
    # use openat instead. These commits in lttng-tools help with the problem:
    # c8e51d1559c48a12f18053997bbcff0c162691c4
    # 192bd8fb712659b9204549f29d9a54dc2c57a9e
    # They are only part of 2.11 and were not backported since they do not
    # represent a *problem* per se.
parser.add_argument("--distribution", default='xenial')
parser.add_argument("--mirror", default='http://archive.ubuntu.com/ubuntu')
parser.add_argument(
"--component", default='universe,multiverse,main,restricted')
args = parser.parse_args()
name = "rootfs_{}_{}_{}.tar".format(args.arch, args.distribution,
datetime.now().strftime("%Y-%m-%d"))
hostname = "linaro-server"
user = "linaro/linaro"
root_password = "root"
print(name)
command = [
"sudo",
"vmdebootstrap",
"--arch={}".format(args.arch),
"--distribution={}".format(args.distribution),
"--mirror={}".format(args.mirror),
"--debootstrapopts=components={}".format(args.component),
"--tarball={}".format(name),
"--package={}".format(",".join(packages)),
"--hostname={}".format(hostname),
"--user={}".format(user),
"--root-password={}".format(root_password),
"--no-kernel",
"--verbose",
]
completed_command = subprocess.run(command, check=True)
compress(name)
if __name__ == "__main__":
main()
| gpl-2.0 |
pcm17/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 85 | 4704 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
def __init__(self):
self._columns = {}
  @property
  def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
      **kwargs: assignments of the form key=value where key is a string
        and value is an `inflow.Series`, a zero-input `inflow.Transform`,
        or None (which removes the column).
    Raises:
      TypeError: keys are not strings.
      TypeError: values are not `inflow.Series`, zero-input
        `inflow.Transform`s, or None.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if v is None:
del self._columns[k]
elif isinstance(v, Series):
self._columns[k] = v
elif isinstance(v, Transform) and v.input_valency() == 0:
self._columns[k] = v()
else:
raise TypeError(
"Column in assignment must be an inflow.Series, inflow.Transform,"
" or None; got type '%s'." % type(v).__name__)
def select_columns(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def exclude_columns(self, exclude_keys):
"""Returns a new DataFrame with all columns not excluded via exclude_keys.
Args:
exclude_keys: A list of strings. Each should be the name of a column in
the DataFrame. These columns will be excluded from the result.
Returns:
A new DataFrame containing all columns except those specified.
"""
result = type(self)()
for key, value in self._columns.items():
if key not in exclude_keys:
result[key] = value
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def __delitem__(self, key):
if isinstance(key, str):
key = [key]
value = [None for _ in key]
self.assign(**dict(zip(key, value)))
def build(self, **kwargs):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache, **kwargs)
for name, c in self._columns.items()}
return tensors
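# A minimal usage sketch (hypothetical column objects, for illustration only;
# `age_series` and `income_series` stand in for `inflow.Series` instances and
# are not defined in this module):
#
#   df = DataFrame()
#   df.assign(age=age_series, income=income_series)  # add columns
#   subset = df.select_columns(["age"])              # keep a named subset
#   del df["income"]                                 # __delitem__ -> assign(None)
#   tensors = subset.build()                         # materialize the columns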
| apache-2.0 |
ThunderShiviah/digits | digits/data_io.py | 1 | 1545 | import csv
import json
import os
import pandas as pd
import pickle
import numpy as np
def get_paths():
paths = json.loads(open("SETTINGS.json").read())
for key in paths:
paths[key] = os.path.expandvars(paths[key])
return paths
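# SETTINGS.json is expected to be a flat JSON object mapping the path names
# used below to filesystem locations (environment variables are expanded).
# A hypothetical example:
#
#   {
#     "train_data_path": "data/train.csv",
#     "valid_data_path": "data/valid.csv",
#     "model_path": "models/model.pickle",
#     "prediction_path": "submissions/prediction.csv"
#   }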
def identity(x):
return x
# For pandas >= 0.10.1 this will trigger the columns to be parsed as strings
converters = { "FullDescription" : identity
, "Title": identity
, "LocationRaw": identity
, "LocationNormalized": identity
}
def get_train_df():
train_path = get_paths()["train_data_path"]
return pd.read_csv(train_path, converters=converters)
def get_valid_df():
valid_path = get_paths()["valid_data_path"]
return pd.read_csv(valid_path, converters=converters)
def save_model(model):
out_path = get_paths()["model_path"]
pickle.dump(model, open(out_path, "wb"))
def load_model():
in_path = get_paths()["model_path"]
return pickle.load(open(in_path, "rb"))
def write_submission(predictions):
prediction_path = get_paths()["prediction_path"]
writer = csv.writer(open(prediction_path, "w"), lineterminator="\n")
valid = get_valid_df()
rows = [x for x in zip(valid.index + 1, predictions.flatten().astype(int))] # id starts at 1 not 0.
writer.writerow(("ImageId", "Label"))
writer.writerows(rows)
"""
print('saving predictions to {path}'.format(path=prediction_path))
np.savetxt(prediction_path, predictions, delimiter=',')
print('prediction saved.')
"""
| mit |
flightgong/scikit-learn | sklearn/feature_selection/rfe.py | 1 | 14108 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_arrays, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
`n_features_` : int
The number of selected features.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_arrays(X, y, sparse_format="csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(self.step * n_features)
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
`n_features_` : int
The number of selected features with cross-validation.
`support_` : array of shape [n_features]
The mask of selected features.
`ranking_` : array of shape [n_features]
        The feature ranking, such that `ranking_[i]` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
    `grid_scores_` : array of shape [n_subsets_of_features]
        The cross-validation scores such that `grid_scores_[i]` corresponds
        to the CV score of the i-th subset of features.
`estimator_` : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
loss_func=None, estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.loss_func = loss_func
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_arrays(X, y, sparse_format="csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring,
loss_func=self.loss_func)
scores = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, mask], y_train)
score = _score(estimator, X_test[:, mask], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k, max(ranking_), score))
scores[k] += score
# Pick the best number of features on average
k = np.argmax(scores)
best_score = scores[k]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=k+1,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
self.grid_scores_ = scores / n
return self
| bsd-3-clause |
equialgo/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # 30 = n_clusters * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the leaf subclusters have a radius no larger than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
sunyihuan326/DeltaLab | shuwei_fengge/practice_four/Style/accuracy.py | 1 | 4111 | # coding:utf-8
'''
Created on 2018/1/4.
@author: chk01
'''
from sklearn.metrics import confusion_matrix, classification_report
from practice_four.utils import *
from collections import Counter
import numpy
outline_parameters = scio.loadmat('best_parameter/outline64x64_parameter-2500.mat')
outline_2classes_parameters = scio.loadmat('best_parameter/outline64x64-2classes_parameter-4000.mat')
sense_parameters = scio.loadmat('best_parameter/sense64_parameter-5500.mat')
sense_2classes_parameters = scio.loadmat('best_parameter/sense64_02_parameter-2000.mat')
LabelToOutline = [0, 0, 0, 1, 1, 1, 2, 2, 2]
LabelToSense = [0, 1, 2, 0, 1, 2, 0, 1, 2]
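# Each get_* helper below applies a single pretrained linear layer,
# Z = X.dot(W.T) + b, and returns the argmax class index per sample
# (presumably 3 classes for the full models and 2 for the *_2classes
# variants, judging by the parameter file names).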
def get_outline64(trX):
W = outline_parameters['W1']
b = outline_parameters['b1']
Z = np.add(np.matmul(trX, W.T), b)
return np.squeeze(np.argmax(Z, 1))
def get_outline64_2classes(trX):
W = outline_2classes_parameters['W1']
b = outline_2classes_parameters['b1']
Z = np.add(np.matmul(trX, W.T), b)
return np.squeeze(np.argmax(Z, 1))
def get_sense64(trX):
W = sense_parameters['W1']
b = sense_parameters['b1']
Z = np.add(np.matmul(trX, W.T), b)
return np.squeeze(np.argmax(Z, 1))
def get_sense64_2classes(trX):
W = sense_2classes_parameters['W1']
b = sense_2classes_parameters['b1']
Z = np.add(np.matmul(trX, W.T), b)
return np.squeeze(np.argmax(Z, 1))
def report(y_true, y_pred, typ):
print('---------------', str(typ).upper(), '----------------')
res = classification_report(y_true=y_true, y_pred=y_pred)
print(res)
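# Note: accept_ans and absolute_error used in analysis() come from
# practice_four.utils (star import above); they are assumed to map each true
# label to its set of acceptable predictions and to its set of unacceptable
# ("critical") predictions, respectively.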
def analysis(trY, style):
m = len(trY)
train_res_matrix = confusion_matrix(y_true=trY, y_pred=style)
print(train_res_matrix)
correct = 0
error = 0
for i in range(m):
if style[i] in accept_ans[trY[i]]:
correct += 1
elif style[i] in absolute_error[trY[i]]:
error += 1
    print('Accuracy:', round(train_res_matrix.trace() / m, 2))
    print('Acceptance rate:', round(correct / m, 2))
    print('Critical error rate:', round(error / m, 2))
for i in range(9):
num = np.sum(train_res_matrix[i, :])
acc = round((train_res_matrix[i, i]) / num, 2)
accept_num = 0
err_num = 0
for j in accept_ans[i]:
accept_num += train_res_matrix[i, j]
for k in absolute_error[i]:
err_num += train_res_matrix[i, k]
accept = round(accept_num / num, 2)
err = round(err_num / num, 2)
        print('Input class --------', i, '--------------')
        print('Accuracy:', acc, '| Acceptance rate:', accept, '| Critical error rate:', err)
print('----------------------------------------------------')
# c = Counter(style)
# c.most_common()
# print(c)
print("predict result:")
for i in range(9):
        print(str(i) + " proportion", round(100 * list(style).count(i) / len(list(style)), 2), "%")
print("true result:")
for i in range(9):
        print(str(i) + " proportion", round(100 * list(trY).count(i) / len(list(trY)), 2), "%")
def main():
file = 'data/style64x64.mat'
X_train_org, X_test_org, Y_train_org, Y_test_org = load_data(file, test_size=0.2)
trX = X_test_org
trY = np.argmax(Y_test_org, 1)
cor_outline = [LabelToOutline[l] for l in trY]
cor_sense = [LabelToSense[ll] for ll in trY]
sense = get_sense64(trX / 255.)
outline = get_outline64(trX / 255.)
sense_2 = get_sense64_2classes(trX / 255.)
outline_2 = get_outline64_2classes(trX / 255.)
report(cor_outline, outline, 'outline')
report(cor_sense, sense, 'sense')
style = 3 * outline + sense
outline_merge = outline.copy()
dif_idx = np.flatnonzero((outline - outline_2 * 2) != 0)
outline_merge[dif_idx] = 1
report(cor_outline, outline_merge, 'outline-merge')
sense_merge = sense.copy()
dif_idx = np.flatnonzero((sense - sense_2 * 2) != 0)
sense_merge[dif_idx] = 1
report(cor_sense, sense_merge, 'sense-merge')
style_2 = 3 * outline_merge + sense_merge
analysis(trY, style_2)
return True
if __name__ == '__main__':
main()
| mit |
kambysese/mne-python | examples/connectivity/plot_mixed_source_space_connectivity.py | 9 | 7046 | """
===============================================================================
Compute mixed source space connectivity and visualize it using a circular graph
===============================================================================
This example computes the all-to-all connectivity between 75 regions in a
mixed source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import setup_volume_source_space, setup_source_space
from mne import make_forward_solution
from mne.io import read_raw_fif
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
# Set directories
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_raw = data_dir + '/sample_audvis_filt-0-40_raw.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_cov = data_dir + '/ernoise-cov.fif'
fname_event = data_dir + '/sample_audvis_filt-0-40_raw-eve.fif'
# List of sub structures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Setup a surface-based source space, oct5 is not very dense (just used
# to speed up this example; we recommend oct6 in actual analyses)
src = setup_source_space(subject, subjects_dir=subjects_dir,
spacing='oct5', add_dist=False)
# Setup a volume source space
# set pos=10.0 for speed, not very accurate; we recommend something smaller
# like 5.0 in actual analyses:
vol_src = setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
add_interpolator=False, # just for speed, usually use True
volume_label=labels_vol, subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src
# Load data
raw = read_raw_fif(fname_raw)
raw.pick_types(meg=True, eeg=False, eog=True, stim=True).load_data()
events = mne.find_events(raw)
noise_cov = mne.read_cov(fname_cov)
# compute the fwd matrix
fwd = make_forward_solution(raw.info, fname_trans, src, fname_bem,
mindist=5.0) # ignore sources<=5mm from innerskull
del src
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
reject=reject, preload=False)
del raw
# Compute inverse solution and for each epoch
snr = 1.0 # use smaller SNR for raw data
inv_method = 'dSPM'
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(
epochs.info, fwd, noise_cov, depth=None, fixed=False)
del fwd
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, inv_method,
pick_ori=None, return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structures contained in the src space
# If mode = 'mean_flip' this option is used only for the cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(
stcs, labels_parc, src, mode='mean_flip', allow_empty=True,
return_generator=True)
# We compute the connectivity in the alpha band and plot it using a circular
# graph layout
fmin = 8.
fmax = 13.
sfreq = epochs.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# We create a list of Label containing also the sub structures
labels_aseg = mne.get_volume_labels_from_src(src, subject, subjects_dir)
labels = labels_parc + labels_aseg
# read colors
node_colors = [label.color for label in labels]
# We reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
rh_labels = [name for name in label_names if name.endswith('rh')]
# Get the y-location of the label
label_ypos_lh = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos_lh.append(ypos)
try:
idx = label_names.index('Brain-Stem')
except ValueError:
pass
else:
ypos = np.mean(labels[idx].pos[:, 1])
lh_labels.append('Brain-Stem')
label_ypos_lh.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos_lh, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels
if label != 'Brain-Stem' and label[:-2] + 'rh' in rh_labels]
# Save the plot order
node_order = lh_labels[::-1] + rh_labels
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) // 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
conmat = con[:, :, 0]
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(conmat, label_names, n_lines=300,
node_angles=node_angles, node_colors=node_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)', fig=fig)
###############################################################################
# Save the figure (optional)
# --------------------------
#
# By default matplotlib does not save using the facecolor, even though this was
# set when the figure was generated. If not set via savefig, the labels, title,
# and legend will be cut off from the output png file::
#
# >>> fname_fig = data_path + '/MEG/sample/plot_mixed_connect.png'
# >>> plt.savefig(fname_fig, facecolor='black')
| bsd-3-clause |
quheng/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
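# Note: the double loop in recreate_image is equivalent to the vectorized
# expression codebook[labels].reshape(w, h, -1); the explicit loop is kept
# above for readability.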
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 28 | 10014 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    """Check that the sparse_coef property works."""
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
backmari/moose | python/peacock/ExodusViewer/plugins/OutputPlugin.py | 1 | 2057 | import sys
import peacock
from PyQt5 import QtWidgets
from ExodusPlugin import ExodusPlugin
class OutputPlugin(peacock.base.OutputWidgetBase, ExodusPlugin):
"""
Plugin responsible for triggering the creation of png/py files and live script viewing.
"""
def __init__(self):
super(OutputPlugin, self).__init__()
self.MainLayout.addStretch()
self.setup()
def repr(self):
"""
Return matplotlib scripting information.
"""
window_options, window_sub_options = self._window.options().toScriptString()
output = dict()
output['window'] = ['window = chigger.RenderWindow(result)']
output['window'] += ['window.setOptions({})'.format(', '.join(window_options))]
for key, value in window_sub_options.iteritems():
output['window'] += ['window.setOptions({}, {})'.format(repr(key), ', '.join(value))]
output['window'] += ['window.start()']
return output
def onCameraChanged(self, *args):
"""
Slot that is called when the camera is changed.
"""
self.updateLiveScriptText()
def onWindowUpdated(self):
"""
Slot called when the window is changed.
"""
self.updateLiveScriptText()
def _setupPDFButton(self, qobject):
"""
Remove the PDF button.
"""
qobject.setVisible(False)
def main(size=None):
"""
    Run the OutputPlugin all by its lonesome.
"""
from peacock.ExodusViewer.ExodusPluginManager import ExodusPluginManager
from peacock.ExodusViewer.plugins.VTKWindowPlugin import VTKWindowPlugin
widget = ExodusPluginManager(plugins=[lambda: VTKWindowPlugin(size=size), OutputPlugin])
widget.show()
return widget, widget.VTKWindowPlugin
if __name__ == '__main__':
from peacock.utils import Testing
app = QtWidgets.QApplication(sys.argv)
filename = Testing.get_chigger_input('mug_blocks_out.e')
widget, window = main()
window.initialize([filename])
sys.exit(app.exec_())
| lgpl-2.1 |
rerpy/rerpy | sandbox/eeglab.py | 1 | 7997 | # This file is part of pyrerp
# Copyright (C) 2013 Nathaniel Smith <[email protected]>
# See file COPYING for license information.
import os.path
import numpy as np
from scipy.io import loadmat
import pandas
from pyrerp.data import ElectrodeInfo, RecordingInfo, ContinuousData
from pyrerp.events import Events
__all__ = ["EEGLABError", "load_eeglab"]
class EEGLABError(Exception):
pass
# Format documentation:
# http://sccn.ucsd.edu/wiki/A05:_Data_Structures
# https://sccn.ucsd.edu/svn/software/eeglab/functions/adminfunc/eeg_checkset.m
# https://sccn.ucsd.edu/svn/software/eeglab/functions/popfunc/pop_loadset.m
# Events: boundaries, durations, epochs, etc.:
# http://sccn.ucsd.edu/wiki/Chapter_03:_Event_Processing
#
# pop_loadset does mat-file reading, sanity checking, and data pathname
# manipulation
# eeg_checkset(EEG, 'loaddata') is used to actually load the data
# which calls eeg_getdatact(EEG)
def chanlocs_array(EEG, key):
return [entry.squeeze()[()]
for entry in EEG["chanlocs"][key].squeeze()]
def extract_electrode_info(EEG):
# str() ensures that we get ascii on python2, unicode on python3
channel_names = [str(label) for label in chanlocs_array(EEG, "labels")]
thetas = np.asarray(chanlocs_array(EEG, "theta"), dtype=float)
rs = np.asarray(chanlocs_array(EEG, "radius"), dtype=float)
    electrodes = ElectrodeInfo(channel_names, thetas, rs)
    return electrodes
def extract_data_matrix(EEG, set_filename, dtype):
# Right, so, EEGLAB. It's... fun.
#
# .data is an attribute which can have different values:
# -- "EEGDATA" (case insensitive): means that the data's in the .EEGDATA
# attribute
# -- any other string: means that the data is in a file whose name is
# exactly like the name of *the .set file*, except that the last few
# characters of the .data attribute (either .fdt or .dat, case
# insensitive again) determine the order the data is stored in.
# -- or it can just contain the actual data
#
# EEGLAB's data loading routines represent the data as a matrix with
# dimensions
# [nbchan, pnts * trials]
# which really means
# [nbchan, pnts, trials]
# (where "trial" means "epoch", and trials==1 means continuous).
#
# Now, there are two things that make handling the data layout
# complicated. First, EEGLAB sort of makes a mess of it. MATLAB uses
# "Fortran" (column-major) ordering by default, and EEGLAB follows this
# convention. So when going from
# [nbchan, pnts * trials]
# to
# [nbchan, pnts, trials]
# we have to remember that we're using fortran order to divide up the
# second component. When it comes to actually storing the files, EEGLAB
# uses two conventions:
# FDT files:
# [nbchan, pnts * trials] stored in memory (fortran) order
# DAT files:
# [nbchan, pnts * trials] stored in transposed (C) order
# (But remember that even in DAT files, the pnts * trials part is in
# fortran order, i.e., this files have *mixed* order when considered as a
# 3-d [nbchan, pnts, trials] array.)
#
# The second complication is that numpy by default uses the opposite
# convention for storing arrays -- the default is "C" (row-major)
# ordering. Fortunately though it is very flexible and can handle
# arbitrary orderings, even mixed ones; we just have to be careful to
# specify what we're talking about when we set things up in the first
# place.
#
# This function always returns an array with shape
# (nbchan, pnts, trials)
# If you request a little-endian float32 dtype, then the returned value
# will be a view onto a read-only memory mapping of the file. Otherwise it
# will be loaded into memory and converted to the requested dtype.
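    # A small illustration of the two conventions (not part of the loader):
    #
    #   np.arange(6).reshape((2, 3), order="C")  ->  [[0, 1, 2], [3, 4, 5]]
    #   np.arange(6).reshape((2, 3), order="F")  ->  [[0, 2, 4], [1, 3, 5]]
    #
    # which is why the pnts * trials axis below has to be split with
    # Fortran ("F") order to recover the separate (pnts, trials) axes.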
nbchan = EEG["nbchan"].item()
pnts = EEG["pnts"].item()
trials = EEG["trials"].item()
if np.issubdtype(EEG["data"], np.character):
data_str = str(EEG["data"].item())
if data_str.lower() == "eegdata":
data = EEG["EEGDATA"]
else:
base_path, _ = os.path.splitext(set_filename)
_, ext = os.path.splitext(data_str)
data_path = base_path + ext
if ext.lower() == ".dat":
order = "C"
else:
order = "F"
data = np.memmap(data_path, "r", dtype="<f4", order=order,
shape=(nbchan, pnts * trials))
else:
data = EEG["data"]
# Now 'data' is an array with unknown dtype and shape
# (nbchan, pnts * trials)
# We now want to reshape this to an array with shape
# (nbchan, pnts, trials)
# *using Fortran rules* to determine the relationship between these array
# layouts, *regardless* of the actual underlying memory layout.
# Fortunately numpy makes this easy.
    data = data.reshape((nbchan, pnts, trials), order="F")
# And finally, require that the data be of the specified type. This passes
# through compatible arrays unchanged, or otherwise creates a new array of
# the specified type and casts the data into it:
data = np.asarray(data, dtype=dtype)
return data
def load_eeglab(set_filename, dtype=np.float64):
# Read the .mat file
contents = loadmat(set_filename)
if "EEG" not in contents:
if "ALLEEG" in contents:
raise EEGLABError("reading of multi-set files is not implemented "
" -- patches gratefully accepted")
else:
raise EEGLABError("no 'EEG' variable found in matlab file")
EEG = contents["EEG"][0, 0]
srate = EEG["srate"][0, 0]
units = "??"
electrodes = extract_electrode_info(EEG)
# General metadata:
metadata = {}
for key in ["setname", "filename", "filepath", "comments", "etc",
"subject", "group", "condition", "session", "ref",
"icasphere", "icaweights", "icawinv"]:
if key in EEG.dtype.names and EEG[key].size > 0:
            if np.issubdtype(EEG[key], np.character):
metadata[key] = EEG[key][0]
else:
metadata[key] = EEG[key]
recording_info = RecordingInfo(srate, units, electrodes, metadata)
data = extract_data_matrix(EEG, set_filename, dtype)
data = data.T
(num_epochs, num_channels, num_samples) = data.shape
if num_epochs != 1:
raise EEGLABError("reading of epoched data is not implemented "
" -- patches gratefully accepted")
assert num_epochs == 1
    data = data.reshape(data.shape[1:])
# Events
# type (string) and latency (int) are the main ones
# type == "boundary" is special
# duration is length of removed data if data was removed (generally 0,
# or NaN for breaks between concatenated datasets)
# usually only for type == "boundary"
# "latency" is a floating point number, which refers to a 1-based index in
# the data array. It's floating point because for some events, like
# "boundary" events, they actually place the event *in between* two frames
# -- so if you have two data sets
# [10, 11, 12]
# and
# [17, 18, 19]
# then the concatenated set is
# [10, 11, 12, 17, 18, 19]
# with the 'latency' of the boundary event set to 3.5.
zero_based_boundaries = []
for eeglab_event in EEG["event"].ravel():
if eeglab_event["type"].item() == "boundary":
zero_based_boundaries.append(eeglab_event["latency"] - 1)
name_index = [metadata["setname"]] * num_samples
era_index = np.empty(num_samples, dtype=int)
#time_index = np.empty(num_samples, dtype=
ev = Events((str, int, int))
if num_epochs == 1:
# Continuous data
# XX
pass
else:
# Epoched data
# EEG.xmin, EEG.xmax = start/end latency of epoch in seconds
# EEG.epoch == ?
# XX
pass
| gpl-2.0 |
vidartf/hyperspy | hyperspy/drawing/_markers/rectangle.py | 1 | 3327 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class Rectangle(MarkerBase):
"""Rectangle marker that can be added to the signal figure
Parameters
---------
x1: array or float
The position of the up left corner of the rectangle in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
        have the same dimensions as the navigation axes.
y1: array or float
The position of the up left corner of the rectangle in y.
see x1 arguments
x2: array or float
The position of the down right corner of the rectangle in x.
see x1 arguments
y2: array or float
The position of the down right of the rectangle in y.
see x1 arguments
kwargs:
        Keyword arguments for valid matplotlib patch properties (i.e.
        recognized by `plt.Rectangle`).
Example
-------
>>> import scipy.misc
>>> im = hs.signals.Signal2D(scipy.misc.ascent())
    >>> m = hs.plot.markers.rectangle(x1=150, y1=100, x2=400, y2=400,
    ...                               color='red')
>>> im.add_marker(m)
"""
def __init__(self, x1, y1, x2, y2, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'fill': None, 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x1, y1=y1, x2=x2, y2=y2)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self.marker.set_xdata([self.get_data_position('x1'),
self.get_data_position('x2')])
self.marker.set_ydata([self.get_data_position('y1'),
self.get_data_position('y2')])
def plot(self):
if self.ax is None:
raise AttributeError(
"To use this method the marker needs to be first add to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
width = abs(self.get_data_position('x1') -
self.get_data_position('x2'))
height = abs(self.get_data_position('y1') -
self.get_data_position('y2'))
self.marker = self.ax.add_patch(plt.Rectangle(
(self.get_data_position('x1'), self.get_data_position('y1')),
width, height, **self.marker_properties))
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
| gpl-3.0 |
apache/beam | sdks/python/apache_beam/dataframe/frames.py | 3 | 174092 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analogs for :class:`pandas.DataFrame` and :class:`pandas.Series`:
:class:`DeferredDataFrame` and :class:`DeferredSeries`.
These classes are effectively wrappers around a `schema-aware`_
:class:`~apache_beam.pvalue.PCollection` that provide a set of operations
compatible with the `pandas`_ API.
Note that we aim for the Beam DataFrame API to be completely compatible with
the pandas API, but there are some features that are currently unimplemented
for various reasons. Pay particular attention to the **'Differences from
pandas'** section for each operation to understand where we diverge.
.. _schema-aware:
https://beam.apache.org/documentation/programming-guide/#what-is-a-schema
.. _pandas:
https://pandas.pydata.org/
"""
import collections
import inspect
import itertools
import math
import re
import warnings
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from pandas.core.groupby.generic import DataFrameGroupBy
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import io
from apache_beam.dataframe import partitionings
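# A minimal usage sketch of this deferred API (illustrative only; it assumes
# the ``to_dataframe`` entry point from ``apache_beam.dataframe.convert`` and a
# schema'd PCollection of ``beam.Row`` elements):
#
#   import apache_beam as beam
#   from apache_beam.dataframe.convert import to_dataframe
#
#   with beam.Pipeline() as p:
#     rows = p | beam.Create([beam.Row(word='a', n=1), beam.Row(word='b', n=2)])
#     df = to_dataframe(rows)            # DeferredDataFrame over the PCollection
#     totals = df.groupby('word').sum()  # deferred, pandas-compatible operation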
__all__ = [
'DeferredSeries',
'DeferredDataFrame',
]
def populate_not_implemented(pd_type):
def wrapper(deferred_type):
for attr in dir(pd_type):
# Don't auto-define hidden methods or dunders
if attr.startswith('_'):
continue
if not hasattr(deferred_type, attr):
pd_value = getattr(pd_type, attr)
if isinstance(pd_value, property) or inspect.isclass(pd_value):
# Some of the properties on pandas types (cat, dt, sparse), are
# actually attributes with class values, not properties
setattr(
deferred_type,
attr,
property(
frame_base.not_implemented_method(attr, base_type=pd_type)))
elif callable(pd_value):
setattr(
deferred_type,
attr,
frame_base.not_implemented_method(attr, base_type=pd_type))
return deferred_type
return wrapper
def _fillna_alias(method):
def wrapper(self, *args, **kwargs):
return self.fillna(*args, method=method, **kwargs)
wrapper.__name__ = method
wrapper.__doc__ = (
f'{method} is only supported for axis="columns". '
'axis="index" is order-sensitive.')
return frame_base.with_docs_from(pd.DataFrame)(
frame_base.args_to_kwargs(pd.DataFrame)(
frame_base.populate_defaults(pd.DataFrame)(wrapper)))
LIFTABLE_AGGREGATIONS = ['all', 'any', 'max', 'min', 'prod', 'sum']
LIFTABLE_WITH_SUM_AGGREGATIONS = ['size', 'count']
UNLIFTABLE_AGGREGATIONS = [
'mean',
'median',
'quantile',
'describe',
# TODO: The below all have specialized distributed
# implementations, but they require tracking
# multiple intermediate series, which is difficult
# to lift in groupby
'std',
'var',
'corr',
'cov',
'nunique'
]
ALL_AGGREGATIONS = (
LIFTABLE_AGGREGATIONS + LIFTABLE_WITH_SUM_AGGREGATIONS +
UNLIFTABLE_AGGREGATIONS)
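# "Liftable" means the aggregation can be computed by applying the same
# aggregation to per-partition partial results (e.g. the sum of per-partition
# sums is the global sum); size and count are liftable if the partial results
# are instead combined with sum. The unliftable aggregations do not decompose
# this way: for partitions [1, 2, 3] and [100], for example, the global median
# is 2.5 while the median of the per-partition medians (2 and 100) is 51.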
def _agg_method(base, func):
def wrapper(self, *args, **kwargs):
return self.agg(func, *args, **kwargs)
if func in UNLIFTABLE_AGGREGATIONS:
wrapper.__doc__ = (
f"``{func}`` cannot currently be parallelized. It will "
"require collecting all data on a single node.")
wrapper.__name__ = func
return frame_base.with_docs_from(base)(wrapper)
# Docstring to use for head and tail (commonly used to peek at datasets)
_PEEK_METHOD_EXPLANATION = (
"because it is `order-sensitive "
"<https://s.apache.org/dataframe-order-sensitive-operations>`_.\n\n"
"If you want to peek at a large dataset consider using interactive Beam's "
":func:`ib.collect "
"<apache_beam.runners.interactive.interactive_beam.collect>` "
"with ``n`` specified, or :meth:`sample`. If you want to find the "
"N largest elements, consider using :meth:`DeferredDataFrame.nlargest`.")
class DeferredDataFrameOrSeries(frame_base.DeferredFrame):
def _render_indexes(self):
if self.index.nlevels == 1:
return 'index=' + (
'<unnamed>' if self.index.name is None else repr(self.index.name))
else:
return 'indexes=[' + ', '.join(
'<unnamed>' if ix is None else repr(ix)
for ix in self.index.names) + ']'
__array__ = frame_base.wont_implement_method(
pd.Series, '__array__', reason="non-deferred-result")
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def drop(self, labels, axis, index, columns, errors, **kwargs):
"""drop is not parallelizable when dropping from the index and
``errors="raise"`` is specified. It requires collecting all data on a single
node in order to detect if one of the index values is missing."""
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
if axis in (0, 'index'):
index = labels
columns = None
elif axis in (1, 'columns'):
index = None
columns = labels
else:
raise ValueError(
"axis must be one of (0, 1, 'index', 'columns'), "
"got '%s'" % axis)
if columns is not None:
# Compute the proxy based on just the columns that are dropped.
proxy = self._expr.proxy().drop(columns=columns, errors=errors)
else:
proxy = self._expr.proxy()
if index is not None and errors == 'raise':
# In order to raise an error about missing index values, we'll
# need to collect the entire dataframe.
# TODO: This could be parallelized by putting index values in a
# ConstantExpression and partitioning by index.
requires = partitionings.Singleton(
reason=(
"drop(errors='raise', axis='index') is not currently "
"parallelizable. This requires collecting all data on a single "
f"node in order to detect if one of {index!r} is missing."))
else:
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'drop',
lambda df: df.drop(
axis=axis,
index=index,
columns=columns,
errors=errors,
**kwargs), [self._expr],
proxy=proxy,
requires_partition_by=requires))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def droplevel(self, level, axis):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'droplevel',
lambda df: df.droplevel(level, axis=axis), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()
if axis in (1, 'columns') else partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def fillna(self, value, method, axis, limit, **kwargs):
"""When ``axis="index"``, both ``method`` and ``limit`` must be ``None``.
otherwise this operation is order-sensitive."""
# Default value is None, but is overridden with index.
axis = axis or 'index'
if axis in (0, 'index'):
if method is not None:
raise frame_base.WontImplementError(
f"fillna(method={method!r}, axis={axis!r}) is not supported "
"because it is order-sensitive. Only fillna(method=None) is "
f"supported with axis={axis!r}.",
reason="order-sensitive")
if limit is not None:
raise frame_base.WontImplementError(
f"fillna(limit={limit!r}, axis={axis!r}) is not supported because "
"it is order-sensitive. Only fillna(limit=None) is supported with "
f"axis={axis!r}.",
reason="order-sensitive")
if isinstance(self, DeferredDataFrame) and isinstance(value,
DeferredSeries):
# If self is a DataFrame and value is a Series we want to broadcast value
# to all partitions of self.
# This is OK, as its index must be the same size as the columns set of
# self, so cannot be too large.
class AsScalar(object):
def __init__(self, value):
self.value = value
with expressions.allow_non_parallel_operations():
value_expr = expressions.ComputedExpression(
'as_scalar',
lambda df: AsScalar(df), [value._expr],
requires_partition_by=partitionings.Singleton())
get_value = lambda x: x.value
requires = partitionings.Arbitrary()
elif isinstance(value, frame_base.DeferredBase):
# For other DeferredBase combinations, use Index partitioning to
# co-locate on the Index
value_expr = value._expr
get_value = lambda x: x
requires = partitionings.Index()
else:
# Default case, pass value through as a constant, no particular
# partitioning requirement
value_expr = expressions.ConstantExpression(value)
get_value = lambda x: x
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
# yapf: disable
expressions.ComputedExpression(
'fillna',
lambda df,
value: df.fillna(
get_value(value),
method=method,
axis=axis,
limit=limit,
**kwargs), [self._expr, value_expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires))
ffill = _fillna_alias('ffill')
bfill = _fillna_alias('bfill')
backfill = _fillna_alias('backfill')
pad = _fillna_alias('pad')
@frame_base.with_docs_from(pd.DataFrame)
def first(self, offset):
per_partition = expressions.ComputedExpression(
'first-per-partition',
lambda df: df.sort_index().first(offset=offset), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'first',
lambda df: df.sort_index().first(offset=offset), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
def last(self, offset):
per_partition = expressions.ComputedExpression(
'last-per-partition',
lambda df: df.sort_index().last(offset=offset), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'last',
lambda df: df.sort_index().last(offset=offset), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def groupby(self, by, level, axis, as_index, group_keys, **kwargs):
"""``as_index`` and ``group_keys`` must both be ``True``.
Aggregations grouping by a categorical column with ``observed=False`` set
are not currently parallelizable
(`BEAM-11190 <https://issues.apache.org/jira/browse/BEAM-11190>`_).
"""
if not as_index:
raise NotImplementedError('groupby(as_index=False)')
if not group_keys:
raise NotImplementedError('groupby(group_keys=False)')
if axis in (1, 'columns'):
return _DeferredGroupByCols(
expressions.ComputedExpression(
'groupbycols',
lambda df: df.groupby(by, axis=axis, **kwargs), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
elif level is not None:
if isinstance(level, (list, tuple)):
grouping_indexes = level
else:
grouping_indexes = [level]
grouping_columns = []
index = self._expr.proxy().index
# Translate to level numbers only
grouping_indexes = [
l if isinstance(l, int) else index.names.index(l)
for l in grouping_indexes
]
if index.nlevels == 1:
to_group_with_index = self._expr
to_group = self._expr
else:
levels_to_drop = [
i for i in range(index.nlevels) if i not in grouping_indexes
]
# Reorder so the grouped indexes are first
to_group_with_index = self.reorder_levels(
grouping_indexes + levels_to_drop)
grouping_indexes = list(range(len(grouping_indexes)))
levels_to_drop = list(range(len(grouping_indexes), index.nlevels))
if levels_to_drop:
to_group = to_group_with_index.droplevel(levels_to_drop)._expr
else:
to_group = to_group_with_index._expr
to_group_with_index = to_group_with_index._expr
elif callable(by):
def map_index(df):
df = df.copy()
df.index = df.index.map(by)
return df
to_group = expressions.ComputedExpression(
'map_index',
map_index, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
orig_nlevels = self._expr.proxy().index.nlevels
def prepend_mapped_index(df):
df = df.copy()
index = df.index.to_frame()
index.insert(0, None, df.index.map(by))
df.index = pd.MultiIndex.from_frame(
index, names=[None] + list(df.index.names))
return df
to_group_with_index = expressions.ComputedExpression(
'map_index_keep_orig',
prepend_mapped_index,
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
# Partitioning by the original indexes is preserved
preserves_partition_by=partitionings.Index(
list(range(1, orig_nlevels + 1))))
grouping_columns = []
# The index we need to group by is the last one
grouping_indexes = [0]
elif isinstance(by, DeferredSeries):
if isinstance(self, DeferredSeries):
def set_index(s, by):
df = pd.DataFrame(s)
df, by = df.align(by, axis=0, join='inner')
return df.set_index(by).iloc[:, 0]
def prepend_index(s, by):
df = pd.DataFrame(s)
df, by = df.align(by, axis=0, join='inner')
return df.set_index([by, df.index]).iloc[:, 0]
else:
def set_index(df, by): # type: ignore
df, by = df.align(by, axis=0, join='inner')
return df.set_index(by)
def prepend_index(df, by): # type: ignore
df, by = df.align(by, axis=0, join='inner')
return df.set_index([by, df.index])
to_group = expressions.ComputedExpression(
'set_index',
set_index, [self._expr, by._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton())
orig_nlevels = self._expr.proxy().index.nlevels
to_group_with_index = expressions.ComputedExpression(
'prependindex',
prepend_index, [self._expr, by._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Index(
list(range(1, orig_nlevels + 1))))
grouping_columns = []
grouping_indexes = [0]
elif isinstance(by, np.ndarray):
raise frame_base.WontImplementError(
"Grouping by a concrete ndarray is order sensitive.",
reason="order-sensitive")
elif isinstance(self, DeferredDataFrame):
if not isinstance(by, list):
by = [by]
# Find the columns that we need to move into the index so we can group by
# them
column_names = self._expr.proxy().columns
grouping_columns = list(set(by).intersection(column_names))
index_names = self._expr.proxy().index.names
for label in by:
if label not in index_names and label not in self._expr.proxy().columns:
raise KeyError(label)
grouping_indexes = list(set(by).intersection(index_names))
if grouping_indexes:
if set(by) == set(index_names):
to_group = self._expr
elif set(by).issubset(index_names):
to_group = self.droplevel(index_names.difference(by))._expr
else:
to_group = self.reset_index(grouping_indexes).set_index(by)._expr
else:
to_group = self.set_index(by)._expr
if grouping_columns:
# TODO(BEAM-11711): It should be possible to do this without creating an
# expression manually, by using DeferredDataFrame.set_index, i.e.:
# to_group_with_index = self.set_index([self.index] +
# grouping_columns)._expr
to_group_with_index = expressions.ComputedExpression(
'move_grouped_columns_to_index',
lambda df: df.set_index([df.index] + grouping_columns, drop=False),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Index(
list(range(self._expr.proxy().index.nlevels))))
else:
to_group_with_index = self._expr
else:
raise NotImplementedError(by)
return DeferredGroupBy(
expressions.ComputedExpression(
'groupbyindex',
lambda df: df.groupby(
level=list(range(df.index.nlevels)), **kwargs), [to_group],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()),
kwargs,
to_group,
to_group_with_index,
grouping_columns=grouping_columns,
grouping_indexes=grouping_indexes)
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def loc(self):
return _DeferredLoc(self)
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def iloc(self):
"""Position-based indexing with `iloc` is order-sensitive in almost every
case. Beam DataFrame users should prefer label-based indexing with `loc`.
"""
return _DeferredILoc(self)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def reset_index(self, level=None, **kwargs):
"""Dropping the entire index (e.g. with ``reset_index(level=None)``) is
not parallelizable. In addition, the newly generated index values are only
guaranteed to be unique: the Beam DataFrame API makes no guarantee that they
will match the index values the equivalent pandas operation would generate,
because that implementation is order-sensitive."""
if level is not None and not isinstance(level, (tuple, list)):
level = [level]
if level is None or len(level) == self._expr.proxy().index.nlevels:
# TODO(BEAM-12182): Could do distributed re-index with offsets.
requires_partition_by = partitionings.Singleton(
reason=(
f"reset_index(level={level!r}) drops the entire index and "
"creates a new one, so it cannot currently be parallelized "
"(BEAM-12182)."))
else:
requires_partition_by = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'reset_index',
lambda df: df.reset_index(level=level, **kwargs), [self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=requires_partition_by))
abs = frame_base._elementwise_method('abs', base=pd.core.generic.NDFrame)
@frame_base.with_docs_from(pd.core.generic.NDFrame)
@frame_base.args_to_kwargs(pd.core.generic.NDFrame)
@frame_base.populate_defaults(pd.core.generic.NDFrame)
def astype(self, dtype, copy, errors):
"""astype is not parallelizable when ``errors="ignore"`` is specified.
``copy=False`` is not supported because it relies on memory-sharing
semantics.
``dtype="category"`` is not supported because the type of the output column
depends on the data. Please use ``pd.CategoricalDtype`` with explicit
categories instead.
"""
requires = partitionings.Arbitrary()
if errors == "ignore":
# We need all data in order to ignore errors and propagate the original
# data.
requires = partitionings.Singleton(
reason=(
f"astype(errors={errors!r}) is currently not parallelizable, "
"because all data must be collected on one node to determine if "
"the original data should be propagated instead."))
if not copy:
raise frame_base.WontImplementError(
f"astype(copy={copy!r}) is not supported because it relies on "
"memory-sharing semantics that are not compatible with the Beam "
"model.")
if dtype == 'category':
raise frame_base.WontImplementError(
"astype(dtype='category') is not supported because the type of the "
"output column depends on the data. Please use pd.CategoricalDtype "
"with explicit categories instead.",
reason="non-deferred-columns")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'astype',
lambda df: df.astype(dtype=dtype, copy=copy, errors=errors),
[self._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()))
copy = frame_base._elementwise_method('copy', base=pd.core.generic.NDFrame)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def replace(self, to_replace, value, limit, method, **kwargs):
"""``method`` is not supported in the Beam DataFrame API because it is
order-sensitive. It cannot be specified.
If ``limit`` is specified this operation is not parallelizable."""
if method is not None and not isinstance(to_replace,
dict) and value is None:
# pandas only relies on method if to_replace is not a dictionary, and
# value is None
raise frame_base.WontImplementError(
f"replace(method={method!r}) is not supported because it is "
"order sensitive. Only replace(method=None) is supported.",
reason="order-sensitive")
if limit is None:
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Singleton(
reason=(
f"replace(limit={limit!r}) cannot currently be parallelized. It "
"requires collecting all data on a single node."))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'replace',
lambda df: df.replace(
to_replace=to_replace,
value=value,
limit=limit,
method=method,
**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires_partition_by))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def tz_localize(self, ambiguous, **kwargs):
"""``ambiguous`` cannot be set to ``"infer"`` as its semantics are
order-sensitive. Similarly, specifying ``ambiguous`` as an
:class:`~numpy.ndarray` is order-sensitive, but you can achieve similar
functionality by specifying ``ambiguous`` as a Series."""
if isinstance(ambiguous, np.ndarray):
raise frame_base.WontImplementError(
"tz_localize(ambiguous=ndarray) is not supported because it makes "
"this operation sensitive to the order of the data. Please use a "
"DeferredSeries instead.",
reason="order-sensitive")
elif isinstance(ambiguous, frame_base.DeferredFrame):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda df,
ambiguous: df.tz_localize(ambiguous=ambiguous, **kwargs),
[self._expr, ambiguous._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton()))
elif ambiguous == 'infer':
# infer attempts to infer based on the order of the timestamps
raise frame_base.WontImplementError(
f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it "
"makes this operation sensitive to the order of the data.",
reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda df: df.tz_localize(ambiguous=ambiguous, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def size(self):
sizes = expressions.ComputedExpression(
'get_sizes',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series(df.size),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'sum_sizes',
lambda sizes: sizes.sum(), [sizes],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
def length(self):
"""Alternative to ``len(df)`` which returns a deferred result that can be
used in arithmetic with :class:`DeferredSeries` or
:class:`DeferredDataFrame` instances."""
lengths = expressions.ComputedExpression(
'get_lengths',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series(len(df)),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'sum_lengths',
lambda lengths: lengths.sum(), [lengths],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
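# Illustrative use of length() (the column name 'x' here is hypothetical):
# because the result is deferred it composes with other deferred values, e.g.
# ``df['x'] / df.length()`` scales a column by the total number of rows.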
def __len__(self):
raise frame_base.WontImplementError(
"len(df) is not currently supported because it produces a non-deferred "
"result. Consider using df.length() instead.",
reason="non-deferred-result")
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def empty(self):
empties = expressions.ComputedExpression(
'get_empties',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series(df.empty),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'check_all_empty',
lambda empties: empties.all(), [empties],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
def bool(self):
# TODO: Documentation about DeferredScalar
# Will throw if any partition has >1 element
bools = expressions.ComputedExpression(
'get_bools',
# Wrap scalar results in a Series for easier concatenation later
lambda df: pd.Series([], dtype=bool)
if df.empty else pd.Series([df.bool()]),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
# Will throw if overall dataset has != 1 element
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_all_bools',
lambda bools: bools.bool(), [bools],
proxy=bool(),
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
def equals(self, other):
intermediate = expressions.ComputedExpression(
'equals_partitioned',
# Wrap scalar results in a Series for easier concatenation later
lambda df,
other: pd.Series(df.equals(other)),
[self._expr, other._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate_equals',
lambda df: df.all(), [intermediate],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def sort_values(self, axis, **kwargs):
"""``sort_values`` is not implemented.
It is not implemented for ``axis=index`` because it imposes an ordering on
the dataset, and it likely will not be maintained (see
https://s.apache.org/dataframe-order-sensitive-operations).
It is not implemented for ``axis=columns`` because it makes the order of
the columns depend on the data (see
https://s.apache.org/dataframe-non-deferred-columns)."""
if axis in (0, 'index'):
# axis=index imposes an ordering on the DataFrame rows which we do not
# support
raise frame_base.WontImplementError(
"sort_values(axis=index) is not supported because it imposes an "
"ordering on the dataset which likely will not be preserved.",
reason="order-sensitive")
else:
# axis=columns will reorder the columns based on the data
raise frame_base.WontImplementError(
"sort_values(axis=columns) is not supported because the order of the "
"columns in the result depends on the data.",
reason="non-deferred-columns")
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def sort_index(self, axis, **kwargs):
"""``axis=index`` is not allowed because it imposes an ordering on the
dataset, and we cannot guarantee it will be maintained (see
https://s.apache.org/dataframe-order-sensitive-operations). Only
``axis=columns`` is allowed."""
if axis in (0, 'index'):
# axis=rows imposes an ordering on the DataFrame which we do not support
raise frame_base.WontImplementError(
"sort_index(axis=index) is not supported because it imposes an "
"ordering on the dataset which we cannot guarantee will be "
"preserved.",
reason="order-sensitive")
# axis=columns reorders the columns by name
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'sort_index',
lambda df: df.sort_index(axis, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def where(self, cond, other, errors, **kwargs):
"""where is not parallelizable when ``errors="ignore"`` is specified."""
requires = partitionings.Arbitrary()
deferred_args = {}
actual_args = {}
# TODO(bhulette): This is very similar to the logic in
# frame_base.elementwise_method, can we unify it?
if isinstance(cond, frame_base.DeferredFrame):
deferred_args['cond'] = cond
requires = partitionings.Index()
else:
actual_args['cond'] = cond
if isinstance(other, frame_base.DeferredFrame):
deferred_args['other'] = other
requires = partitionings.Index()
else:
actual_args['other'] = other
if errors == "ignore":
# We need all data in order to ignore errors and propagate the original
# data.
requires = partitionings.Singleton(
reason=(
f"where(errors={errors!r}) is currently not parallelizable, "
"because all data must be collected on one node to determine if "
"the original data should be propagated instead."))
actual_args['errors'] = errors
def where_execution(df, *args):
runtime_values = {
name: value
for (name, value) in zip(deferred_args.keys(), args)
}
return df.where(**runtime_values, **actual_args, **kwargs)
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
"where",
where_execution,
[self._expr] + [df._expr for df in deferred_args.values()],
requires_partition_by=requires,
preserves_partition_by=partitionings.Index(),
))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def mask(self, cond, **kwargs):
"""mask is not parallelizable when ``errors="ignore"`` is specified."""
return self.where(~cond, **kwargs)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def xs(self, key, axis, level, **kwargs):
"""Note that ``xs(axis='index')`` will raise a ``KeyError`` at execution
time if the key does not exist in the index."""
if axis in ('columns', 1):
# Special case for axis=columns. This is a simple project that raises a
# KeyError at construction time for missing columns.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'xs',
lambda df: df.xs(key, axis=axis, **kwargs), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif axis not in ('index', 0):
# Make sure that user's axis is valid
raise ValueError(
"axis must be one of ('index', 0, 'columns', 1). "
f"got {axis!r}.")
if not isinstance(key, tuple):
key = (key, )
key_size = len(key)
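# Wrap the key in a single-element Series indexed by the key itself. When the
# inputs are partitioned by the first ``key_size`` index levels below, the key
# is routed to the same partition as any matching rows; every other partition
# receives an empty key and returns an empty frame in xs_partitioned.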
key_series = pd.Series([key], pd.MultiIndex.from_tuples([key]))
key_expr = expressions.ConstantExpression(
key_series, proxy=key_series.iloc[:0])
if level is None:
reindexed = self
else:
if not isinstance(level, list):
level = [level]
# If user specified levels, reindex so those levels are at the beginning.
# Keep the others and preserve their order.
level = [
l if isinstance(l, int) else list(self.index.names).index(l)
for l in level
]
reindexed = self.reorder_levels(
level + [i for i in range(self.index.nlevels) if i not in level])
def xs_partitioned(frame, key):
if not len(key):
# key is not in this partition, return empty dataframe
return frame.iloc[:0].droplevel(list(range(key_size)))
# key should be in this partition, call xs. Will raise KeyError if not
# present.
return frame.xs(key.item())
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'xs',
xs_partitioned,
[reindexed._expr, key_expr],
requires_partition_by=partitionings.Index(list(range(key_size))),
# Drops index levels, so partitioning is not preserved
preserves_partition_by=partitionings.Singleton()))
@property
def dtype(self):
return self._expr.proxy().dtype
isin = frame_base._elementwise_method('isin', base=pd.DataFrame)
combine_first = frame_base._elementwise_method(
'combine_first', base=pd.DataFrame)
combine = frame_base._proxy_method(
'combine',
base=pd.DataFrame,
requires_partition_by=expressions.partitionings.Singleton(
reason="combine() is not parallelizable because func might operate "
"on the full dataset."),
preserves_partition_by=expressions.partitionings.Singleton())
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def ndim(self):
return self._expr.proxy().ndim
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def index(self):
return _DeferredIndex(self)
@index.setter
def index(self, value):
# TODO: assigning the index is generally order-sensitive, but we could
# support it in some rare cases, e.g. when assigning the index from one
# of a DataFrame's columns
raise NotImplementedError(
"Assigning an index is not yet supported. "
"Consider using set_index() instead.")
reindex = frame_base.wont_implement_method(
pd.DataFrame, 'reindex', reason="order-sensitive")
hist = frame_base.wont_implement_method(
pd.DataFrame, 'hist', reason="plotting-tools")
attrs = property(
frame_base.wont_implement_method(
pd.DataFrame, 'attrs', reason='experimental'))
reorder_levels = frame_base._proxy_method(
'reorder_levels',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
resample = frame_base.wont_implement_method(
pd.DataFrame, 'resample', reason='event-time-semantics')
rolling = frame_base.wont_implement_method(
pd.DataFrame, 'rolling', reason='event-time-semantics')
sparse = property(
frame_base.not_implemented_method(
'sparse', 'BEAM-12425', base_type=pd.DataFrame))
transform = frame_base._elementwise_method('transform', base=pd.DataFrame)
tz_convert = frame_base._proxy_method(
'tz_convert',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
# Manipulates index, partitioning is not preserved
preserves_partition_by=partitionings.Singleton())
@populate_not_implemented(pd.Series)
@frame_base.DeferredFrame._register_for(pd.Series)
class DeferredSeries(DeferredDataFrameOrSeries):
def __repr__(self):
return (
f'DeferredSeries(name={self.name!r}, dtype={self.dtype}, '
f'{self._render_indexes()})')
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def name(self):
return self._expr.proxy().name
@name.setter
def name(self, value):
def fn(s):
s = s.copy()
s.name = value
return s
self._expr = expressions.ComputedExpression(
'series_set_name',
fn, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def dtype(self):
return self._expr.proxy().dtype
dtypes = dtype
def __getitem__(self, key):
if _is_null_slice(key) or key is Ellipsis:
return self
elif (isinstance(key, int) or _is_integer_slice(key)
) and self._expr.proxy().index._should_fallback_to_positional():
raise frame_base.WontImplementError(
"Accessing an item by an integer key is order sensitive for this "
"Series.",
reason="order-sensitive")
elif isinstance(key, slice) or callable(key):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
# yapf: disable
'getitem',
lambda df: df[key],
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
# yapf: disable
'getitem',
lambda df,
indexer: df[indexer],
[self._expr, key._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif pd.core.series.is_iterator(key) or pd.core.common.is_bool_indexer(key):
raise frame_base.WontImplementError(
"Accessing a DeferredSeries with an iterator is sensitive to the "
"order of the data.",
reason="order-sensitive")
else:
# We could consider returning a deferred scalar, but that might
# be more surprising than a clear error.
raise frame_base.WontImplementError(
f"Indexing a series with key of type {type(key)} is not supported "
"because it produces a non-deferred result.",
reason="non-deferred-result")
@frame_base.with_docs_from(pd.Series)
def keys(self):
return self.index
# Series.T == transpose. Both are a no-op
T = frame_base._elementwise_method('T', base=pd.Series)
transpose = frame_base._elementwise_method('transpose', base=pd.Series)
shape = property(
frame_base.wont_implement_method(
pd.Series, 'shape', reason="non-deferred-result"))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def append(self, to_append, ignore_index, verify_integrity, **kwargs):
"""``ignore_index=True`` is not supported, because it requires generating an
order-sensitive index."""
if not isinstance(to_append, DeferredSeries):
raise frame_base.WontImplementError(
"append() only accepts DeferredSeries instances, received " +
str(type(to_append)))
if ignore_index:
raise frame_base.WontImplementError(
"append(ignore_index=True) is order sensitive because it requires "
"generating a new index based on the order of the data.",
reason="order-sensitive")
if verify_integrity:
# With index-partitioned data, duplicate index values are co-located, so
# integrity (index uniqueness) can be verified within each partition.
requires = partitionings.Index()
else:
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'append',
lambda s,
to_append: s.append(
to_append, verify_integrity=verify_integrity, **kwargs),
[self._expr, to_append._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def align(self, other, join, axis, level, method, **kwargs):
"""Aligning per-level is not yet supported. Only the default,
``level=None``, is allowed.
Filling NaN values via ``method`` is not supported, because it is
`order-sensitive
<https://s.apache.org/dataframe-order-sensitive-operations>`_.
Only the default, ``method=None``, is allowed."""
if level is not None:
raise NotImplementedError('per-level align')
if method is not None:
raise frame_base.WontImplementError(
f"align(method={method!r}) is not supported because it is "
"order sensitive. Only align(method=None) is supported.",
reason="order-sensitive")
# We're using pd.concat here as expressions don't yet support
# multiple return values.
aligned = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'align',
lambda x,
y: pd.concat([x, y], axis=1, join='inner'),
[self._expr, other._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
return aligned.iloc[:, 0], aligned.iloc[:, 1]
argsort = frame_base.wont_implement_method(
pd.Series, 'argsort', reason="order-sensitive")
array = property(
frame_base.wont_implement_method(
pd.Series, 'array', reason="non-deferred-result"))
# We can't reliably predict the output type, it depends on whether `key` is:
# - not in the index (default_value)
# - in the index once (constant)
# - in the index multiple times (Series)
get = frame_base.wont_implement_method(
pd.Series, 'get', reason="non-deferred-columns")
ravel = frame_base.wont_implement_method(
pd.Series, 'ravel', reason="non-deferred-result")
rename = frame_base._elementwise_method('rename', base=pd.Series)
between = frame_base._elementwise_method('between', base=pd.Series)
add_suffix = frame_base._proxy_method(
'add_suffix',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
add_prefix = frame_base._proxy_method(
'add_prefix',
base=pd.DataFrame,
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
@frame_base.with_docs_from(pd.DataFrame)
def dot(self, other):
"""``other`` must be a :class:`DeferredDataFrame` or :class:`DeferredSeries`
instance. Computing the dot product with an array-like is not supported
because it is order-sensitive."""
left = self._expr
if isinstance(other, DeferredSeries):
right = expressions.ComputedExpression(
'to_dataframe',
pd.DataFrame, [other._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
right_is_series = True
elif isinstance(other, DeferredDataFrame):
right = other._expr
right_is_series = False
else:
raise frame_base.WontImplementError(
"other must be a DeferredDataFrame or DeferredSeries instance. "
"Passing a concrete list or numpy array is not supported. Those "
"types have no index and must be joined based on the order of the "
"data.",
reason="order-sensitive")
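# The product distributes over row partitions: compute a partial product for
# the rows in each partition (inputs co-located by Index), then sum the
# partial results across partitions below.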
dots = expressions.ComputedExpression(
'dot',
# Transpose so we can sum across rows.
(lambda left, right: pd.DataFrame(left @ right).T),
[left, right],
requires_partition_by=partitionings.Index())
with expressions.allow_non_parallel_operations(True):
sums = expressions.ComputedExpression(
'sum',
lambda dots: dots.sum(), #
[dots],
requires_partition_by=partitionings.Singleton())
if right_is_series:
result = expressions.ComputedExpression(
'extract',
lambda df: df[0], [sums],
requires_partition_by=partitionings.Singleton())
else:
result = sums
return frame_base.DeferredFrame.wrap(result)
__matmul__ = dot
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nunique(self, **kwargs):
return self.drop_duplicates(keep="any").size
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def quantile(self, q, **kwargs):
"""quantile is not parallelizable. See
`BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking
the possible addition of an approximate, parallelizable implementation of
quantile."""
# TODO(BEAM-12167): Provide an option for approximate distributed
# quantiles
requires = partitionings.Singleton(
reason=(
"Computing quantiles across index cannot currently be "
"parallelized. See BEAM-12167 tracking the possible addition of an "
"approximate, parallelizable implementation of quantile."))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'quantile',
lambda df: df.quantile(q=q, **kwargs), [self._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
def std(self, *args, **kwargs):
# Compute variance (deferred scalar) with same args, then sqrt it
return self.var(*args, **kwargs).apply(lambda var: math.sqrt(var))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def var(self, axis, skipna, level, ddof, **kwargs):
"""Per-level aggregation is not yet supported (BEAM-11777). Only the
default, ``level=None``, is allowed."""
if level is not None:
raise NotImplementedError("per-level aggregation")
if skipna is None or skipna:
self = self.dropna() # pylint: disable=self-cls-assignment
# See the online, numerically stable formulae at
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# and
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
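# In the notation used below, each partition contributes m (the sum of squared
# deviations), s (the sum) and n (the count). Two partial results combine as
#   m = m_a + m_b + (s_a/n_a - s_b/n_b)**2 * n_a*n_b / (n_a + n_b)
# with s = s_a + s_b and n = n_a + n_b, and the variance is m / (n - ddof).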
def compute_moments(x):
n = len(x)
m = x.std(ddof=0)**2 * n
s = x.sum()
return pd.DataFrame(dict(m=[m], s=[s], n=[n]))
def combine_moments(data):
m = s = n = 0.0
for datum in data.itertuples():
if datum.n == 0:
continue
elif n == 0:
m, s, n = datum.m, datum.s, datum.n
else:
delta = s / n - datum.s / datum.n
m += datum.m + delta**2 * n * datum.n / (n + datum.n)
s += datum.s
n += datum.n
if n <= ddof:
return float('nan')
else:
return m / (n - ddof)
moments = expressions.ComputedExpression(
'compute_moments',
compute_moments, [self._expr],
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_moments',
combine_moments, [moments],
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def corr(self, other, method, min_periods):
"""Only ``method='pearson'`` is currently parallelizable."""
if method == 'pearson': # Note that this is the default.
x, y = self.dropna().align(other.dropna(), 'inner')
return x._corr_aligned(y, min_periods)
else:
reason = (
f"Encountered corr(method={method!r}) which cannot be "
"parallelized. Only corr(method='pearson') is currently "
"parallelizable.")
# The rank-based correlations are not obviously parallelizable, though
# perhaps an approximation could be done with a knowledge of quantiles
# and custom partitioning.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'corr',
lambda df,
other: df.corr(other, method=method, min_periods=min_periods),
[self._expr, other._expr],
requires_partition_by=partitionings.Singleton(reason=reason)))
def _corr_aligned(self, other, min_periods):
std_x = self.std()
std_y = other.std()
cov = self._cov_aligned(other, min_periods)
return cov.apply(
lambda cov, std_x, std_y: cov / (std_x * std_y), args=[std_x, std_y])
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def cov(self, other, min_periods, ddof):
x, y = self.dropna().align(other.dropna(), 'inner')
return x._cov_aligned(y, min_periods, ddof)
def _cov_aligned(self, other, min_periods, ddof=1):
# Use the formulae from
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Covariance
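# In the notation used below, each partition contributes c (the sum of products
# of deviations, i.e. the co-moment), the sums sx and sy, and the count n. Two
# partial results combine as
#   c = c_a + c_b + (sx_a/n_a - sx_b/n_b)*(sy_a/n_a - sy_b/n_b)*n_a*n_b/(n_a + n_b)
# and the covariance is c / (n - ddof).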
def compute_co_moments(x, y):
n = len(x)
if n <= 1:
c = 0
else:
c = x.cov(y) * (n - 1)
sx = x.sum()
sy = y.sum()
return pd.DataFrame(dict(c=[c], sx=[sx], sy=[sy], n=[n]))
def combine_co_moments(data):
c = sx = sy = n = 0.0
for datum in data.itertuples():
if datum.n == 0:
continue
elif n == 0:
c, sx, sy, n = datum.c, datum.sx, datum.sy, datum.n
else:
c += (
datum.c + (sx / n - datum.sx / datum.n) *
(sy / n - datum.sy / datum.n) * n * datum.n / (n + datum.n))
sx += datum.sx
sy += datum.sy
n += datum.n
if n < max(2, ddof, min_periods or 0):
return float('nan')
else:
return c / (n - ddof)
moments = expressions.ComputedExpression(
'compute_co_moments',
compute_co_moments, [self._expr, other._expr],
requires_partition_by=partitionings.Index())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine_co_moments',
combine_co_moments, [moments],
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def dropna(self, **kwargs):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dropna',
lambda df: df.dropna(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary()))
isnull = isna = frame_base._elementwise_method('isna', base=pd.Series)
notnull = notna = frame_base._elementwise_method('notna', base=pd.Series)
items = frame_base.wont_implement_method(
pd.Series, 'items', reason="non-deferred-result")
iteritems = frame_base.wont_implement_method(
pd.Series, 'iteritems', reason="non-deferred-result")
tolist = frame_base.wont_implement_method(
pd.Series, 'tolist', reason="non-deferred-result")
to_numpy = frame_base.wont_implement_method(
pd.Series, 'to_numpy', reason="non-deferred-result")
to_string = frame_base.wont_implement_method(
pd.Series, 'to_string', reason="non-deferred-result")
def _wrap_in_df(self):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'wrap_in_df',
lambda s: pd.DataFrame(s),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def duplicated(self, keep):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
# Re-use the DataFrame based duplicated, extract the series back out
df = self._wrap_in_df()
return df.duplicated(keep=keep)[df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def drop_duplicates(self, keep):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
# Re-use the DataFrame based drop_duplicates, extract the series back out
df = self._wrap_in_df()
return df.drop_duplicates(keep=keep)[df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
@frame_base.maybe_inplace
def sample(self, **kwargs):
"""Only ``n`` and/or ``weights`` may be specified. ``frac``,
``random_state``, and ``replace=True`` are not yet supported.
See `BEAM-12476 <https://issues.apache.org/jira/browse/BEAM-12476>`_.
Note that pandas will raise an error if ``n`` is larger than the length
of the dataset, while the Beam DataFrame API will simply return the full
dataset in that case."""
# Re-use the DataFrame based sample, extract the series back out
df = self._wrap_in_df()
return df.sample(**kwargs)[df.columns[0]]
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def aggregate(self, func, axis, *args, **kwargs):
"""Some aggregation methods cannot be parallelized, and computing
them will require collecting all data on a single machine."""
if kwargs.get('skipna', False):
# Eagerly generate a proxy to make sure skipna is a valid argument
# for this aggregation method
_ = self._expr.proxy().aggregate(func, axis, *args, **kwargs)
kwargs.pop('skipna')
return self.dropna().aggregate(func, axis, *args, **kwargs)
if isinstance(func, list) and len(func) > 1:
# level arg is ignored for multiple aggregations
_ = kwargs.pop('level', None)
# Aggregate with each method separately, then stick them all together.
rows = [self.agg([f], *args, **kwargs) for f in func]
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *rows: pd.concat(rows), [row._expr for row in rows]))
else:
# We're only handling a single column. It could be 'func' or ['func'],
# which produce different results. 'func' produces a scalar, ['func']
# produces a single element Series.
base_func = func[0] if isinstance(func, list) else func
if (_is_numeric(base_func) and
not pd.core.dtypes.common.is_numeric_dtype(self.dtype)):
warnings.warn(
f"Performing a numeric aggregation, {base_func!r}, on "
f"Series {self._expr.proxy().name!r} with non-numeric type "
f"{self.dtype!r}. This can result in runtime errors or surprising "
"results.")
if 'level' in kwargs:
# Defer to groupby.agg for level= mode
return self.groupby(
level=kwargs.pop('level'), axis=axis).agg(func, *args, **kwargs)
singleton_reason = None
if 'min_count' in kwargs:
# Eagerly generate a proxy to make sure min_count is a valid argument
# for this aggregation method
_ = self._expr.proxy().agg(func, axis, *args, **kwargs)
singleton_reason = (
"Aggregation with min_count= requires collecting all data on a "
"single node.")
# We have specialized distributed implementations for these
if base_func in ('quantile', 'std', 'var', 'nunique', 'corr', 'cov'):
result = getattr(self, base_func)(*args, **kwargs)
if isinstance(func, list):
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'wrap_aggregate',
lambda x: pd.Series(x, index=[base_func]), [result._expr],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
else:
return result
agg_kwargs = kwargs.copy()
if ((_is_associative(base_func) or _is_liftable_with_sum(base_func)) and
singleton_reason is None):
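# Lift the aggregation: pre-aggregate within each partition in parallel, then
# combine the partial results with the same function if it is associative
# (e.g. sum, min, max), or with 'sum' for the liftable-with-sum aggregations
# (count, size).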
intermediate = expressions.ComputedExpression(
'pre_aggregate',
# Coerce to a Series: even if the result is a scalar we still want a
# Series so we can combine and do the final aggregation next.
lambda s: pd.Series(s.agg(func, *args, **kwargs)),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
allow_nonparallel_final = True
if _is_associative(base_func):
agg_func = func
else:
agg_func = ['sum'] if isinstance(func, list) else 'sum'
else:
intermediate = self._expr
allow_nonparallel_final = None # i.e. don't change the value
agg_func = func
singleton_reason = (
f"Aggregation function {func!r} cannot currently be "
"parallelized. It requires collecting all data for "
"this Series on a single node.")
with expressions.allow_non_parallel_operations(allow_nonparallel_final):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda s: s.agg(agg_func, *args, **agg_kwargs), [intermediate],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton(
reason=singleton_reason)))
agg = aggregate
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def axes(self):
return [self.index]
clip = frame_base._elementwise_method('clip', base=pd.Series)
all = _agg_method(pd.Series, 'all')
any = _agg_method(pd.Series, 'any')
# TODO(BEAM-12074): Document that Series.count(level=) will drop NaN's
count = _agg_method(pd.Series, 'count')
describe = _agg_method(pd.Series, 'describe')
min = _agg_method(pd.Series, 'min')
max = _agg_method(pd.Series, 'max')
prod = product = _agg_method(pd.Series, 'prod')
sum = _agg_method(pd.Series, 'sum')
mean = _agg_method(pd.Series, 'mean')
median = _agg_method(pd.Series, 'median')
argmax = frame_base.wont_implement_method(
pd.Series, 'argmax', reason='order-sensitive')
argmin = frame_base.wont_implement_method(
pd.Series, 'argmin', reason='order-sensitive')
cummax = frame_base.wont_implement_method(
pd.Series, 'cummax', reason='order-sensitive')
cummin = frame_base.wont_implement_method(
pd.Series, 'cummin', reason='order-sensitive')
cumprod = frame_base.wont_implement_method(
pd.Series, 'cumprod', reason='order-sensitive')
cumsum = frame_base.wont_implement_method(
pd.Series, 'cumsum', reason='order-sensitive')
diff = frame_base.wont_implement_method(
pd.Series, 'diff', reason='order-sensitive')
interpolate = frame_base.wont_implement_method(
pd.Series, 'interpolate', reason='order-sensitive')
searchsorted = frame_base.wont_implement_method(
pd.Series, 'searchsorted', reason='order-sensitive')
shift = frame_base.wont_implement_method(
pd.Series, 'shift', reason='order-sensitive')
head = frame_base.wont_implement_method(
pd.Series, 'head', explanation=_PEEK_METHOD_EXPLANATION)
tail = frame_base.wont_implement_method(
pd.Series, 'tail', explanation=_PEEK_METHOD_EXPLANATION)
filter = frame_base._elementwise_method('filter', base=pd.Series)
memory_usage = frame_base.wont_implement_method(
pd.Series, 'memory_usage', reason="non-deferred-result")
# In Series __contains__ checks the index
__contains__ = frame_base.wont_implement_method(
pd.Series, '__contains__', reason="non-deferred-result")
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nlargest(self, keep, **kwargs):
"""Only ``keep="all"`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that behaves like ``"first"``, except that it makes
no guarantees about _which_ of the equally-ranked elements are kept."""
# TODO(robertwb): Document 'any' option.
# TODO(robertwb): Consider (conditionally) defaulting to 'any' if no
# explicit keep parameter is requested.
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError(
f"nlargest(keep={keep!r}) is not supported because it is "
"order sensitive. Only keep=\"all\" is supported.",
reason="order-sensitive")
kwargs['keep'] = keep
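# Two-stage evaluation: take the n largest within each partition in parallel,
# then take the n largest of those partial results on a single node.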
per_partition = expressions.ComputedExpression(
'nlargest-per-partition',
lambda df: df.nlargest(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nlargest',
lambda df: df.nlargest(**kwargs), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def nsmallest(self, keep, **kwargs):
"""Only ``keep="all"`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that behaves like ``"first"``, except that it makes
no guarantees about _which_ of the equally-ranked elements are kept."""
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError(
f"nsmallest(keep={keep!r}) is not supported because it is "
"order sensitive. Only keep=\"all\" is supported.",
reason="order-sensitive")
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nsmallest-per-partition',
lambda df: df.nsmallest(**kwargs), [self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nsmallest',
lambda df: df.nsmallest(**kwargs), [per_partition],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Singleton()))
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def is_unique(self):
def set_index(s):
s = s[:]
s.index = s
return s
self_index = expressions.ComputedExpression(
'set_index',
set_index, [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton())
is_unique_distributed = expressions.ComputedExpression(
'is_unique_distributed',
lambda s: pd.Series(s.is_unique), [self_index],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton())
with expressions.allow_non_parallel_operations():
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'combine',
lambda s: s.all(), [is_unique_distributed],
requires_partition_by=partitionings.Singleton(),
preserves_partition_by=partitionings.Singleton()))
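# Illustrative sketch (plain pandas, not part of the Beam API) of why the
# distributed is_unique above is sound: after partitioning by value (the
# set_index step plus Index() partitioning), any duplicated value lands in a
# single partition, so the global answer is the AND of per-partition answers.
#
#   import pandas as pd
#   s = pd.Series([1, 2, 2, 3])
#   parts = [s[s % 2 == 0], s[s % 2 == 1]]  # a value-based partitioning
#   assert s.is_unique == all(p.is_unique for p in parts)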
plot = frame_base.wont_implement_method(
pd.Series, 'plot', reason="plotting-tools")
pop = frame_base.wont_implement_method(
pd.Series, 'pop', reason="non-deferred-result")
rename_axis = frame_base._elementwise_method('rename_axis', base=pd.Series)
round = frame_base._elementwise_method('round', base=pd.Series)
take = frame_base.wont_implement_method(
pd.Series, 'take', reason='deprecated')
to_dict = frame_base.wont_implement_method(
pd.Series, 'to_dict', reason="non-deferred-result")
to_frame = frame_base._elementwise_method('to_frame', base=pd.Series)
@frame_base.with_docs_from(pd.Series)
def unique(self, as_series=False):
"""unique is not supported by default because it produces a
non-deferred result: an :class:`~numpy.ndarray`. You can use the
Beam-specific argument ``unique(as_series=True)`` to get the result as
a :class:`DeferredSeries`"""
if not as_series:
raise frame_base.WontImplementError(
"unique() is not supported by default because it produces a "
"non-deferred result: a numpy array. You can use the Beam-specific "
"argument unique(as_series=True) to get the result as a "
"DeferredSeries",
reason="non-deferred-result")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'unique',
lambda df: pd.Series(df.unique()), [self._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton(
reason="unique() cannot currently be parallelized.")))
@frame_base.with_docs_from(pd.Series)
def update(self, other):
self._expr = expressions.ComputedExpression(
'update',
lambda df,
other: df.update(other) or df, [self._expr, other._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index())
unstack = frame_base.wont_implement_method(
pd.Series, 'unstack', reason='non-deferred-columns')
@frame_base.with_docs_from(pd.Series)
def value_counts(
self,
sort=False,
normalize=False,
ascending=False,
bins=None,
dropna=True):
"""``sort`` is ``False`` by default, and ``sort=True`` is not supported
because it imposes an ordering on the dataset which likely will not be
preserved.
When ``bins`` is specified this operation is not parallelizable. See
[BEAM-12441](https://issues.apache.org/jira/browse/BEAM-12441) tracking the
possible addition of a distributed implementation."""
if sort:
raise frame_base.WontImplementError(
"value_counts(sort=True) is not supported because it imposes an "
"ordering on the dataset which likely will not be preserved.",
reason="order-sensitive")
if bins is not None:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'value_counts',
lambda s: s.value_counts(
    normalize=normalize, bins=bins, dropna=dropna), [self._expr],
requires_partition_by=partitionings.Singleton(
reason=(
"value_counts with bin specified requires collecting "
"the entire dataset to identify the range.")),
preserves_partition_by=partitionings.Singleton(),
))
if dropna:
column = self.dropna()
else:
column = self
result = column.groupby(column).size()
# groupby.size() names the index, which we don't need
result.index.name = None
if normalize:
return result / column.length()
else:
return result
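# Illustrative sketch (plain pandas, not part of the Beam API) of the
# groupby-based formulation above, which is what lets value_counts run without
# a global sort:
#
#   import pandas as pd
#   s = pd.Series(['a', 'b', 'a', None])
#   clean = s.dropna()
#   via_groupby = clean.groupby(clean).size()
#   assert via_groupby.to_dict() == s.value_counts(sort=False).to_dict()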
values = property(
frame_base.wont_implement_method(
pd.Series, 'values', reason="non-deferred-result"))
view = frame_base.wont_implement_method(
pd.Series,
'view',
explanation=(
"because it relies on memory-sharing semantics that are "
"not compatible with the Beam model."))
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def str(self):
return _DeferredStringMethods(self._expr)
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def cat(self):
return _DeferredCategoricalMethods(self._expr)
@property # type: ignore
@frame_base.with_docs_from(pd.Series)
def dt(self):
return _DeferredDatetimeMethods(self._expr)
@frame_base.with_docs_from(pd.Series)
def mode(self, *args, **kwargs):
"""mode is not currently parallelizable. An approximate,
parallelizable implementation of mode may be added in the future
(`BEAM-12181 <https://issues.apache.org/jira/browse/BEAM-12181>`_)."""
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'mode',
lambda df: df.mode(*args, **kwargs),
[self._expr],
#TODO(BEAM-12181): Can we add an approximate implementation?
requires_partition_by=partitionings.Singleton(
reason=(
"mode cannot currently be parallelized. See "
"BEAM-12181 tracking the possble addition of "
"an approximate, parallelizable implementation of mode.")),
preserves_partition_by=partitionings.Singleton()))
apply = frame_base._elementwise_method('apply', base=pd.Series)
map = frame_base._elementwise_method('map', base=pd.Series)
# TODO(BEAM-11636): Implement transform using type inference to determine the
# proxy
#transform = frame_base._elementwise_method('transform', base=pd.Series)
@frame_base.with_docs_from(pd.Series)
@frame_base.args_to_kwargs(pd.Series)
@frame_base.populate_defaults(pd.Series)
def repeat(self, repeats, axis):
"""``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are
not supported because they make this operation order-sensitive."""
if isinstance(repeats, int):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series: series.repeat(repeats), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, frame_base.DeferredBase):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series,
repeats_series: series.repeat(repeats_series),
[self._expr, repeats._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, list):
raise frame_base.WontImplementError(
"repeat(repeats=) repeats must be an int or a DeferredSeries. "
"Lists are not supported because they make this operation sensitive "
"to the order of the data.",
reason="order-sensitive")
else:
raise TypeError(
"repeat(repeats=) value must be an int or a "
f"DeferredSeries (encountered {type(repeats)}).")
@populate_not_implemented(pd.DataFrame)
@frame_base.DeferredFrame._register_for(pd.DataFrame)
class DeferredDataFrame(DeferredDataFrameOrSeries):
def __repr__(self):
return (
f'DeferredDataFrame(columns={list(self.columns)}, '
f'{self._render_indexes()})')
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def columns(self):
return self._expr.proxy().columns
@columns.setter
def columns(self, columns):
def set_columns(df):
df = df.copy()
df.columns = columns
return df
# A property setter must mutate this frame in place; a value returned from a
# setter is silently discarded by Python.
self._expr = expressions.ComputedExpression(
    'set_columns',
    set_columns, [self._expr],
    requires_partition_by=partitionings.Arbitrary(),
    preserves_partition_by=partitionings.Arbitrary())
@frame_base.with_docs_from(pd.DataFrame)
def keys(self):
return self.columns
def __getattr__(self, name):
# Column attribute access.
if name in self._expr.proxy().columns:
return self[name]
else:
return object.__getattribute__(self, name)
def __getitem__(self, key):
# TODO: Replicate pd.DataFrame.__getitem__ logic
if isinstance(key, DeferredSeries) and key._expr.proxy().dtype == bool:
return self.loc[key]
elif isinstance(key, frame_base.DeferredBase):
# Fail early if key is a DeferredBase as it interacts surprisingly with
# key in self._expr.proxy().columns
raise NotImplementedError(
"Indexing with a non-bool deferred frame is not yet supported. "
"Consider using df.loc[...]")
elif isinstance(key, slice):
if _is_null_slice(key):
return self
elif _is_integer_slice(key):
# This depends on the contents of the index.
raise frame_base.WontImplementError(
"Integer slices are not supported as they are ambiguous. Please "
"use iloc or loc with integer slices.")
else:
return self.loc[key]
elif (
(isinstance(key, list) and all(key_column in self._expr.proxy().columns
for key_column in key)) or
key in self._expr.proxy().columns):
return self._elementwise(lambda df: df[key], 'get_column')
else:
raise NotImplementedError(key)
def __contains__(self, key):
# Checks if proxy has the given column
return self._expr.proxy().__contains__(key)
def __setitem__(self, key, value):
if isinstance(
key, str) or (isinstance(key, list) and
all(isinstance(c, str)
for c in key)) or (isinstance(key, DeferredSeries) and
key._expr.proxy().dtype == bool):
# yapf: disable
return self._elementwise(
lambda df, key, value: df.__setitem__(key, value),
'set_column',
(key, value),
inplace=True)
else:
raise NotImplementedError(key)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def align(self, other, join, axis, copy, level, method, **kwargs):
"""Aligning per level is not yet supported. Only the default,
``level=None``, is allowed.
Filling NaN values via ``method`` is not supported, because it is
`order-sensitive
<https://s.apache.org/dataframe-order-sensitive-operations>`_. Only the
default, ``method=None``, is allowed.
``copy=False`` is not supported because its behavior (whether or not it is
an inplace operation) depends on the data."""
if not copy:
raise frame_base.WontImplementError(
"align(copy=False) is not supported because it might be an inplace "
"operation depending on the data. Please prefer the default "
"align(copy=True).")
if method is not None:
raise frame_base.WontImplementError(
f"align(method={method!r}) is not supported because it is "
"order sensitive. Only align(method=None) is supported.",
reason="order-sensitive")
if kwargs:
raise NotImplementedError('align(%s)' % ', '.join(kwargs.keys()))
if level is not None:
# Could probably get by partitioning on the used levels.
requires_partition_by = partitionings.Singleton(reason=(
f"align(level={level}) is not currently parallelizable. Only "
"align(level=None) can be parallelized."))
elif axis in ('columns', 1):
requires_partition_by = partitionings.Arbitrary()
else:
requires_partition_by = partitionings.Index()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'align',
lambda df, other: df.align(other, join=join, axis=axis),
[self._expr, other._expr],
requires_partition_by=requires_partition_by,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def append(self, other, ignore_index, verify_integrity, sort, **kwargs):
"""``ignore_index=True`` is not supported, because it requires generating an
order-sensitive index."""
if not isinstance(other, DeferredDataFrame):
raise frame_base.WontImplementError(
"append() only accepts DeferredDataFrame instances, received " +
str(type(other)))
if ignore_index:
raise frame_base.WontImplementError(
"append(ignore_index=True) is order sensitive because it requires "
"generating a new index based on the order of the data.",
reason="order-sensitive")
if verify_integrity:
# We can verify the index is non-unique within index partitioned data.
requires = partitionings.Index()
else:
requires = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'append',
lambda s, other: s.append(other, sort=sort,
verify_integrity=verify_integrity,
**kwargs),
[self._expr, other._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()
)
)
# If column name exists this is a simple project, otherwise it is a constant
# (default_value)
@frame_base.with_docs_from(pd.DataFrame)
def get(self, key, default_value=None):
if key in self.columns:
return self[key]
else:
return default_value
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def set_index(self, keys, **kwargs):
"""``keys`` must be a ``str`` or ``List[str]``. Passing an Index or Series
is not yet supported (`BEAM-11711
<https://issues.apache.org/jira/browse/BEAM-11711>`_)."""
if isinstance(keys, str):
keys = [keys]
if any(isinstance(k, (_DeferredIndex, frame_base.DeferredFrame))
for k in keys):
raise NotImplementedError("set_index with Index or Series instances is "
"not yet supported (BEAM-11711).")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'set_index',
lambda df: df.set_index(keys, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def axes(self):
return (self.index, self.columns)
@property # type: ignore
@frame_base.with_docs_from(pd.DataFrame)
def dtypes(self):
return self._expr.proxy().dtypes
@frame_base.with_docs_from(pd.DataFrame)
def assign(self, **kwargs):
"""``value`` must be a ``callable`` or :class:`DeferredSeries`. Other types
make this operation order-sensitive."""
for name, value in kwargs.items():
if not callable(value) and not isinstance(value, DeferredSeries):
raise frame_base.WontImplementError(
f"Unsupported value for new column '{name}': '{value}'. Only "
"callables and DeferredSeries instances are supported. Other types "
"make this operation sensitive to the order of the data",
reason="order-sensitive")
return self._elementwise(
lambda df, *args, **kwargs: df.assign(*args, **kwargs),
'assign',
other_kwargs=kwargs)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def explode(self, column, ignore_index):
# ignoring the index will not preserve it
preserves = (partitionings.Singleton() if ignore_index
else partitionings.Index())
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'explode',
lambda df: df.explode(column, ignore_index),
[self._expr],
preserves_partition_by=preserves,
requires_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def insert(self, value, **kwargs):
"""``value`` cannot be a ``List`` because aligning it with this
DeferredDataFrame is order-sensitive."""
if isinstance(value, list):
raise frame_base.WontImplementError(
"insert(value=list) is not supported because it joins the input "
"list to the deferred DataFrame based on the order of the data.",
reason="order-sensitive")
if isinstance(value, pd.core.generic.NDFrame):
value = frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(value))
if isinstance(value, frame_base.DeferredFrame):
def func_zip(df, value):
df = df.copy()
df.insert(value=value, **kwargs)
return df
inserted = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'insert',
func_zip,
[self._expr, value._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
else:
def func_elementwise(df):
df = df.copy()
df.insert(value=value, **kwargs)
return df
inserted = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'insert',
func_elementwise,
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
self._expr = inserted._expr
@staticmethod
@frame_base.with_docs_from(pd.DataFrame)
def from_dict(*args, **kwargs):
return frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(pd.DataFrame.from_dict(*args, **kwargs)))
@staticmethod
@frame_base.with_docs_from(pd.DataFrame)
def from_records(*args, **kwargs):
return frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(pd.DataFrame.from_records(*args,
**kwargs)))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def duplicated(self, keep, subset):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
# TODO(BEAM-12074): Document keep="any"
if keep == 'any':
keep = 'first'
elif keep is not False:
raise frame_base.WontImplementError(
f"duplicated(keep={keep!r}) is not supported because it is "
"sensitive to the order of the data. Only keep=False and "
"keep=\"any\" are supported.",
reason="order-sensitive")
by = subset or list(self.columns)
# Work around a bug where a groupby.apply() that returns a single-element
# Series moves the index label into a column.
return self.groupby(by).apply(
lambda df: pd.DataFrame(df.duplicated(keep=keep, subset=subset),
columns=[None]))[None]
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def drop_duplicates(self, keep, subset, ignore_index):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
# TODO(BEAM-12074): Document keep="any"
if keep == 'any':
keep = 'first'
elif keep is not False:
raise frame_base.WontImplementError(
f"drop_duplicates(keep={keep!r}) is not supported because it is "
"sensitive to the order of the data. Only keep=False and "
"keep=\"any\" are supported.",
reason="order-sensitive")
if ignore_index is not False:
raise frame_base.WontImplementError(
"drop_duplicates(ignore_index=False) is not supported because it "
"requires generating a new index that is sensitive to the order of "
"the data.",
reason="order-sensitive")
by = subset or list(self.columns)
return self.groupby(by).apply(
lambda df: df.drop_duplicates(keep=keep, subset=subset)).droplevel(by)
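# Illustrative sketch (plain pandas, not part of the Beam API) of the groupby
# formulation above: grouping by the deduplication key routes all candidate
# duplicates into one group, so dropping duplicates within each group is
# equivalent to a single global pass.
#
#   import pandas as pd
#   df = pd.DataFrame({'k': [1, 1, 2], 'v': [10, 10, 20]})
#   per_group = df.groupby(['k', 'v'], group_keys=False).apply(
#       lambda g: g.drop_duplicates())
#   assert len(per_group) == len(df.drop_duplicates())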
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def aggregate(self, func, axis, *args, **kwargs):
# We have specialized implementations for these.
if func in ('quantile',):
return getattr(self, func)(*args, axis=axis, **kwargs)
# Maps to a property, args are ignored
if func in ('size',):
return getattr(self, func)
# We also have specialized distributed implementations for these. They only
# support axis=0 (implicitly) though. axis=1 should fall through
if func in ('corr', 'cov') and axis in (0, 'index'):
return getattr(self, func)(*args, **kwargs)
if axis is None:
# Aggregate across all elements by first aggregating across columns,
# then across rows.
return self.agg(func, *args, **dict(kwargs, axis=1)).agg(
func, *args, **dict(kwargs, axis=0))
elif axis in (1, 'columns'):
# This is an easy elementwise aggregation.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda df: df.agg(func, axis=1, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary()))
elif len(self._expr.proxy().columns) == 0:
# For this corner case, just colocate everything.
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'aggregate',
lambda df: df.agg(func, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Singleton()))
else:
# In the general case, we will compute the aggregation of each column
# separately, then recombine.
# First, handle any kwargs that cause a projection, by eagerly generating
# the proxy, and only including the columns that are in the output.
PROJECT_KWARGS = ('numeric_only', 'bool_only', 'include', 'exclude')
proxy = self._expr.proxy().agg(func, axis, *args, **kwargs)
if isinstance(proxy, pd.DataFrame):
projected = self[list(proxy.columns)]
elif isinstance(proxy, pd.Series):
projected = self[list(proxy.index)]
else:
projected = self
nonnumeric_columns = [name for (name, dtype) in projected.dtypes.items()
if not
pd.core.dtypes.common.is_numeric_dtype(dtype)]
if _is_numeric(func) and nonnumeric_columns:
if 'numeric_only' in kwargs and kwargs['numeric_only'] is False:
# User has opted in to execution with non-numeric columns, they
# will accept runtime errors
pass
else:
raise frame_base.WontImplementError(
f"Numeric aggregation ({func!r}) on a DataFrame containing "
f"non-numeric columns ({*nonnumeric_columns,!r} is not "
"supported, unless `numeric_only=` is specified.\n"
"Use `numeric_only=True` to only aggregate over numeric "
"columns.\nUse `numeric_only=False` to aggregate over all "
"columns. Note this is not recommended, as it could result in "
"execution time errors.")
for key in PROJECT_KWARGS:
if key in kwargs:
kwargs.pop(key)
if not isinstance(func, dict):
col_names = list(projected._expr.proxy().columns)
func_by_col = {col: func for col in col_names}
else:
func_by_col = func
col_names = list(func.keys())
aggregated_cols = []
has_lists = any(isinstance(f, list) for f in func_by_col.values())
for col in col_names:
funcs = func_by_col[col]
if has_lists and not isinstance(funcs, list):
# If any of the columns do multiple aggregations, they all must use
# "list" style output
funcs = [funcs]
aggregated_cols.append(projected[col].agg(funcs, *args, **kwargs))
# The final shape is different depending on whether any of the columns
# were aggregated by a list of aggregators.
with expressions.allow_non_parallel_operations():
if isinstance(proxy, pd.Series):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *cols: pd.Series(
{col: value for col, value in zip(col_names, cols)}),
[col._expr for col in aggregated_cols],
requires_partition_by=partitionings.Singleton()))
elif isinstance(proxy, pd.DataFrame):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_aggregate',
lambda *cols: pd.DataFrame(
{col: value for col, value in zip(col_names, cols)}),
[col._expr for col in aggregated_cols],
requires_partition_by=partitionings.Singleton()))
else:
raise AssertionError("Unexpected proxy type for "
f"DataFrame.aggregate!: proxy={proxy!r}, "
f"type(proxy)={type(proxy)!r}")
agg = aggregate
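# Illustrative sketch (plain pandas, not part of the Beam API) of the general
# case in aggregate() above: each column is aggregated independently and the
# scalar results are stitched back together, reproducing the axis=0 aggregate.
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   stitched = pd.Series({col: df[col].sum() for col in df.columns})
#   assert stitched.equals(df.agg('sum'))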
applymap = frame_base._elementwise_method('applymap', base=pd.DataFrame)
add_prefix = frame_base._elementwise_method('add_prefix', base=pd.DataFrame)
add_suffix = frame_base._elementwise_method('add_suffix', base=pd.DataFrame)
memory_usage = frame_base.wont_implement_method(
pd.DataFrame, 'memory_usage', reason="non-deferred-result")
info = frame_base.wont_implement_method(
pd.DataFrame, 'info', reason="non-deferred-result")
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def clip(self, axis, **kwargs):
"""``lower`` and ``upper`` must be :class:`DeferredSeries` instances, or
constants. Array-like arguments are not supported because they are
order-sensitive."""
if any(isinstance(kwargs.get(arg, None), frame_base.DeferredFrame)
for arg in ('upper', 'lower')) and axis not in (0, 'index'):
raise frame_base.WontImplementError(
"axis must be 'index' when upper and/or lower are a DeferredFrame",
reason='order-sensitive')
return frame_base._elementwise_method('clip', base=pd.DataFrame)(self,
axis=axis,
**kwargs)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def corr(self, method, min_periods):
"""Only ``method="pearson"`` can be parallelized. Other methods require
collecting all data on a single worker (see
https://s.apache.org/dataframe-non-parallel-operations for details).
"""
if method == 'pearson':
proxy = self._expr.proxy().corr()
columns = list(proxy.columns)
args = []
arg_indices = []
for col1, col2 in itertools.combinations(columns, 2):
arg_indices.append((col1, col2))
args.append(self[col1].corr(self[col2], method=method,
min_periods=min_periods))
def fill_matrix(*args):
data = collections.defaultdict(dict)
for col in columns:
data[col][col] = 1.0
for ix, (col1, col2) in enumerate(arg_indices):
data[col1][col2] = data[col2][col1] = args[ix]
return pd.DataFrame(data, columns=columns, index=columns)
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_matrix',
fill_matrix,
[arg._expr for arg in args],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
else:
reason = (f"Encountered corr(method={method!r}) which cannot be "
"parallelized. Only corr(method='pearson') is currently "
"parallelizable.")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'corr',
lambda df: df.corr(method=method, min_periods=min_periods),
[self._expr],
requires_partition_by=partitionings.Singleton(reason=reason)))
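# Illustrative sketch (plain pandas, not part of the Beam API) of the pairwise
# decomposition above: the Pearson correlation matrix is just the per-pair
# correlations plus 1.0 on the diagonal, so each pair can be computed as an
# independent, parallelizable expression and assembled at the end.
#
#   import pandas as pd
#   df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [2.0, 4.0, 7.0]})
#   pair = df['a'].corr(df['b'])
#   assert abs(df.corr().loc['a', 'b'] - pair) < 1e-12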
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def cov(self, min_periods, ddof):
proxy = self._expr.proxy().corr()
columns = list(proxy.columns)
args = []
arg_indices = []
for col in columns:
arg_indices.append((col, col))
std = self[col].std(ddof)
args.append(std.apply(lambda x: x*x, 'square'))
for ix, col1 in enumerate(columns):
for col2 in columns[ix+1:]:
arg_indices.append((col1, col2))
# Note that this set may be different for each pair.
no_na = self.loc[self[col1].notna() & self[col2].notna()]
args.append(no_na[col1]._cov_aligned(no_na[col2], min_periods, ddof))
def fill_matrix(*args):
data = collections.defaultdict(dict)
for ix, (col1, col2) in enumerate(arg_indices):
data[col1][col2] = data[col2][col1] = args[ix]
return pd.DataFrame(data, columns=columns, index=columns)
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_matrix',
fill_matrix,
[arg._expr for arg in args],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def corrwith(self, other, axis, drop, method):
if axis in (1, 'columns'):
return self._elementwise(
lambda df, other: df.corrwith(other, axis=axis, drop=drop,
method=method),
'corrwith',
other_args=(other,))
if not isinstance(other, frame_base.DeferredFrame):
other = frame_base.DeferredFrame.wrap(
expressions.ConstantExpression(other))
if isinstance(other, DeferredSeries):
proxy = self._expr.proxy().corrwith(other._expr.proxy(), axis=axis,
drop=drop, method=method)
self, other = self.align(other, axis=0, join='inner')
col_names = proxy.index
other_cols = [other] * len(col_names)
elif isinstance(other, DeferredDataFrame):
proxy = self._expr.proxy().corrwith(
other._expr.proxy(), axis=axis, method=method, drop=drop)
self, other = self.align(other, axis=0, join='inner')
col_names = list(
set(self.columns)
.intersection(other.columns)
.intersection(proxy.index))
other_cols = [other[col_name] for col_name in col_names]
else:
# Raise the right error.
self._expr.proxy().corrwith(other._expr.proxy(), axis=axis, drop=drop,
method=method)
# Just in case something else becomes valid.
raise NotImplementedError('corrwith(%s)' % type(other._expr.proxy()))
# Generate expressions to compute the actual correlations.
corrs = [
self[col_name].corr(other_col, method)
for col_name, other_col in zip(col_names, other_cols)]
# Combine the results
def fill_dataframe(*args):
result = proxy.copy(deep=True)
for col, value in zip(proxy.index, args):
result[col] = value
return result
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'fill_dataframe',
fill_dataframe,
[corr._expr for corr in corrs],
requires_partition_by=partitionings.Singleton(),
proxy=proxy))
cummax = frame_base.wont_implement_method(pd.DataFrame, 'cummax',
reason='order-sensitive')
cummin = frame_base.wont_implement_method(pd.DataFrame, 'cummin',
reason='order-sensitive')
cumprod = frame_base.wont_implement_method(pd.DataFrame, 'cumprod',
reason='order-sensitive')
cumsum = frame_base.wont_implement_method(pd.DataFrame, 'cumsum',
reason='order-sensitive')
# TODO(BEAM-12071): Consider adding an order-insensitive implementation for
# diff that relies on the index
diff = frame_base.wont_implement_method(pd.DataFrame, 'diff',
reason='order-sensitive')
interpolate = frame_base.wont_implement_method(pd.DataFrame, 'interpolate',
reason='order-sensitive')
head = frame_base.wont_implement_method(pd.DataFrame, 'head',
explanation=_PEEK_METHOD_EXPLANATION)
tail = frame_base.wont_implement_method(pd.DataFrame, 'tail',
explanation=_PEEK_METHOD_EXPLANATION)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def sample(self, n, frac, replace, weights, random_state, axis):
"""When ``axis='index'``, only ``n`` and/or ``weights`` may be specified.
``frac``, ``random_state``, and ``replace=True`` are not yet supported.
See `BEAM-12476 <https://issues.apache.org/jira/browse/BEAM-12476>`_.
Note that pandas will raise an error if ``n`` is larger than the length
of the dataset, while the Beam DataFrame API will simply return the full
dataset in that case.
sample is fully supported for axis='columns'."""
if axis in (1, 'columns'):
# Sampling on axis=columns just means projecting random columns
# Eagerly generate proxy to determine the set of columns at construction
# time
proxy = self._expr.proxy().sample(n=n, frac=frac, replace=replace,
weights=weights,
random_state=random_state, axis=axis)
# Then do the projection
return self[list(proxy.columns)]
# axis='index'
if frac is not None or random_state is not None or replace:
raise NotImplementedError(
f"When axis={axis!r}, only n and/or weights may be specified. "
"frac, random_state, and replace=True are not yet supported "
f"(got frac={frac!r}, random_state={random_state!r}, "
f"replace={replace!r}). See BEAM-12476.")
if n is None:
n = 1
if isinstance(weights, str):
weights = self[weights]
tmp_weight_column_name = "___Beam_DataFrame_weights___"
if weights is None:
self_with_randomized_weights = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'randomized_weights',
lambda df: df.assign(**{tmp_weight_column_name:
np.random.rand(len(df))}),
[self._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
else:
# See "Fast Parallel Weighted Random Sampling" by Efraimidis and Spirakis
# https://www.cti.gr/images_gr/reports/99-06-02.ps
def assign_randomized_weights(df, weights):
non_zero_weights = (weights > 0) | pd.Series(dtype=bool, index=df.index)
df = df.loc[non_zero_weights]
weights = weights.loc[non_zero_weights]
random_weights = np.log(np.random.rand(len(weights))) / weights
return df.assign(**{tmp_weight_column_name: random_weights})
self_with_randomized_weights = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'randomized_weights',
assign_randomized_weights,
[self._expr, weights._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
return self_with_randomized_weights.nlargest(
n=n, columns=tmp_weight_column_name, keep='any').drop(
tmp_weight_column_name, axis=1)
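# Illustrative sketch (numpy/pandas, not part of the Beam API) of the weighted
# sampling idea used above (Efraimidis & Spirakis): every row gets the key
# log(u) / w with u ~ Uniform(0, 1), and the n rows with the largest keys form
# a weighted sample without replacement. Computing the keys is embarrassingly
# parallel, which is what makes the nlargest-based formulation work here.
#
#   import numpy as np
#   import pandas as pd
#   rows = pd.DataFrame({'x': range(5)})
#   weights = pd.Series([1.0, 1.0, 5.0, 1.0, 1.0])
#   keys = np.log(np.random.rand(len(rows))) / weights
#   sampled = rows.assign(_key=keys).nlargest(2, '_key').drop(columns='_key')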
@frame_base.with_docs_from(pd.DataFrame)
def dot(self, other):
# We want to broadcast the right hand side to all partitions of the left.
# This is OK, as its index must be the same size as the columns set of self,
# so cannot be too large.
class AsScalar(object):
def __init__(self, value):
self.value = value
if isinstance(other, frame_base.DeferredFrame):
proxy = other._expr.proxy()
with expressions.allow_non_parallel_operations():
side = expressions.ComputedExpression(
'as_scalar',
lambda df: AsScalar(df),
[other._expr],
requires_partition_by=partitionings.Singleton())
else:
proxy = pd.DataFrame(columns=range(len(other[0])))
side = expressions.ConstantExpression(AsScalar(other))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dot',
lambda left, right: left @ right.value,
[self._expr, side],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
proxy=proxy))
__matmul__ = dot
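# Illustrative note (plain pandas, not part of the Beam API): the right-hand
# operand of dot() is collapsed into a single broadcast value so that each
# partition of the left operand can compute its rows locally. The local work
# is just the ordinary pandas matrix product:
#
#   import pandas as pd
#   left = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   right = pd.Series([10, 1], index=['a', 'b'])
#   left @ right  # -> [13, 24]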
@frame_base.with_docs_from(pd.DataFrame)
def mode(self, axis=0, *args, **kwargs):
"""mode with axis="columns" is not implemented because it produces
non-deferred columns.
mode with axis="index" is not currently parallelizable. An approximate,
parallelizable implementation of mode may be added in the future
(`BEAM-12181 <https://issues.apache.org/jira/browse/BEAM-12181>`_)."""
if axis == 1 or axis == 'columns':
# Number of columns is max(number mode values for each row), so we can't
# determine how many there will be before looking at the data.
raise frame_base.WontImplementError(
"mode(axis=columns) is not supported because it produces a variable "
"number of columns depending on the data.",
reason="non-deferred-columns")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'mode',
lambda df: df.mode(*args, **kwargs),
[self._expr],
#TODO(BEAM-12181): Can we add an approximate implementation?
requires_partition_by=partitionings.Singleton(reason=(
"mode(axis='index') cannot currently be parallelized. See "
"BEAM-12181 tracking the possble addition of an approximate, "
"parallelizable implementation of mode."
)),
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
@frame_base.maybe_inplace
def dropna(self, axis, **kwargs):
"""dropna with axis="columns" specified cannot be parallelized."""
# TODO(robertwb): This is a common pattern. Generalize?
if axis in (1, 'columns'):
requires_partition_by = partitionings.Singleton(reason=(
"dropna(axis=1) cannot currently be parallelized. It requires "
"checking all values in each column for NaN values, to determine "
"if that column should be dropped."
))
else:
requires_partition_by = partitionings.Arbitrary()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'dropna',
lambda df: df.dropna(axis=axis, **kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=requires_partition_by))
def _eval_or_query(self, name, expr, inplace, **kwargs):
for key in ('local_dict', 'global_dict', 'level', 'target', 'resolvers'):
if key in kwargs:
raise NotImplementedError(f"Setting '{key}' is not yet supported")
# look for '@<py identifier>'
if re.search(r'\@[^\d\W]\w*', expr, re.UNICODE):
raise NotImplementedError("Accessing locals with @ is not yet supported "
"(BEAM-11202)")
result_expr = expressions.ComputedExpression(
name,
lambda df: getattr(df, name)(expr, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
if inplace:
self._expr = result_expr
else:
return frame_base.DeferredFrame.wrap(result_expr)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def eval(self, expr, inplace, **kwargs):
"""Accessing local variables with ``@<varname>`` is not yet supported
(`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_).
Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
``resolvers`` are not yet supported."""
return self._eval_or_query('eval', expr, inplace, **kwargs)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def query(self, expr, inplace, **kwargs):
"""Accessing local variables with ``@<varname>`` is not yet supported
(`BEAM-11202 <https://issues.apache.org/jira/browse/BEAM-11202>`_).
Arguments ``local_dict``, ``global_dict``, ``level``, ``target``, and
``resolvers`` are not yet supported."""
return self._eval_or_query('query', expr, inplace, **kwargs)
isnull = isna = frame_base._elementwise_method('isna', base=pd.DataFrame)
notnull = notna = frame_base._elementwise_method('notna', base=pd.DataFrame)
items = frame_base.wont_implement_method(pd.DataFrame, 'items',
reason="non-deferred-result")
itertuples = frame_base.wont_implement_method(pd.DataFrame, 'itertuples',
reason="non-deferred-result")
iterrows = frame_base.wont_implement_method(pd.DataFrame, 'iterrows',
reason="non-deferred-result")
iteritems = frame_base.wont_implement_method(pd.DataFrame, 'iteritems',
reason="non-deferred-result")
def _cols_as_temporary_index(self, cols, suffix=''):
original_index_names = list(self._expr.proxy().index.names)
new_index_names = [
'__apache_beam_temp_%d_%s' % (ix, suffix)
for (ix, _) in enumerate(original_index_names)]
def reindex(df):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'reindex',
lambda df:
df.rename_axis(index=new_index_names, copy=False)
.reset_index().set_index(cols),
[df._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Arbitrary()))
def revert(df):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join_restoreindex',
lambda df:
df.reset_index().set_index(new_index_names)
.rename_axis(index=original_index_names, copy=False),
[df._expr],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Arbitrary()))
return reindex, revert
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def join(self, other, on, **kwargs):
if on is not None:
reindex, revert = self._cols_as_temporary_index(on)
return revert(reindex(self).join(other, **kwargs))
if isinstance(other, list):
other_is_list = True
else:
other = [other]
other_is_list = False
placeholder = object()
other_exprs = [
df._expr for df in other if isinstance(df, frame_base.DeferredFrame)]
const_others = [
placeholder if isinstance(df, frame_base.DeferredFrame) else df
for df in other]
def fill_placeholders(values):
values = iter(values)
filled = [
next(values) if df is placeholder else df for df in const_others]
if other_is_list:
return filled
else:
return filled[0]
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'join',
lambda df, *deferred_others: df.join(
fill_placeholders(deferred_others), **kwargs),
[self._expr] + other_exprs,
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index()))
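# Illustrative sketch (plain pandas, not part of the Beam API) of the
# temporary-index trick used by join(on=...) above: moving the join key into
# the index lets the join itself run under Index() partitioning, after which
# the original index is restored. Simplified here, with a default original
# index:
#
#   import pandas as pd
#   left = pd.DataFrame({'k': [1, 2], 'v': [10, 20]})
#   right = pd.DataFrame({'w': [100, 200]}, index=[1, 2])
#   via_index = left.set_index('k').join(right).reset_index()
#   assert via_index[['v', 'w']].equals(left.join(right, on='k')[['v', 'w']])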
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def merge(
self,
right,
on,
left_on,
right_on,
left_index,
right_index,
suffixes,
**kwargs):
"""merge is not parallelizable unless ``left_index`` or ``right_index`` is
``True``, because it requires generating an entirely new unique index.
See notes on :meth:`DeferredDataFrame.reset_index`. It is recommended to
move the join key for one of your columns to the index to avoid this issue.
For an example see the enrich pipeline in
:mod:`apache_beam.examples.dataframe.taxiride`.
``how="cross"`` is not yet supported.
"""
self_proxy = self._expr.proxy()
right_proxy = right._expr.proxy()
# Validate with a pandas call.
_ = self_proxy.merge(
right_proxy,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
**kwargs)
if kwargs.get('how', None) == 'cross':
raise NotImplementedError("cross join is not yet implemented (BEAM-9547)")
if not any([on, left_on, right_on, left_index, right_index]):
on = [col for col in self_proxy.columns if col in right_proxy.columns]
if not left_on:
left_on = on
if left_on and not isinstance(left_on, list):
left_on = [left_on]
if not right_on:
right_on = on
if right_on and not isinstance(right_on, list):
right_on = [right_on]
if left_index:
indexed_left = self
else:
indexed_left = self.set_index(left_on, drop=False)
if right_index:
indexed_right = right
else:
indexed_right = right.set_index(right_on, drop=False)
if left_on and right_on:
common_cols = set(left_on).intersection(right_on)
if len(common_cols):
# When merging on the same column name from both dfs, we need to make
# sure only one df has the column. Otherwise we end up with
# two duplicate columns, one with lsuffix and one with rsuffix.
# It's safe to drop from either because the data has already been duped
# to the index.
indexed_right = indexed_right.drop(columns=common_cols)
merged = frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'merge',
lambda left, right: left.merge(right,
left_index=True,
right_index=True,
suffixes=suffixes,
**kwargs),
[indexed_left._expr, indexed_right._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Index()))
if left_index or right_index:
return merged
else:
return merged.reset_index(drop=True)
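# Illustrative sketch (plain pandas, not part of the Beam API) of the strategy
# above: moving the join keys into the index on both sides turns the merge into
# an index-aligned merge that can be partitioned by Index(); the shared key
# column is dropped from one side so suffixes are not applied to it twice.
#
#   import pandas as pd
#   a = pd.DataFrame({'k': [1, 2], 'x': [10, 20]})
#   b = pd.DataFrame({'k': [1, 2], 'y': [30, 40]})
#   via_index = a.set_index('k', drop=False).merge(
#       b.set_index('k', drop=False).drop(columns='k'),
#       left_index=True, right_index=True).reset_index(drop=True)
#   assert via_index.equals(a.merge(b, on='k'))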
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def nlargest(self, keep, **kwargs):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError(
f"nlargest(keep={keep!r}) is not supported because it is "
"order sensitive. Only keep=\"all\" is supported.",
reason="order-sensitive")
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nlargest-per-partition',
lambda df: df.nlargest(**kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nlargest',
lambda df: df.nlargest(**kwargs),
[per_partition],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def nsmallest(self, keep, **kwargs):
"""Only ``keep=False`` and ``keep="any"`` are supported. Other values of
``keep`` make this an order-sensitive operation. Note ``keep="any"`` is
a Beam-specific option that guarantees only one duplicate will be kept, but
unlike ``"first"`` and ``"last"`` it makes no guarantees about _which_
duplicate element is kept."""
if keep == 'any':
keep = 'first'
elif keep != 'all':
raise frame_base.WontImplementError(
f"nsmallest(keep={keep!r}) is not supported because it is "
"order sensitive. Only keep=\"all\" is supported.",
reason="order-sensitive")
kwargs['keep'] = keep
per_partition = expressions.ComputedExpression(
'nsmallest-per-partition',
lambda df: df.nsmallest(**kwargs),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
with expressions.allow_non_parallel_operations(True):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'nsmallest',
lambda df: df.nsmallest(**kwargs),
[per_partition],
preserves_partition_by=partitionings.Singleton(),
requires_partition_by=partitionings.Singleton()))
plot = frame_base.wont_implement_method(pd.DataFrame, 'plot',
reason="plotting-tools")
@frame_base.with_docs_from(pd.DataFrame)
def pop(self, item):
result = self[item]
self._expr = expressions.ComputedExpression(
'popped',
lambda df: df.drop(columns=[item]),
[self._expr],
preserves_partition_by=partitionings.Arbitrary(),
requires_partition_by=partitionings.Arbitrary())
return result
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def quantile(self, q, axis, **kwargs):
"""``quantile(axis="index")`` is not parallelizable. See
`BEAM-12167 <https://issues.apache.org/jira/browse/BEAM-12167>`_ tracking
the possible addition of an approximate, parallelizable implementation of
quantile.
When using quantile with ``axis="columns"`` only a single ``q`` value can be
specified."""
if axis in (1, 'columns'):
if isinstance(q, list):
raise frame_base.WontImplementError(
"quantile(axis=columns) with multiple q values is not supported "
"because it transposes the input DataFrame. Note computing "
"an individual quantile across columns (e.g. "
f"df.quantile(q={q[0]!r}, axis={axis!r}) is supported.",
reason="non-deferred-columns")
else:
requires = partitionings.Arbitrary()
else: # axis='index'
# TODO(BEAM-12167): Provide an option for approximate distributed
# quantiles
requires = partitionings.Singleton(reason=(
"Computing quantiles across index cannot currently be parallelized. "
"See BEAM-12167 tracking the possible addition of an approximate, "
"parallelizable implementation of quantile."
))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'quantile',
lambda df: df.quantile(q=q, axis=axis, **kwargs),
[self._expr],
requires_partition_by=requires,
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.maybe_inplace
def rename(self, **kwargs):
"""rename is not parallelizable when ``axis="index"`` and
``errors="raise"``. It requires collecting all data on a single
node in order to detect if one of the index values is missing."""
rename_index = (
'index' in kwargs
or kwargs.get('axis', None) in (0, 'index')
or ('columns' not in kwargs and 'axis' not in kwargs))
rename_columns = (
'columns' in kwargs
or kwargs.get('axis', None) in (1, 'columns'))
if rename_index:
# Technically, it's still partitioned by index, but it's no longer
# partitioned by the hash of the index.
preserves_partition_by = partitionings.Singleton()
else:
preserves_partition_by = partitionings.Index()
if kwargs.get('errors', None) == 'raise' and rename_index:
# TODO: We could do this in parallel by creating a ConstantExpression
# with a series created from the mapper dict. Then Index() partitioning
# would co-locate the necessary index values and we could raise
# individually within each partition. Execution time errors are
# discouraged anyway so probably not worth the effort.
requires_partition_by = partitionings.Singleton(reason=(
"rename(errors='raise', axis='index') requires collecting all "
"data on a single node in order to detect missing index values."
))
else:
requires_partition_by = partitionings.Arbitrary()
proxy = None
if rename_index:
# The proxy can't be computed by executing rename, it will error
# renaming the index.
if rename_columns:
# Note if both are being renamed, index and columns must be specified
# (not axis)
proxy = self._expr.proxy().rename(**{k: v for (k, v) in kwargs.items()
if not k == 'index'})
else:
# No change in columns, reuse proxy
proxy = self._expr.proxy()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'rename',
lambda df: df.rename(**kwargs),
[self._expr],
proxy=proxy,
preserves_partition_by=preserves_partition_by,
requires_partition_by=requires_partition_by))
rename_axis = frame_base._elementwise_method('rename_axis', base=pd.DataFrame)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def round(self, decimals, *args, **kwargs):
if isinstance(decimals, frame_base.DeferredFrame):
# Disallow passing a deferred Series in, our current partitioning model
# prevents us from using it correctly.
raise NotImplementedError("Passing a deferred series to round() is not "
"supported, please use a concrete pd.Series "
"instance or a dictionary")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'round',
lambda df: df.round(decimals, *args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Index()
)
)
select_dtypes = frame_base._elementwise_method('select_dtypes',
base=pd.DataFrame)
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def shift(self, axis, freq, **kwargs):
"""shift with ``axis="index" is only supported with ``freq`` specified and
``fill_value`` undefined. Other configurations make this operation
order-sensitive."""
if axis in (1, 'columns'):
preserves = partitionings.Arbitrary()
proxy = None
else:
if freq is None or 'fill_value' in kwargs:
fill_value = kwargs.get('fill_value', 'NOT SET')
raise frame_base.WontImplementError(
f"shift(axis={axis!r}) is only supported with freq defined, and "
f"fill_value undefined (got freq={freq!r},"
f"fill_value={fill_value!r}). Other configurations are sensitive "
"to the order of the data because they require populating shifted "
"rows with `fill_value`.",
reason="order-sensitive")
# proxy generation fails in pandas <1.2
# Seems due to https://github.com/pandas-dev/pandas/issues/14811,
# bug with shift on empty indexes.
# Fortunately the proxy should be identical to the input.
proxy = self._expr.proxy().copy()
# index is modified, so no partitioning is preserved.
preserves = partitionings.Singleton()
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'shift',
lambda df: df.shift(axis=axis, freq=freq, **kwargs),
[self._expr],
proxy=proxy,
preserves_partition_by=preserves,
requires_partition_by=partitionings.Arbitrary()))
shape = property(frame_base.wont_implement_method(
pd.DataFrame, 'shape', reason="non-deferred-result"))
stack = frame_base._elementwise_method('stack', base=pd.DataFrame)
all = _agg_method(pd.DataFrame, 'all')
any = _agg_method(pd.DataFrame, 'any')
count = _agg_method(pd.DataFrame, 'count')
describe = _agg_method(pd.DataFrame, 'describe')
max = _agg_method(pd.DataFrame, 'max')
min = _agg_method(pd.DataFrame, 'min')
prod = product = _agg_method(pd.DataFrame, 'prod')
sum = _agg_method(pd.DataFrame, 'sum')
mean = _agg_method(pd.DataFrame, 'mean')
median = _agg_method(pd.DataFrame, 'median')
nunique = _agg_method(pd.DataFrame, 'nunique')
std = _agg_method(pd.DataFrame, 'std')
var = _agg_method(pd.DataFrame, 'var')
take = frame_base.wont_implement_method(pd.DataFrame, 'take',
reason='deprecated')
to_records = frame_base.wont_implement_method(pd.DataFrame, 'to_records',
reason="non-deferred-result")
to_dict = frame_base.wont_implement_method(pd.DataFrame, 'to_dict',
reason="non-deferred-result")
to_numpy = frame_base.wont_implement_method(pd.DataFrame, 'to_numpy',
reason="non-deferred-result")
to_string = frame_base.wont_implement_method(pd.DataFrame, 'to_string',
reason="non-deferred-result")
to_sparse = frame_base.wont_implement_method(pd.DataFrame, 'to_sparse',
reason="non-deferred-result")
transpose = frame_base.wont_implement_method(
pd.DataFrame, 'transpose', reason='non-deferred-columns')
T = property(frame_base.wont_implement_method(
pd.DataFrame, 'T', reason='non-deferred-columns'))
@frame_base.with_docs_from(pd.DataFrame)
def unstack(self, *args, **kwargs):
"""unstack cannot be used on :class:`DeferredDataFrame` instances with
multiple index levels, because the columns in the output depend on the
data."""
if self._expr.proxy().index.nlevels == 1:
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'unstack',
lambda df: df.unstack(*args, **kwargs),
[self._expr],
requires_partition_by=partitionings.Index()))
else:
raise frame_base.WontImplementError(
"unstack() is not supported on DataFrames with a multiple indexes, "
"because the columns in the output depend on the input data.",
reason="non-deferred-columns")
update = frame_base._proxy_method(
'update',
inplace=True,
base=pd.DataFrame,
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary())
values = property(frame_base.wont_implement_method(
pd.DataFrame, 'values', reason="non-deferred-result"))
style = property(frame_base.wont_implement_method(
pd.DataFrame, 'style', reason="non-deferred-result"))
@frame_base.with_docs_from(pd.DataFrame)
@frame_base.args_to_kwargs(pd.DataFrame)
@frame_base.populate_defaults(pd.DataFrame)
def melt(self, ignore_index, **kwargs):
"""``ignore_index=True`` is not supported, because it requires generating an
order-sensitive index."""
if ignore_index:
raise frame_base.WontImplementError(
"melt(ignore_index=True) is order sensitive because it requires "
"generating a new index based on the order of the data.",
reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'melt',
lambda df: df.melt(ignore_index=False, **kwargs), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton()))
@frame_base.with_docs_from(pd.DataFrame)
def value_counts(self, subset=None, sort=False, normalize=False,
ascending=False):
"""``sort`` is ``False`` by default, and ``sort=True`` is not supported
because it imposes an ordering on the dataset which likely will not be
preserved."""
if sort:
raise frame_base.WontImplementError(
"value_counts(sort=True) is not supported because it imposes an "
"ordering on the dataset which likely will not be preserved.",
reason="order-sensitive")
columns = subset or list(self.columns)
result = self.groupby(columns).size()
if normalize:
return result/self.dropna().length()
else:
return result
for io_func in dir(io):
if io_func.startswith('to_'):
setattr(DeferredDataFrame, io_func, getattr(io, io_func))
setattr(DeferredSeries, io_func, getattr(io, io_func))
for meth in ('filter', ):
setattr(DeferredDataFrame, meth,
frame_base._elementwise_method(meth, base=pd.DataFrame))
@populate_not_implemented(DataFrameGroupBy)
class DeferredGroupBy(frame_base.DeferredFrame):
def __init__(self, expr, kwargs,
ungrouped: expressions.Expression[pd.core.generic.NDFrame],
ungrouped_with_index: expressions.Expression[pd.core.generic.NDFrame], # pylint: disable=line-too-long
grouping_columns,
grouping_indexes,
projection=None):
"""This object represents the result of::
ungrouped.groupby(level=[grouping_indexes + grouping_columns],
**kwargs)[projection]
:param expr: An expression to compute a pandas GroupBy object. Convenient
for unliftable aggregations.
:param ungrouped: An expression to compute the DataFrame pre-grouping, the
(Multi)Index contains only the grouping columns/indexes.
:param ungrouped_with_index: Same as ungrouped, except the index includes
all of the original indexes as well as any grouping columns. This is
important for operations that expose the original index, e.g. .apply(),
but we only use it when necessary to avoid unnecessary data transfer and
GBKs.
:param grouping_columns: list of column labels that were in the original
groupby(..) ``by`` parameter. Only relevant for grouped DataFrames.
:param grouping_indexes: list of index names (or index level numbers) to be
grouped.
:param kwargs: Keywords args passed to the original groupby(..) call."""
super(DeferredGroupBy, self).__init__(expr)
self._ungrouped = ungrouped
self._ungrouped_with_index = ungrouped_with_index
self._projection = projection
self._grouping_columns = grouping_columns
self._grouping_indexes = grouping_indexes
self._kwargs = kwargs
if (self._kwargs.get('dropna', True) is False and
self._ungrouped.proxy().index.nlevels > 1):
raise NotImplementedError(
"dropna=False does not work as intended in the Beam DataFrame API "
"when grouping on multiple columns or indexes (See BEAM-12495).")
def __getattr__(self, name):
return DeferredGroupBy(
expressions.ComputedExpression(
'groupby_project',
lambda gb: getattr(gb, name), [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()),
self._kwargs,
self._ungrouped,
self._ungrouped_with_index,
self._grouping_columns,
self._grouping_indexes,
projection=name)
def __getitem__(self, name):
return DeferredGroupBy(
expressions.ComputedExpression(
'groupby_project',
lambda gb: gb[name], [self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()),
self._kwargs,
self._ungrouped,
self._ungrouped_with_index,
self._grouping_columns,
self._grouping_indexes,
projection=name)
@frame_base.with_docs_from(DataFrameGroupBy)
def agg(self, fn, *args, **kwargs):
if _is_associative(fn):
return _liftable_agg(fn)(self, *args, **kwargs)
elif _is_liftable_with_sum(fn):
return _liftable_agg(fn, postagg_meth='sum')(self, *args, **kwargs)
elif _is_unliftable(fn):
return _unliftable_agg(fn)(self, *args, **kwargs)
elif callable(fn):
return DeferredDataFrame(
expressions.ComputedExpression(
'agg',
lambda gb: gb.agg(fn, *args, **kwargs), [self._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Singleton()))
else:
raise NotImplementedError(f"GroupBy.agg(func={fn!r})")
@property
def ndim(self):
return self._expr.proxy().ndim
@frame_base.with_docs_from(DataFrameGroupBy)
def apply(self, func, *args, **kwargs):
"""Note that ``func`` will be called once during pipeline construction time
with an empty pandas object, so take care if ``func`` has a side effect.
When called with an empty pandas object, ``func`` is expected to return an
object of the same type as what will be returned when the pipeline is
processing actual data. If the result is a pandas object it should have the
same type and name (for a Series) or column types and names (for
a DataFrame) as the actual results."""
project = _maybe_project_func(self._projection)
grouping_indexes = self._grouping_indexes
grouping_columns = self._grouping_columns
# Unfortunately pandas does not execute func to determine the right proxy.
# We run user func on a proxy here to detect the return type and generate
# the proxy.
fn_input = project(self._ungrouped_with_index.proxy().reset_index(
grouping_columns, drop=True))
result = func(fn_input)
if isinstance(result, pd.core.generic.NDFrame):
if result.index is fn_input.index:
proxy = result
else:
proxy = result[:0]
def index_to_arrays(index):
return [index.get_level_values(level)
for level in range(index.nlevels)]
# The final result will have the grouped indexes + the indexes from the
# result
proxy.index = pd.MultiIndex.from_arrays(
index_to_arrays(self._ungrouped.proxy().index) +
index_to_arrays(proxy.index),
names=self._ungrouped.proxy().index.names + proxy.index.names)
else:
# The user fn returns some non-pandas type. The expected result is a
# Series where each element is the result of one user fn call.
dtype = pd.Series([result]).dtype
proxy = pd.Series([], dtype=dtype, index=self._ungrouped.proxy().index)
def do_partition_apply(df):
# Remove columns from index, we only needed them there for partitioning
df = df.reset_index(grouping_columns, drop=True)
gb = df.groupby(level=grouping_indexes or None,
by=grouping_columns or None)
gb = project(gb)
return gb.apply(func, *args, **kwargs)
return DeferredDataFrame(
expressions.ComputedExpression(
'apply',
do_partition_apply,
[self._ungrouped_with_index],
proxy=proxy,
requires_partition_by=partitionings.Index(grouping_indexes +
grouping_columns),
preserves_partition_by=partitionings.Index(grouping_indexes)))
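# Illustrative sketch (hypothetical user code, not part of this module): a
# ``func`` passed to apply() should tolerate the empty proxy it is called with
# at construction time, e.g. an aggregating callable such as
#
#   def total(df):
#     # Returns 0 on the empty proxy and the per-group sum on real data.
#     return df['value'].sum()
#
#   grouped.apply(total)
#
# where 'value' and ``grouped`` are hypothetical names.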
@frame_base.with_docs_from(DataFrameGroupBy)
def transform(self, fn, *args, **kwargs):
"""Note that ``func`` will be called once during pipeline construction time
with an empty pandas object, so take care if ``func`` has a side effect.
When called with an empty pandas object, ``func`` is expected to return an
object of the same type as what will be returned when the pipeline is
processing actual data. The result should have the same type and name (for
a Series) or column types and names (for a DataFrame) as the actual
results."""
if not callable(fn):
raise NotImplementedError(
"String functions are not yet supported in transform.")
if self._grouping_columns and not self._projection:
grouping_columns = self._grouping_columns
def fn_wrapper(x, *args, **kwargs):
x = x.droplevel(grouping_columns)
return fn(x, *args, **kwargs)
else:
fn_wrapper = fn
project = _maybe_project_func(self._projection)
# pandas cannot execute fn to determine the right proxy.
# We run user fn on a proxy here to detect the return type and generate the
# proxy.
result = fn_wrapper(project(self._ungrouped_with_index.proxy()))
parent_frame = self._ungrouped.args()[0].proxy()
if isinstance(result, pd.core.generic.NDFrame):
proxy = result[:0]
else:
# The user fn returns some non-pandas type. The expected result is a
# Series where each element is the result of one user fn call.
dtype = pd.Series([result]).dtype
proxy = pd.Series([], dtype=dtype, name=project(parent_frame).name)
if not isinstance(self._projection, list):
proxy.name = self._projection
# The final result will have the original indexes
proxy.index = parent_frame.index
levels = self._grouping_indexes + self._grouping_columns
return DeferredDataFrame(
expressions.ComputedExpression(
'transform',
lambda df: project(df.groupby(level=levels)).transform(
fn_wrapper,
*args,
**kwargs).droplevel(self._grouping_columns),
[self._ungrouped_with_index],
proxy=proxy,
requires_partition_by=partitionings.Index(levels),
preserves_partition_by=partitionings.Index(self._grouping_indexes)))
@frame_base.with_docs_from(DataFrameGroupBy)
def filter(self, func=None, dropna=True):
if func is None or not callable(func):
raise TypeError("func must be specified and it must be callable")
def apply_fn(df):
if func(df):
return df
elif not dropna:
result = df.copy()
result.iloc[:, :] = np.nan
return result
else:
return df.iloc[:0]
return self.apply(apply_fn).droplevel(self._grouping_columns)
@property # type: ignore
@frame_base.with_docs_from(DataFrameGroupBy)
def dtypes(self):
grouping_columns = self._grouping_columns
return self.apply(lambda df: df.drop(grouping_columns, axis=1).dtypes)
fillna = frame_base.wont_implement_method(
DataFrameGroupBy, 'fillna', explanation=(
"df.fillna() should be used instead. Only method=None is supported "
"because other methods are order-sensitive. df.groupby(..).fillna() "
"without a method is equivalent to df.fillna()."))
ffill = frame_base.wont_implement_method(DataFrameGroupBy, 'ffill',
reason="order-sensitive")
bfill = frame_base.wont_implement_method(DataFrameGroupBy, 'bfill',
reason="order-sensitive")
pad = frame_base.wont_implement_method(DataFrameGroupBy, 'pad',
reason="order-sensitive")
backfill = frame_base.wont_implement_method(DataFrameGroupBy, 'backfill',
reason="order-sensitive")
aggregate = agg
hist = frame_base.wont_implement_method(DataFrameGroupBy, 'hist',
reason="plotting-tools")
plot = frame_base.wont_implement_method(DataFrameGroupBy, 'plot',
reason="plotting-tools")
boxplot = frame_base.wont_implement_method(DataFrameGroupBy, 'boxplot',
reason="plotting-tools")
head = frame_base.wont_implement_method(
DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION)
tail = frame_base.wont_implement_method(
DataFrameGroupBy, 'tail', explanation=_PEEK_METHOD_EXPLANATION)
first = frame_base.not_implemented_method('first', base_type=DataFrameGroupBy)
last = frame_base.not_implemented_method('last', base_type=DataFrameGroupBy)
nth = frame_base.wont_implement_method(
DataFrameGroupBy, 'nth', reason='order-sensitive')
cumcount = frame_base.wont_implement_method(
DataFrameGroupBy, 'cumcount', reason='order-sensitive')
cummax = frame_base.wont_implement_method(
DataFrameGroupBy, 'cummax', reason='order-sensitive')
cummin = frame_base.wont_implement_method(
DataFrameGroupBy, 'cummin', reason='order-sensitive')
cumsum = frame_base.wont_implement_method(
DataFrameGroupBy, 'cumsum', reason='order-sensitive')
cumprod = frame_base.wont_implement_method(
DataFrameGroupBy, 'cumprod', reason='order-sensitive')
diff = frame_base.wont_implement_method(DataFrameGroupBy, 'diff',
reason='order-sensitive')
shift = frame_base.wont_implement_method(DataFrameGroupBy, 'shift',
reason='order-sensitive')
# TODO(BEAM-12169): Consider allowing this for categorical keys.
__len__ = frame_base.wont_implement_method(
DataFrameGroupBy, '__len__', reason="non-deferred-result")
groups = property(frame_base.wont_implement_method(
DataFrameGroupBy, 'groups', reason="non-deferred-result"))
indices = property(frame_base.wont_implement_method(
DataFrameGroupBy, 'indices', reason="non-deferred-result"))
resample = frame_base.wont_implement_method(
DataFrameGroupBy, 'resample', reason='event-time-semantics')
rolling = frame_base.wont_implement_method(
DataFrameGroupBy, 'rolling', reason='event-time-semantics')
def _maybe_project_func(projection: Optional[List[str]]):
""" Returns identity func if projection is empty or None, else returns
a function that projects the specified columns. """
if projection:
return lambda df: df[projection]
else:
return lambda x: x
def _liftable_agg(meth, postagg_meth=None):
agg_name, _ = frame_base.name_and_func(meth)
if postagg_meth is None:
post_agg_name = agg_name
else:
post_agg_name, _ = frame_base.name_and_func(postagg_meth)
@frame_base.with_docs_from(DataFrameGroupBy, name=agg_name)
def wrapper(self, *args, **kwargs):
assert isinstance(self, DeferredGroupBy)
if 'min_count' in kwargs:
return _unliftable_agg(meth)(self, *args, **kwargs)
to_group = self._ungrouped.proxy().index
is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
for i in self._grouping_indexes)
groupby_kwargs = self._kwargs
# Don't include un-observed categorical values in the preagg
preagg_groupby_kwargs = groupby_kwargs.copy()
preagg_groupby_kwargs['observed'] = True
project = _maybe_project_func(self._projection)
pre_agg = expressions.ComputedExpression(
'pre_combine_' + agg_name,
lambda df: getattr(
project(
df.groupby(level=list(range(df.index.nlevels)),
**preagg_groupby_kwargs)
),
agg_name)(**kwargs),
[self._ungrouped],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
post_agg = expressions.ComputedExpression(
'post_combine_' + post_agg_name,
lambda df: getattr(
df.groupby(level=list(range(df.index.nlevels)),
**groupby_kwargs),
post_agg_name)(**kwargs),
[pre_agg],
requires_partition_by=(partitionings.Singleton(reason=(
"Aggregations grouped by a categorical column are not currently "
"parallelizable (BEAM-11190)."
))
if is_categorical_grouping
else partitionings.Index()),
preserves_partition_by=partitionings.Arbitrary())
return frame_base.DeferredFrame.wrap(post_agg)
return wrapper
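def _liftable_agg_identity_sketch():
  # Illustrative sketch only, not used by this module: the pre_combine /
  # post_combine split above relies on the identity that, for an associative
  # aggregation such as sum, combining per-partition partial results gives the
  # same answer as aggregating the full data in one pass.
  import pandas as pd
  full = pd.DataFrame({'k': ['a', 'a', 'b', 'b'], 'v': [1, 2, 3, 4]})
  part1, part2 = full.iloc[:2], full.iloc[2:]
  partials = pd.concat([p.groupby('k').sum() for p in (part1, part2)])
  lifted = partials.groupby(level=0).sum()  # post-combine of the partial sums
  direct = full.groupby('k').sum()          # single-pass aggregation
  assert lifted.equals(direct)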
def _unliftable_agg(meth):
agg_name, _ = frame_base.name_and_func(meth)
@frame_base.with_docs_from(DataFrameGroupBy, name=agg_name)
def wrapper(self, *args, **kwargs):
assert isinstance(self, DeferredGroupBy)
to_group = self._ungrouped.proxy().index
is_categorical_grouping = any(to_group.get_level_values(i).is_categorical()
for i in self._grouping_indexes)
groupby_kwargs = self._kwargs
project = _maybe_project_func(self._projection)
post_agg = expressions.ComputedExpression(
agg_name,
lambda df: getattr(project(
df.groupby(level=list(range(df.index.nlevels)),
**groupby_kwargs),
), agg_name)(**kwargs),
[self._ungrouped],
requires_partition_by=(partitionings.Singleton(reason=(
"Aggregations grouped by a categorical column are not currently "
"parallelizable (BEAM-11190)."
))
if is_categorical_grouping
else partitionings.Index()),
# Some aggregation methods (e.g. corr/cov) add additional index levels.
# We only preserve the ones that existed _before_ the groupby.
preserves_partition_by=partitionings.Index(
list(range(self._ungrouped.proxy().index.nlevels))))
return frame_base.DeferredFrame.wrap(post_agg)
return wrapper
for meth in LIFTABLE_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _liftable_agg(meth))
for meth in LIFTABLE_WITH_SUM_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _liftable_agg(meth, postagg_meth='sum'))
for meth in UNLIFTABLE_AGGREGATIONS:
setattr(DeferredGroupBy, meth, _unliftable_agg(meth))
def _check_str_or_np_builtin(agg_func, func_list):
return agg_func in func_list or (
getattr(agg_func, '__name__', None) in func_list
and agg_func.__module__ in ('numpy', 'builtins'))
def _is_associative(agg_func):
return _check_str_or_np_builtin(agg_func, LIFTABLE_AGGREGATIONS)
def _is_liftable_with_sum(agg_func):
return _check_str_or_np_builtin(agg_func, LIFTABLE_WITH_SUM_AGGREGATIONS)
def _is_unliftable(agg_func):
return _check_str_or_np_builtin(agg_func, UNLIFTABLE_AGGREGATIONS)
NUMERIC_AGGREGATIONS = ['max', 'min', 'prod', 'sum', 'mean', 'median', 'std',
'var']
def _is_numeric(agg_func):
return _check_str_or_np_builtin(agg_func, NUMERIC_AGGREGATIONS)
@populate_not_implemented(DataFrameGroupBy)
class _DeferredGroupByCols(frame_base.DeferredFrame):
# It's not clear that all of these make sense in Pandas either...
agg = aggregate = frame_base._elementwise_method('agg', base=DataFrameGroupBy)
any = frame_base._elementwise_method('any', base=DataFrameGroupBy)
all = frame_base._elementwise_method('all', base=DataFrameGroupBy)
boxplot = frame_base.wont_implement_method(
DataFrameGroupBy, 'boxplot', reason="plotting-tools")
describe = frame_base.not_implemented_method('describe',
base_type=DataFrameGroupBy)
diff = frame_base._elementwise_method('diff', base=DataFrameGroupBy)
fillna = frame_base._elementwise_method('fillna', base=DataFrameGroupBy)
filter = frame_base._elementwise_method('filter', base=DataFrameGroupBy)
first = frame_base._elementwise_method('first', base=DataFrameGroupBy)
get_group = frame_base._elementwise_method('get_group', base=DataFrameGroupBy)
head = frame_base.wont_implement_method(
DataFrameGroupBy, 'head', explanation=_PEEK_METHOD_EXPLANATION)
hist = frame_base.wont_implement_method(
DataFrameGroupBy, 'hist', reason="plotting-tools")
idxmax = frame_base._elementwise_method('idxmax', base=DataFrameGroupBy)
idxmin = frame_base._elementwise_method('idxmin', base=DataFrameGroupBy)
last = frame_base._elementwise_method('last', base=DataFrameGroupBy)
mad = frame_base._elementwise_method('mad', base=DataFrameGroupBy)
max = frame_base._elementwise_method('max', base=DataFrameGroupBy)
mean = frame_base._elementwise_method('mean', base=DataFrameGroupBy)
median = frame_base._elementwise_method('median', base=DataFrameGroupBy)
min = frame_base._elementwise_method('min', base=DataFrameGroupBy)
nunique = frame_base._elementwise_method('nunique', base=DataFrameGroupBy)
plot = frame_base.wont_implement_method(
DataFrameGroupBy, 'plot', reason="plotting-tools")
prod = frame_base._elementwise_method('prod', base=DataFrameGroupBy)
quantile = frame_base._elementwise_method('quantile', base=DataFrameGroupBy)
shift = frame_base._elementwise_method('shift', base=DataFrameGroupBy)
size = frame_base._elementwise_method('size', base=DataFrameGroupBy)
skew = frame_base._elementwise_method('skew', base=DataFrameGroupBy)
std = frame_base._elementwise_method('std', base=DataFrameGroupBy)
sum = frame_base._elementwise_method('sum', base=DataFrameGroupBy)
tail = frame_base.wont_implement_method(
DataFrameGroupBy, 'tail', explanation=_PEEK_METHOD_EXPLANATION)
take = frame_base.wont_implement_method(
DataFrameGroupBy, 'take', reason='deprecated')
tshift = frame_base._elementwise_method('tshift', base=DataFrameGroupBy)
var = frame_base._elementwise_method('var', base=DataFrameGroupBy)
@property # type: ignore
@frame_base.with_docs_from(DataFrameGroupBy)
def groups(self):
return self._expr.proxy().groups
@property # type: ignore
@frame_base.with_docs_from(DataFrameGroupBy)
def indices(self):
return self._expr.proxy().indices
@property # type: ignore
@frame_base.with_docs_from(DataFrameGroupBy)
def ndim(self):
return self._expr.proxy().ndim
@property # type: ignore
@frame_base.with_docs_from(DataFrameGroupBy)
def ngroups(self):
return self._expr.proxy().ngroups
@populate_not_implemented(pd.core.indexes.base.Index)
class _DeferredIndex(object):
def __init__(self, frame):
self._frame = frame
@property
def names(self):
return self._frame._expr.proxy().index.names
@names.setter
def names(self, value):
def set_index_names(df):
df = df.copy()
df.index.names = value
return df
self._frame._expr = expressions.ComputedExpression(
'set_index_names',
set_index_names,
[self._frame._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary())
@property
def name(self):
return self._frame._expr.proxy().index.name
@name.setter
def name(self, value):
self.names = [value]
@property
def ndim(self):
return self._frame._expr.proxy().index.ndim
@property
def dtype(self):
return self._frame._expr.proxy().index.dtype
@property
def nlevels(self):
return self._frame._expr.proxy().index.nlevels
def __getattr__(self, name):
raise NotImplementedError('index.%s' % name)
@populate_not_implemented(pd.core.indexing._LocIndexer)
class _DeferredLoc(object):
def __init__(self, frame):
self._frame = frame
def __getitem__(self, key):
if isinstance(key, tuple):
rows, cols = key
return self[rows][cols]
elif isinstance(key, list) and key and isinstance(key[0], bool):
# Aligned by numerical key.
raise NotImplementedError(type(key))
elif isinstance(key, list):
# Select rows, but behaves poorly on missing values.
raise NotImplementedError(type(key))
elif isinstance(key, slice):
args = [self._frame._expr]
func = lambda df: df.loc[key]
elif isinstance(key, frame_base.DeferredFrame):
func = lambda df, key: df.loc[key]
if pd.core.dtypes.common.is_bool_dtype(key._expr.proxy()):
# Boolean indexer, just pass it in as-is
args = [self._frame._expr, key._expr]
else:
# Likely a DeferredSeries of labels, overwrite the key's index with its
# values so we can colocate them with the labels they're selecting
def data_to_index(s):
s = s.copy()
s.index = s
return s
reindexed_expr = expressions.ComputedExpression(
'data_to_index',
data_to_index,
[key._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Singleton(),
)
args = [self._frame._expr, reindexed_expr]
elif callable(key):
def checked_callable_key(df):
computed_index = key(df)
if isinstance(computed_index, tuple):
row_index, _ = computed_index
else:
row_index = computed_index
if isinstance(row_index, list) and row_index and isinstance(
row_index[0], bool):
raise NotImplementedError(type(row_index))
elif not isinstance(row_index, (slice, pd.Series)):
raise NotImplementedError(type(row_index))
return computed_index
args = [self._frame._expr]
func = lambda df: df.loc[checked_callable_key]
else:
raise NotImplementedError(type(key))
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'loc',
func,
args,
requires_partition_by=(
partitionings.Index()
if len(args) > 1
else partitionings.Arbitrary()),
preserves_partition_by=partitionings.Arbitrary()))
__setitem__ = frame_base.not_implemented_method(
'loc.setitem', base_type=pd.core.indexing._LocIndexer)
@populate_not_implemented(pd.core.indexing._iLocIndexer)
class _DeferredILoc(object):
def __init__(self, frame):
self._frame = frame
def __getitem__(self, index):
if isinstance(index, tuple):
rows, _ = index
if rows != slice(None, None, None):
raise frame_base.WontImplementError(
"Using iloc to select rows is not supported because it's "
"position-based indexing is sensitive to the order of the data.",
reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'iloc',
lambda df: df.iloc[index],
[self._frame._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
else:
raise frame_base.WontImplementError(
"Using iloc to select rows is not supported because it's "
"position-based indexing is sensitive to the order of the data.",
reason="order-sensitive")
def __setitem__(self, index, value):
raise frame_base.WontImplementError(
"Using iloc to mutate a frame is not supported because it's "
"position-based indexing is sensitive to the order of the data.",
reason="order-sensitive")
class _DeferredStringMethods(frame_base.DeferredBase):
@frame_base.with_docs_from(pd.core.strings.StringMethods)
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
@frame_base.populate_defaults(pd.core.strings.StringMethods)
def cat(self, others, join, **kwargs):
"""If defined, ``others`` must be a :class:`DeferredSeries` or a ``list`` of
``DeferredSeries``."""
if others is None:
# Concatenate series into a single String
requires = partitionings.Singleton(reason=(
"cat(others=None) concatenates all data in a Series into a single "
"string, so it requires collecting all data on a single node."
))
func = lambda df: df.str.cat(join=join, **kwargs)
args = [self._expr]
elif (isinstance(others, frame_base.DeferredBase) or
(isinstance(others, list) and
all(isinstance(other, frame_base.DeferredBase) for other in others))):
if isinstance(others, frame_base.DeferredBase):
others = [others]
requires = partitionings.Index()
def func(*args):
return args[0].str.cat(others=args[1:], join=join, **kwargs)
args = [self._expr] + [other._expr for other in others]
else:
raise frame_base.WontImplementError(
"others must be None, DeferredSeries, or List[DeferredSeries] "
f"(encountered {type(others)}). Other types are not supported "
"because they make this operation sensitive to the order of the "
"data.", reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'cat',
func,
args,
requires_partition_by=requires,
preserves_partition_by=partitionings.Arbitrary()))
@frame_base.with_docs_from(pd.core.strings.StringMethods)
@frame_base.args_to_kwargs(pd.core.strings.StringMethods)
def repeat(self, repeats):
"""``repeats`` must be an ``int`` or a :class:`DeferredSeries`. Lists are
not supported because they make this operation order-sensitive."""
if isinstance(repeats, int):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series: series.str.repeat(repeats),
[self._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, frame_base.DeferredBase):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'repeat',
lambda series, repeats_series: series.str.repeat(repeats_series),
[self._expr, repeats._expr],
# TODO(BEAM-11155): Defer to pandas to compute this proxy.
# Currently it incorrectly infers dtype bool, may require upstream
# fix.
proxy=self._expr.proxy(),
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif isinstance(repeats, list):
raise frame_base.WontImplementError(
"str.repeat(repeats=) repeats must be an int or a DeferredSeries. "
"Lists are not supported because they make this operation sensitive "
"to the order of the data.", reason="order-sensitive")
else:
raise TypeError("str.repeat(repeats=) value must be an int or a "
f"DeferredSeries (encountered {type(repeats)}).")
get_dummies = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'get_dummies',
reason='non-deferred-columns')
split = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'split',
reason='non-deferred-columns')
rsplit = frame_base.wont_implement_method(
pd.core.strings.StringMethods, 'rsplit',
reason='non-deferred-columns')
ELEMENTWISE_STRING_METHODS = [
'capitalize',
'casefold',
'contains',
'count',
'endswith',
'extract',
'extractall',
'findall',
'fullmatch',
'get',
'isalnum',
'isalpha',
'isdecimal',
'isdigit',
'islower',
'isnumeric',
'isspace',
'istitle',
'isupper',
'join',
'len',
'lower',
'lstrip',
'match',
'pad',
'partition',
'replace',
'rpartition',
'rstrip',
'slice',
'slice_replace',
'startswith',
'strip',
'swapcase',
'title',
'upper',
'wrap',
'zfill',
'__getitem__',
]
def make_str_func(method):
def func(df, *args, **kwargs):
try:
df_str = df.str
except AttributeError:
# If there's a non-string value in a Series passed to .str method, pandas
# will generally just replace it with NaN in the result. However if
# there are _only_ non-string values, pandas will raise:
#
# AttributeError: Can only use .str accessor with string values!
#
# This can happen to us at execution time if we split a partition that is
# only non-strings. This branch just replaces all those values with NaN
# in that case.
return df.map(lambda _: np.nan)
else:
return getattr(df_str, method)(*args, **kwargs)
return func
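def _str_accessor_non_string_sketch():
  # Illustrative sketch only, not used by this module: the pandas behavior
  # that make_str_func() works around. A Series mixing strings and
  # non-strings maps the non-strings to NaN, while a Series containing *only*
  # non-strings has no usable .str accessor at all.
  import pandas as pd
  mixed = pd.Series(['a', 1])
  assert mixed.str.upper().isna().tolist() == [False, True]
  only_numbers = pd.Series([1, 2])
  try:
    only_numbers.str
  except AttributeError:
    pass  # "Can only use .str accessor with string values!"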
for method in ELEMENTWISE_STRING_METHODS:
setattr(_DeferredStringMethods,
method,
frame_base._elementwise_method(make_str_func(method),
name=method,
base=pd.core.strings.StringMethods))
def make_cat_func(method):
def func(df, *args, **kwargs):
return getattr(df.cat, method)(*args, **kwargs)
return func
class _DeferredCategoricalMethods(frame_base.DeferredBase):
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def categories(self):
return self._expr.proxy().cat.categories
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def ordered(self):
return self._expr.proxy().cat.ordered
@property # type: ignore
@frame_base.with_docs_from(pd.core.arrays.categorical.CategoricalAccessor)
def codes(self):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'codes',
lambda s: s.cat.codes,
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary(),
)
)
remove_unused_categories = frame_base.wont_implement_method(
pd.core.arrays.categorical.CategoricalAccessor,
'remove_unused_categories', reason="non-deferred-columns")
ELEMENTWISE_CATEGORICAL_METHODS = [
'add_categories',
'as_ordered',
'as_unordered',
'remove_categories',
'rename_categories',
'reorder_categories',
'set_categories',
]
for method in ELEMENTWISE_CATEGORICAL_METHODS:
setattr(_DeferredCategoricalMethods,
method,
frame_base._elementwise_method(
make_cat_func(method), name=method,
base=pd.core.arrays.categorical.CategoricalAccessor))
class _DeferredDatetimeMethods(frame_base.DeferredBase):
@property # type: ignore
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def tz(self):
return self._expr.proxy().dt.tz
@property # type: ignore
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def freq(self):
return self._expr.proxy().dt.freq
@frame_base.with_docs_from(pd.core.indexes.accessors.DatetimeProperties)
def tz_localize(self, *args, ambiguous='infer', **kwargs):
"""``ambiguous`` cannot be set to ``"infer"`` as its semantics are
order-sensitive. Similarly, specifying ``ambiguous`` as an
:class:`~numpy.ndarray` is order-sensitive, but you can achieve similar
functionality by specifying ``ambiguous`` as a Series."""
if isinstance(ambiguous, np.ndarray):
raise frame_base.WontImplementError(
"tz_localize(ambiguous=ndarray) is not supported because it makes "
"this operation sensitive to the order of the data. Please use a "
"DeferredSeries instead.",
reason="order-sensitive")
elif isinstance(ambiguous, frame_base.DeferredFrame):
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda s,
ambiguous: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs),
[self._expr, ambiguous._expr],
requires_partition_by=partitionings.Index(),
preserves_partition_by=partitionings.Arbitrary()))
elif ambiguous == 'infer':
# infer attempts to infer based on the order of the timestamps
raise frame_base.WontImplementError(
f"tz_localize(ambiguous={ambiguous!r}) is not allowed because it "
"makes this operation sensitive to the order of the data.",
reason="order-sensitive")
return frame_base.DeferredFrame.wrap(
expressions.ComputedExpression(
'tz_localize',
lambda s: s.dt.tz_localize(*args, ambiguous=ambiguous, **kwargs),
[self._expr],
requires_partition_by=partitionings.Arbitrary(),
preserves_partition_by=partitionings.Arbitrary()))
to_period = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_period',
reason="event-time-semantics")
to_pydatetime = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_pydatetime',
reason="non-deferred-result")
to_pytimedelta = frame_base.wont_implement_method(
pd.core.indexes.accessors.DatetimeProperties, 'to_pytimedelta',
reason="non-deferred-result")
def make_dt_property(method):
def func(df):
return getattr(df.dt, method)
return func
def make_dt_func(method):
def func(df, *args, **kwargs):
return getattr(df.dt, method)(*args, **kwargs)
return func
ELEMENTWISE_DATETIME_METHODS = [
'ceil',
'day_name',
'month_name',
'floor',
'isocalendar',
'round',
'normalize',
'strftime',
'tz_convert',
]
for method in ELEMENTWISE_DATETIME_METHODS:
setattr(_DeferredDatetimeMethods,
method,
frame_base._elementwise_method(
make_dt_func(method),
name=method,
base=pd.core.indexes.accessors.DatetimeProperties))
ELEMENTWISE_DATETIME_PROPERTIES = [
'date',
'day',
'dayofweek',
'dayofyear',
'days_in_month',
'daysinmonth',
'hour',
'is_leap_year',
'is_month_end',
'is_month_start',
'is_quarter_end',
'is_quarter_start',
'is_year_end',
'is_year_start',
'microsecond',
'minute',
'month',
'nanosecond',
'quarter',
'second',
'time',
'timetz',
'week',
'weekday',
'weekofyear',
'year',
]
for method in ELEMENTWISE_DATETIME_PROPERTIES:
setattr(_DeferredDatetimeMethods,
method,
property(frame_base._elementwise_method(
make_dt_property(method),
name=method,
base=pd.core.indexes.accessors.DatetimeProperties)))
for base in ['add',
'sub',
'mul',
'div',
'truediv',
'floordiv',
'mod',
'divmod',
'pow',
'and',
'or']:
for p in ['%s', 'r%s', '__%s__', '__r%s__']:
# TODO: non-trivial level?
name = p % base
if hasattr(pd.Series, name):
setattr(
DeferredSeries,
name,
frame_base._elementwise_method(name, restrictions={'level': None},
base=pd.Series))
if hasattr(pd.DataFrame, name):
setattr(
DeferredDataFrame,
name,
frame_base._elementwise_method(name, restrictions={'level': None},
base=pd.DataFrame))
inplace_name = '__i%s__' % base
if hasattr(pd.Series, inplace_name):
setattr(
DeferredSeries,
inplace_name,
frame_base._elementwise_method(inplace_name, inplace=True,
base=pd.Series))
if hasattr(pd.DataFrame, inplace_name):
setattr(
DeferredDataFrame,
inplace_name,
frame_base._elementwise_method(inplace_name, inplace=True,
base=pd.DataFrame))
for name in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
for p in '%s', '__%s__':
# Note that non-underscore name is used for both as the __xxx__ methods are
# order-sensitive.
setattr(DeferredSeries, p % name,
frame_base._elementwise_method(name, base=pd.Series))
setattr(DeferredDataFrame, p % name,
frame_base._elementwise_method(name, base=pd.DataFrame))
for name in ['__neg__', '__pos__', '__invert__']:
setattr(DeferredSeries, name,
frame_base._elementwise_method(name, base=pd.Series))
setattr(DeferredDataFrame, name,
frame_base._elementwise_method(name, base=pd.DataFrame))
DeferredSeries.multiply = DeferredSeries.mul # type: ignore
DeferredDataFrame.multiply = DeferredDataFrame.mul # type: ignore
def _slice_parts(s):
yield s.start
yield s.stop
yield s.step
def _is_null_slice(s):
return isinstance(s, slice) and all(x is None for x in _slice_parts(s))
def _is_integer_slice(s):
return isinstance(s, slice) and all(
x is None or isinstance(x, int)
for x in _slice_parts(s)) and not _is_null_slice(s)
| apache-2.0 |
sorgerlab/ramm_tox | ramm_tox/imaging_viability_correlation.py | 1 | 2626 | import operator
import collections
import pandas as pd
import numpy as np
from ramm_tox import util, stats
def round_concentration(values):
"""Round concentration values to 4 decimal places in log space."""
return 10 ** np.round(np.log10(values), 4)
def strings_to_wordsets(strings, stop_words=None):
"""Build a dict of wordsets from a list of strings, with optional filter.
For each distinct word found in the list of strings, the wordset dict will
map that word to a set of the strings that contain it. A list of words to
ignore may be passed in stop_words.
"""
string_words = [set(w.split(' ')) for w in (s.lower() for s in strings)]
words = reduce(operator.or_, string_words)
if stop_words:
words -= set(stop_words)
wordsets = collections.OrderedDict(
(w, set(strings[i] for i, s in enumerate(string_words) if w in s))
for w in sorted(words))
return wordsets
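def _wordsets_example():
    """Illustrative sketch only (never called by this script): shows the shape
    of the mapping built by strings_to_wordsets."""
    wordsets = strings_to_wordsets(['big cat', 'small cat'])
    # wordsets == OrderedDict([('big', {'big cat'}),
    #                          ('cat', {'big cat', 'small cat'}),
    #                          ('small', {'small cat'})])
    return wordsets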
util.init_paths()
viability_path = util.data_path.child('CellViability_Combined_mean&variance2.xlsx')
imaging_path = util.data_path.child('HCI_Rep1-4_zscores.csv')
viability_data = pd.read_excel(viability_path, 0)
vd_filter_no_media = ~viability_data.name.str.lower().str.startswith('medium')
viability_data = viability_data[vd_filter_no_media]
vd_filter_72h = viability_data['time [h]'] == 72
viability_data = viability_data[vd_filter_72h].drop('time [h]', axis=1)
viability_data.dose = round_concentration(viability_data.dose)
imaging_data = pd.read_csv(imaging_path, encoding='utf-8')
imaging_filter_no_media = ~imaging_data.pert_iname.str.lower().str.startswith('medium')
imaging_data = imaging_data[imaging_filter_no_media]
imaging_data.pert_dose = round_concentration(imaging_data.pert_dose)
viability_single = viability_data[viability_data.name == 'Omeprazole'] \
[['dose', 'average']].pivot_table(index='dose').average
imaging_single = imaging_data[imaging_data.pert_iname == 'Omeprazole'] \
.drop('pert_iname', axis=1).rename(columns=({'pert_dose': 'dose'})) \
.pivot_table(index='dose', columns='Timepoint [h]')
corr, p = stats.pearsonr(imaging_single.values.T, viability_single.values)
fake_genes = [x[0] + ' t=' + unicode(x[1]) + 'h' for x in imaging_single.columns]
fake_genes = [s.replace('(2)-', '(2) -') for s in fake_genes]
wordsets = strings_to_wordsets(fake_genes, stop_words=['', '-'])
with open('genesets.gmt', 'w') as f:
gmt_rows = ('\t'.join([w, ''] + list(ss)) for w, ss in wordsets.items())
f.write('\n'.join(gmt_rows).encode('utf-8'))
rnk_data = pd.Series(corr, index=fake_genes)
rnk_data.to_csv('data.rnk', sep='\t', encoding='utf8')
| mit |
Blaffie/Hello-world | Programs/Firkant profil program.py | 1 | 5693 | #Program utregning av bøyningsspenning i Firkantprofil
import math
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from tkinter import * # Used for the GUI
F = 1200 #N
lengde_start = 0
lengde_slutt = 2500 #mm
max_bøyespenning = 200 #N/mm2
# variables
lengde_mellom = int(lengde_slutt / 10)
w_x_firkantprofil = ()
w_x_rør = ()
w_y_rør = ()
x_main = [0, ]
y_main = [0, ]
y_main_copy = []
x_main_copy = []
firkantprofil_dim_main = [0, ]
overskrift_graff = ""
def Firkantprofil_hull ():
global w_x_firkantprofil
global Størelse_profil
global overskrift_graff
global t
overskrift_graff = "Firkantprofil"
Størelse_profil = ()
t = 3
firkant_max_size = 100
firkant_min_size = 20
faktor = int(firkant_max_size / 10)
for Størelse_profil in range (firkant_min_size, firkant_max_size+faktor, 10):
B = Størelse_profil
H = B
b = B - (t*2)
h = H - (t*2)
w_x= ((B*H**3) - (b * h **3)) / (6 * H)
print ("B Størelse på firkantprofil: " + str(B))
print ("H Størelse på firkantprofil: " + str(H))
print ("Tykkelse på firkantprofil: " + str(t))
print ()
print("wx Firkantprofil: " + str(round(w_x, 2)))
print ()
utregning_sigma_b(w_x)
firkantprofil_dim_main.append (Størelse_profil)
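# Worked example (illustrative only, not executed): for B = H = 100 mm and
# t = 3 mm the inner dimensions are b = h = 94 mm, so
#   w_x = (100*100**3 - 94*94**3) / (6*100) = 21925104 / 600 ≈ 36542 mm^3
# and at lengde = 2500 mm the bending stress becomes
#   sigma_b = F*lengde / w_x = 1200*2500 / 36542 ≈ 82.1 N/mm^2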
def utregning_sigma_b (w_x):
global x_main_copy
global y_main_copy
lengde = lengde_start
for lengde in range (lengde_start, (lengde_slutt+lengde_mellom),lengde_mellom):
Mb = F * lengde
sigma_b = Mb / w_x
x_main.append(lengde)
y_main.append (sigma_b)
print ("sigmabøy:" + str(round(sigma_b, 2) ) + " N/mm2. "
+ "lengde: " + str(lengde) + " mm")
lengde += lengde_mellom
print ()
def Lag_Graff():
y_max = max_bøyespenning
# list of coordinates
x1 = []
y1 = []
x2 = []
y2 = []
x3 = []
y3 = []
x4 = []
y4 = []
x5 = []
y5 = []
x6 = []
y6 = []
x7 = []
y7 = []
x8 = []
y8 = []
x9 = []
y9 = []
x10 = []
y10 = []
range_min = 1
range_max = 12
for x in range (range_min, range_max):
x1.append ( x_main.pop())
y1.append ( y_main.pop())
for x in range (range_min, range_max):
x2.append ( x_main.pop())
y2.append ( y_main.pop())
for x in range (range_min, range_max):
x3.append ( x_main.pop())
y3.append ( y_main.pop())
for x in range (range_min, range_max):
x4.append ( x_main.pop())
y4.append ( y_main.pop())
for x in range (range_min, range_max):
x5.append ( x_main.pop())
y5.append ( y_main.pop())
for x in range (range_min, range_max):
x6.append ( x_main.pop())
y6.append ( y_main.pop())
for x in range (range_min, range_max):
x7.append ( x_main.pop())
y7.append ( y_main.pop())
for x in range (range_min, range_max):
x8.append ( x_main.pop())
y8.append ( y_main.pop())
for x in range (range_min, range_max):
x9.append ( x_main.pop())
y9.append ( y_main.pop())
"""
for x in range (1, 11):
x10.append ( x_main.pop())
y10.append ( y_main.pop())
"""
style.use("seaborn-dark")
fig = plt.figure()
ax1 = fig.add_subplot(211)
plt.xlabel("Lengde i mm")
plt.ylabel("Sigma bøy N/mm^2")
plt.title("Oversikt over " + overskrift_graff)
firkantprofil_dim_main.reverse()
ax1.plot(x1, y1, label = firkantprofil_dim_main[0],linewidth=2, color = "#ff00ff") #pink
ax1.plot(x2, y2, label = firkantprofil_dim_main[1],linewidth=2, color = "#20e251") #light green
ax1.plot(x3, y3, label = firkantprofil_dim_main[2],linewidth=2, color = "#20a129") #green
ax1.plot(x4, y4, label = firkantprofil_dim_main[3],linewidth=2, color = "#3e18e2") #blue
ax1.plot(x5, y5, label = firkantprofil_dim_main[4],linewidth=2, color = "#e23e18") #orange
ax1.plot(x6, y6, label = firkantprofil_dim_main[5],linewidth=2, color = "#14ded2") #cyan
ax1.plot(x7, y7, label = firkantprofil_dim_main[6],linewidth=2, color = "#efff00") #gold
ax1.plot(x8, y8, label = firkantprofil_dim_main[7],linewidth=2, color = "#52114d") #purple
ax1.plot(x9, y9, label = firkantprofil_dim_main[8],linewidth=2, color = "#147151") #dark green
#ax1.legend()
ax1.legend(bbox_to_anchor=(0., -0.27 , 1., .102), loc=2,
ncol=5, borderaxespad=0.)
# Text at the bottom left
ax1.text(0, -(y_max * 0.15),
"Fargekoder på dimmensjon av " + overskrift_graff +
" i mm. Med en tykkelse på " + str(t) + "mm", fontsize=15)
# max/min of the axes
ax1.set_ylim([0, y_max])
plt.grid(True)
plt.show()
def Lag_GUI():
class Window (Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.master.title("Firkant profil utregninger")
self.pack(fill=BOTH, expand=1)
menu = Menu(self.master)
self.master.config(menu=menu)
file = Menu(menu)
file.add_command(label = "Lag graf", command = Lag_Graff )
file.add_command(label = "Avslut", command = self.client_exit)
menu.add_cascade(label="Valg", menu=file)
def client_exit(self):
exit()
root = Tk()
# size of the window
root.geometry("400x300")
app = Window(root)
root.mainloop()
Firkantprofil_hull()
Lag_GUI()
#Lag_Graff()
| mit |
justincassidy/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
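def _example_feature_bucketing():
    # Illustrative sketch only (not collected as a test): a typical use of
    # murmurhash3_32 is mapping arbitrary keys to a bounded number of buckets,
    # relying on the uniformity checked in test_uniform_distribution above.
    n_buckets = 10
    bucket = murmurhash3_32('feature=value', seed=0, positive=True) % n_buckets
    assert 0 <= bucket < n_buckets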
| bsd-3-clause |
Visual-analitics-2015/-VAST-Challenge-2015-Mini-Challenge-1 | Atraction_Analysis.py | 1 | 2168 |
# coding: utf-8
# # Attraction behavior analysis
# This analysis looks at how many people enter an attraction within a specific time range.
# **Import libraries**
# In[2]:
import pandas as pd
get_ipython().magic(u'matplotlib inline')
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
sns.set_style("darkgrid")
# **Read the data**
# In[3]:
cd /Users/Sebastian/Google\ Drive/Uniandes/Visual\ Analytics/Mini_chalange1/MC1\ 2015\ Data/
# In[4]:
data = pd.read_csv("park-movement-Fri.csv")
# **Transform the time data**
# In[5]:
data["time"] = pd.to_datetime(data.Timestamp, format="%Y-%m-%d %H:%M:%S")
# **Identify check-ins**
# In[6]:
checkins = data.loc[data["type"]=="check-in"]
# In[40]:
movements = data.loc[data["type"]=="movement"]
# **Set a time range**
# In[86]:
time = (checkins["time"] >= '2014-06-06 08:00:00') & (checkins["time"] <= '2014-06-06 23:59:59')
# **Find the check-in data within the range**
# In[87]:
checkins_by_time = checkins.loc[time]
# **Create a class to work with each attraction**
# In[88]:
class Atraccion(object):
def __init__(self,X,Y,Visitors):
self.X = X
self.Y = Y
self.Visitors = Visitors
self.Visits = Visitors["id"].count()
self.Wait_Time = 0
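# Illustrative sketch (hypothetical values, not part of the analysis below):
# each Atraccion bundles the check-in rows that share one (X, Y) location.
#
#   a = Atraccion(45, 60, visitors_df)  # visitors_df: check-ins at (45, 60)
#   a.Visits                            # number of check-ins at that location
#   a.Wait_Time                         # filled in later from movement data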
# **Fill the Atraccion objects**
# In[89]:
checkins_by_range = checkins_by_time.groupby(['X', 'Y'])
Atracciones = []
for key,group in checkins_by_range:
Atracciones.append(Atraccion(key[0],key[1],group))
# In[ ]:
for atrac in Atracciones:
wt=0
print "Atraccion"
for index, per in atrac.Visitors.iterrows():
t_i = per["time"]
t_f = movements.loc[(movements["id"] == per["id"]) & (movements["time"] >t_i)]["time"].min()
t_w = (t_f - t_i) / np.timedelta64(1,'m')
wt = wt + t_w
atrac.Wait_Time = wt / atrac.Visits
# In[93]:
for Atr in Atracciones:
print str("X: ")+str(Atr.X)
print str("Y: ")+str(Atr.Y)
print str("Visitantes: ")+str(Atr.Visits)
print str("Tiempo de espera: ")+str(Atr.Wait_Time)
# In[91]:
len(Atracciones)
# In[ ]:
| mit |
hitszxp/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 21 | 4761 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
sumspr/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
xubenben/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaingin 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
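    # h is the expected per-sample log-likelihood under the true generating
    # distribution, an isotropic Gaussian with sigma = 0.1 (i.e. minus its
    # differential entropy), so the ratio ll1 / h should be close to 1.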
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
wzbozon/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/array/routines.py | 2 | 38268 | from __future__ import division, print_function, absolute_import
import inspect
import math
import warnings
from distutils.version import LooseVersion
from functools import wraps, partial
from numbers import Real, Integral
import numpy as np
from toolz import concat, sliding_window, interleave
from .. import sharedict
from ..compatibility import Iterable
from ..core import flatten
from ..base import tokenize
from ..utils import funcname
from . import chunk
from .creation import arange
from .utils import safe_wraps, validate_axis
from .wrap import ones
from .ufunc import multiply
from .core import (Array, map_blocks, elemwise, from_array, asarray,
asanyarray, concatenate, stack, atop, broadcast_shapes,
is_scalar_for_elemwise, broadcast_to, tensordot_lookup)
from .einsumfuncs import einsum # noqa
@wraps(np.array)
def array(x, dtype=None, ndmin=None):
while ndmin is not None and x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
@wraps(np.result_type)
def result_type(*args):
args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]
return np.result_type(*args)
@wraps(np.atleast_3d)
def atleast_3d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None, None]
elif x.ndim == 1:
x = x[None, :, None]
elif x.ndim == 2:
x = x[:, :, None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@wraps(np.atleast_2d)
def atleast_2d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None]
elif x.ndim == 1:
x = x[None, :]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@wraps(np.atleast_1d)
def atleast_1d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@wraps(np.vstack)
def vstack(tup):
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0)
@wraps(np.hstack)
def hstack(tup):
if all(x.ndim == 1 for x in tup):
return concatenate(tup, axis=0)
else:
return concatenate(tup, axis=1)
@wraps(np.dstack)
def dstack(tup):
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2)
@wraps(np.swapaxes)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
if axis1 < 0:
axis1 = axis1 + a.ndim
if axis2 < 0:
axis2 = axis2 + a.ndim
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return atop(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2,
dtype=a.dtype)
@wraps(np.transpose)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
else:
axes = tuple(range(a.ndim))[::-1]
axes = tuple(d + a.ndim if d < 0 else d for d in axes)
return atop(np.transpose, axes, a, tuple(range(a.ndim)),
dtype=a.dtype, axes=axes)
def flip(m, axis):
"""
Reverse element order along axis.
Parameters
----------
axis : int
Axis to reverse element order of.
Returns
-------
reversed array : ndarray
"""
m = asanyarray(m)
sl = m.ndim * [slice(None)]
try:
sl[axis] = slice(None, None, -1)
except IndexError:
raise ValueError(
"`axis` of %s invalid for %s-D array" % (str(axis), str(m.ndim))
)
sl = tuple(sl)
return m[sl]
@wraps(np.flipud)
def flipud(m):
return flip(m, 0)
@wraps(np.fliplr)
def fliplr(m):
return flip(m, 1)
alphabet = 'abcdefghijklmnopqrstuvwxyz'
ALPHABET = alphabet.upper()
def _tensordot(a, b, axes):
x = max([a, b], key=lambda x: x.__array_priority__)
tensordot = tensordot_lookup.dispatch(type(x))
# workaround may be removed when numpy version (currently 1.13.0) is bumped
a_dims = np.array([a.shape[i] for i in axes[0]])
b_dims = np.array([b.shape[i] for i in axes[1]])
if len(a_dims) > 0 and (a_dims == b_dims).all() and a_dims.min() == 0:
x = np.zeros(tuple([s for i, s in enumerate(a.shape) if i not in axes[0]] +
[s for i, s in enumerate(b.shape) if i not in axes[1]]))
else:
x = tensordot(a, b, axes=axes)
ind = [slice(None, None)] * x.ndim
for a in sorted(axes[0]):
ind.insert(a, None)
x = x[tuple(ind)]
return x
@wraps(np.tensordot)
def tensordot(lhs, rhs, axes=2):
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, Integral):
left_axes = (left_axes,)
if isinstance(right_axes, Integral):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
dt = np.promote_types(lhs.dtype, rhs.dtype)
left_index = list(alphabet[:lhs.ndim])
right_index = list(ALPHABET[:rhs.ndim])
out_index = left_index + right_index
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
intermediate = atop(_tensordot, out_index,
lhs, left_index,
rhs, right_index, dtype=dt,
axes=(left_axes, right_axes))
result = intermediate.sum(axis=left_axes)
return result
@wraps(np.dot)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
@wraps(np.vdot)
def vdot(a, b):
return dot(a.conj().ravel(), b.ravel())
@wraps(np.matmul)
def matmul(a, b):
a = asanyarray(a)
b = asanyarray(b)
if a.ndim == 0 or b.ndim == 0:
raise ValueError("`matmul` does not support scalars.")
a_is_1d = False
if a.ndim == 1:
a_is_1d = True
a = a[np.newaxis, :]
b_is_1d = False
if b.ndim == 1:
b_is_1d = True
b = b[:, np.newaxis]
if a.ndim < b.ndim:
a = a[(b.ndim - a.ndim) * (np.newaxis,)]
elif a.ndim > b.ndim:
b = b[(a.ndim - b.ndim) * (np.newaxis,)]
out = atop(
np.matmul, tuple(range(1, a.ndim + 1)),
a, tuple(range(1, a.ndim - 1)) + (a.ndim - 1, 0,),
b, tuple(range(1, a.ndim - 1)) + (0, a.ndim,),
dtype=result_type(a, b),
concatenate=True
)
if a_is_1d:
out = out[..., 0, :]
if b_is_1d:
out = out[..., 0]
return out
@wraps(np.outer)
def outer(a, b):
a = a.flatten()
b = b.flatten()
dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype
return atop(np.outer, "ij", a, "i", b, "j", dtype=dtype)
def _inner_apply_along_axis(arr,
func1d,
func1d_axis,
func1d_args,
func1d_kwargs):
return np.apply_along_axis(
func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs
)
@wraps(np.apply_along_axis)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
arr = asarray(arr)
# Validate and normalize axis.
arr.shape[axis]
axis = len(arr.shape[:axis])
# Test out some data with the function.
test_data = np.ones((1,), dtype=arr.dtype)
test_result = np.array(func1d(test_data, *args, **kwargs))
if (LooseVersion(np.__version__) < LooseVersion("1.13.0") and
(np.array(test_result.shape) > 1).sum(dtype=int) > 1):
raise ValueError(
"No more than one non-trivial dimension allowed in result. "
"Need NumPy 1.13.0+ for this functionality."
)
# Rechunk so that func1d is applied over the full axis.
arr = arr.rechunk(
arr.chunks[:axis] + (arr.shape[axis:axis + 1],) + arr.chunks[axis + 1:]
)
# Map func1d over the data to get the result
# Adds other axes as needed.
result = arr.map_blocks(
_inner_apply_along_axis,
name=funcname(func1d) + '-along-axis',
dtype=test_result.dtype,
chunks=(arr.chunks[:axis] + test_result.shape + arr.chunks[axis + 1:]),
drop_axis=axis,
new_axis=list(range(axis, axis + test_result.ndim, 1)),
func1d=func1d,
func1d_axis=axis,
func1d_args=args,
func1d_kwargs=kwargs,
)
return result
@wraps(np.apply_over_axes)
def apply_over_axes(func, a, axes):
# Validate arguments
a = asarray(a)
try:
axes = tuple(axes)
except TypeError:
axes = (axes,)
sl = a.ndim * (slice(None),)
# Compute using `apply_along_axis`.
result = a
for i in axes:
result = apply_along_axis(func, i, result, 0)
# Restore original dimensionality or error.
if result.ndim == (a.ndim - 1):
result = result[sl[:i] + (None,)]
elif result.ndim != a.ndim:
raise ValueError(
"func must either preserve dimensionality of the input"
" or reduce it by one."
)
return result
@wraps(np.ptp)
def ptp(a, axis=None):
return a.max(axis=axis) - a.min(axis=axis)
@wraps(np.diff)
def diff(a, n=1, axis=-1):
a = asarray(a)
n = int(n)
axis = int(axis)
sl_1 = a.ndim * [slice(None)]
sl_2 = a.ndim * [slice(None)]
sl_1[axis] = slice(1, None)
sl_2[axis] = slice(None, -1)
sl_1 = tuple(sl_1)
sl_2 = tuple(sl_2)
r = a
for i in range(n):
r = r[sl_1] - r[sl_2]
return r
@wraps(np.ediff1d)
def ediff1d(ary, to_end=None, to_begin=None):
ary = asarray(ary)
aryf = ary.flatten()
r = aryf[1:] - aryf[:-1]
r = [r]
if to_begin is not None:
r = [asarray(to_begin).flatten()] + r
if to_end is not None:
r = r + [asarray(to_end).flatten()]
r = concatenate(r)
return r
def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):
"""
x: nd-array
array of one block
coord: 1d-array or scalar
coordinate along which the gradient is computed.
axis: int
axis along which the gradient is computed
array_locs:
actual location along axis. None if coordinate is scalar
grad_kwargs:
keyword to be passed to np.gradient
"""
block_loc = block_id[axis]
if array_locs is not None:
coord = coord[array_locs[0][block_loc]:array_locs[1][block_loc]]
grad = np.gradient(x, coord, axis=axis, **grad_kwargs)
return grad
@wraps(np.gradient)
def gradient(f, *varargs, **kwargs):
f = asarray(f)
kwargs["edge_order"] = math.ceil(kwargs.get("edge_order", 1))
if kwargs["edge_order"] > 2:
raise ValueError("edge_order must be less than or equal to 2.")
drop_result_list = False
axis = kwargs.pop("axis", None)
if axis is None:
axis = tuple(range(f.ndim))
elif isinstance(axis, Integral):
drop_result_list = True
axis = (axis,)
axis = validate_axis(axis, f.ndim)
if len(axis) != len(set(axis)):
raise ValueError("duplicate axes not allowed")
axis = tuple(ax % f.ndim for ax in axis)
if varargs == ():
varargs = (1,)
if len(varargs) == 1:
varargs = len(axis) * varargs
if len(varargs) != len(axis):
raise TypeError(
"Spacing must either be a single scalar, or a scalar / 1d-array "
"per axis"
)
if issubclass(f.dtype.type, (np.bool8, Integral)):
f = f.astype(float)
elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:
f = f.astype(float)
results = []
for i, ax in enumerate(axis):
for c in f.chunks[ax]:
if np.min(c) < kwargs["edge_order"] + 1:
raise ValueError(
'Chunk size must be larger than edge_order + 1. '
                    'Minimum chunk for axis {} is {}. Rechunk to '
                    'proceed.'.format(ax, np.min(c)))
if np.isscalar(varargs[i]):
array_locs = None
else:
if isinstance(varargs[i], Array):
raise NotImplementedError(
                    'dask array coordinates are not supported.')
# coordinate position for each block taking overlap into account
chunk = np.array(f.chunks[ax])
array_loc_stop = np.cumsum(chunk) + 1
array_loc_start = array_loc_stop - chunk - 2
array_loc_stop[-1] -= 1
array_loc_start[0] = 0
array_locs = (array_loc_start, array_loc_stop)
results.append(f.map_overlap(
_gradient_kernel,
dtype=f.dtype,
depth={j: 1 if j == ax else 0 for j in range(f.ndim)},
boundary="none",
coord=varargs[i],
axis=ax,
array_locs=array_locs,
grad_kwargs=kwargs,
))
if drop_result_list:
results = results[0]
return results
@wraps(np.bincount)
def bincount(x, weights=None, minlength=None):
if minlength is None:
raise TypeError("Must specify minlength argument in da.bincount")
assert x.ndim == 1
if weights is not None:
assert weights.chunks == x.chunks
# Call np.bincount on each block, possibly with weights
token = tokenize(x, weights, minlength)
name = 'bincount-' + token
if weights is not None:
dsk = {(name, i): (np.bincount, (x.name, i), (weights.name, i), minlength)
for i, _ in enumerate(x.__dask_keys__())}
dtype = np.bincount([1], weights=[1]).dtype
else:
dsk = {(name, i): (np.bincount, (x.name, i), None, minlength)
for i, _ in enumerate(x.__dask_keys__())}
dtype = np.bincount([]).dtype
# Sum up all of the intermediate bincounts per block
name = 'bincount-sum-' + token
dsk[(name, 0)] = (np.sum, list(dsk), 0)
chunks = ((minlength,),)
dsk = sharedict.merge((name, dsk), x.dask)
if weights is not None:
dsk.update(weights.dask)
return Array(dsk, name, chunks, dtype)
@wraps(np.digitize)
def digitize(a, bins, right=False):
bins = np.asarray(bins)
dtype = np.digitize([0], bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of :func:`numpy.histogram`.
Follows the signature of :func:`numpy.histogram` exactly with the following
exceptions:
- Either an iterable specifying the ``bins`` or the number of ``bins``
and a ``range`` argument is required as computing ``min`` and ``max``
over blocked arrays is an expensive operation that must be performed
explicitly.
- ``weights`` must be a dask.array.Array with the same block structure
as ``a``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,
8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
    if bins is None or (range is None and not np.iterable(bins)):
raise ValueError('dask.array.histogram requires either bins '
'or bins and range to be defined.')
if weights is not None and weights.chunks != a.chunks:
raise ValueError('Input array and weights must have the same '
'chunked structure')
if not np.iterable(bins):
bin_token = bins
mn, mx = range
if mn == mx:
mn -= 0.5
mx += 0.5
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
else:
bin_token = bins
token = tokenize(a, bin_token, range, normed, weights, density)
nchunks = len(list(flatten(a.__dask_keys__())))
chunks = ((1,) * nchunks, (len(bins) - 1,))
name = 'histogram-sum-' + token
# Map the histogram to all bins
def block_hist(x, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
if weights is None:
dsk = {(name, i, 0): (block_hist, k, range)
for i, k in enumerate(flatten(a.__dask_keys__()))}
dtype = np.histogram([])[0].dtype
else:
a_keys = flatten(a.__dask_keys__())
w_keys = flatten(weights.__dask_keys__())
dsk = {(name, i, 0): (block_hist, k, range, w)
for i, (k, w) in enumerate(zip(a_keys, w_keys))}
dtype = weights.dtype
all_dsk = sharedict.merge(a.dask, (name, dsk))
if weights is not None:
all_dsk.update(weights.dask)
mapped = Array(all_dsk, name, chunks, dtype=dtype)
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
# deprecated, will be removed from Numpy 2.0
if normed:
db = from_array(np.diff(bins).astype(float), chunks=n.chunks)
return n / (n * db).sum(), bins
else:
return n, bins
@wraps(np.cov)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
# This was copied almost verbatim from np.cov
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X = X - X.mean(axis=1 - axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
@wraps(np.corrcoef)
def corrcoef(x, y=None, rowvar=1):
from .ufunc import sqrt
from .creation import diag
c = cov(x, y, rowvar)
if c.shape == ():
return c / c
d = diag(c)
d = d.reshape((d.shape[0], 1))
sqr_d = sqrt(d)
return (c / sqr_d) / sqr_d.T
@wraps(np.round)
def round(a, decimals=0):
return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
def _unique_internal(ar, indices, counts, return_inverse=False):
"""
Helper/wrapper function for :func:`numpy.unique`.
Uses :func:`numpy.unique` to find the unique values for the array chunk.
Given this chunk may not represent the whole array, also take the
``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``
and reduce them in the same fashion as ``ar`` is reduced. Namely sum
any counts that correspond to the same value and take the smallest
index that corresponds to the same value.
To handle the inverse mapping from the unique values to the original
array, simply return a NumPy array created with ``arange`` with enough
values to correspond 1-to-1 to the unique values. While there is more
work needed to be done to create the full inverse mapping for the
original array, this provides enough information to generate the
inverse mapping in Dask.
Given Dask likes to have one array returned from functions like
``atop``, some formatting is done to stuff all of the resulting arrays
into one big NumPy structured array. Dask is then able to handle this
object and can split it apart into the separate results on the Dask
side, which then can be passed back to this function in concatenated
chunks for further reduction or can be return to the user to perform
other forms of analysis.
By handling the problem in this way, it does not matter where a chunk
is in a larger array or how big it is. The chunk can still be computed
    in the same way. Also it does not matter if the chunk is the result of
other chunks being run through this function multiple times. The end
result will still be just as accurate using this strategy.
"""
return_index = (indices is not None)
return_counts = (counts is not None)
u = np.unique(ar)
dt = [("values", u.dtype)]
if return_index:
dt.append(("indices", np.intp))
if return_inverse:
dt.append(("inverse", np.intp))
if return_counts:
dt.append(("counts", np.intp))
r = np.empty(u.shape, dtype=dt)
r["values"] = u
if return_inverse:
r["inverse"] = np.arange(len(r), dtype=np.intp)
if return_index or return_counts:
for i, v in enumerate(r["values"]):
m = (ar == v)
if return_index:
indices[m].min(keepdims=True, out=r["indices"][i:i + 1])
if return_counts:
counts[m].sum(keepdims=True, out=r["counts"][i:i + 1])
return r
@wraps(np.unique)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
ar = ar.ravel()
# Run unique on each chunk and collect results in a Dask Array of
# unknown size.
args = [ar, "i"]
out_dtype = [("values", ar.dtype)]
if return_index:
args.extend([
arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]),
"i"
])
out_dtype.append(("indices", np.intp))
else:
args.extend([None, None])
if return_counts:
args.extend([
ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]),
"i"
])
out_dtype.append(("counts", np.intp))
else:
args.extend([None, None])
out = atop(
_unique_internal, "i",
*args,
dtype=out_dtype,
return_inverse=False
)
out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
# Take the results from the unique chunks and do the following.
#
# 1. Collect all results as arguments.
# 2. Concatenate each result into one big array.
# 3. Pass all results as arguments to the internal unique again.
#
# TODO: This should be replaced with a tree reduction using this strategy.
# xref: https://github.com/dask/dask/issues/2851
out_parts = [out["values"]]
if return_index:
out_parts.append(out["indices"])
else:
out_parts.append(None)
if return_counts:
out_parts.append(out["counts"])
else:
out_parts.append(None)
name = 'unique-aggregate-' + out.name
dsk = {
(name, 0): (
(_unique_internal,) +
tuple(
                (np.concatenate, o.__dask_keys__())
if hasattr(o, "__dask_keys__") else o
for o in out_parts
) +
(return_inverse,)
)
}
out_dtype = [("values", ar.dtype)]
if return_index:
out_dtype.append(("indices", np.intp))
if return_inverse:
out_dtype.append(("inverse", np.intp))
if return_counts:
out_dtype.append(("counts", np.intp))
out = Array(
sharedict.merge(*(
[(name, dsk)] +
[o.dask for o in out_parts if hasattr(o, "__dask_keys__")]
)),
name,
((np.nan,),),
out_dtype
)
# Split out all results to return to the user.
result = [out["values"]]
if return_index:
result.append(out["indices"])
if return_inverse:
# Using the returned unique values and arange of unknown length, find
# each value matching a unique value and replace it with its
# corresponding index or `0`. There should be only one entry for this
# index in axis `1` (the one of unknown length). Reduce axis `1`
# through summing to get an array with known dimensionality and the
# mapping of the original values.
mtches = (ar[:, None] == out["values"][None, :]).astype(np.intp)
result.append((mtches * out["inverse"]).sum(axis=1))
if return_counts:
result.append(out["counts"])
if len(result) == 1:
result = result[0]
else:
result = tuple(result)
return result
def _isin_kernel(element, test_elements, assume_unique=False):
values = np.in1d(element.ravel(), test_elements,
assume_unique=assume_unique)
return values.reshape(element.shape + (1,) * test_elements.ndim)
@safe_wraps(getattr(np, 'isin', None))
def isin(element, test_elements, assume_unique=False, invert=False):
element = asarray(element)
test_elements = asarray(test_elements)
element_axes = tuple(range(element.ndim))
test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))
mapped = atop(_isin_kernel, element_axes + test_axes,
element, element_axes,
test_elements, test_axes,
adjust_chunks={axis: lambda _: 1 for axis in test_axes},
dtype=bool,
assume_unique=assume_unique)
result = mapped.any(axis=test_axes)
if invert:
result = ~result
return result
@wraps(np.roll)
def roll(array, shift, axis=None):
result = array
if axis is None:
result = ravel(result)
if not isinstance(shift, Integral):
raise TypeError(
"Expect `shift` to be an instance of Integral"
" when `axis` is None."
)
shift = (shift,)
axis = (0,)
else:
try:
len(shift)
except TypeError:
shift = (shift,)
try:
len(axis)
except TypeError:
axis = (axis,)
if len(shift) != len(axis):
raise ValueError("Must have the same number of shifts as axes.")
for i, s in zip(axis, shift):
s = -s
s %= result.shape[i]
sl1 = result.ndim * [slice(None)]
sl2 = result.ndim * [slice(None)]
sl1[i] = slice(s, None)
sl2[i] = slice(None, s)
sl1 = tuple(sl1)
sl2 = tuple(sl2)
result = concatenate([result[sl1], result[sl2]], axis=i)
result = result.reshape(array.shape)
return result
@wraps(np.ravel)
def ravel(array):
return array.reshape((-1,))
@wraps(np.squeeze)
def squeeze(a, axis=None):
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
elif not isinstance(axis, tuple):
axis = (axis,)
if any(a.shape[i] != 1 for i in axis):
raise ValueError("cannot squeeze axis with size other than one")
axis = validate_axis(axis, a.ndim)
sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))
return a[sl]
@wraps(np.compress)
def compress(condition, a, axis=None):
if axis is None:
a = a.ravel()
axis = 0
axis = validate_axis(axis, a.ndim)
# Only coerce non-lazy values to numpy arrays
if not isinstance(condition, Array):
condition = np.array(condition, dtype=bool)
if condition.ndim != 1:
raise ValueError("Condition must be one dimensional")
if isinstance(condition, Array):
if len(condition) < a.shape[axis]:
a = a[tuple(slice(None, len(condition))
if i == axis else slice(None)
for i in range(a.ndim))]
inds = tuple(range(a.ndim))
out = atop(np.compress, inds, condition, (inds[axis],), a, inds,
axis=axis, dtype=a.dtype)
out._chunks = tuple((np.NaN,) * len(c) if i == axis else c
for i, c in enumerate(out.chunks))
return out
else:
# Optimized case when condition is known
if len(condition) < a.shape[axis]:
condition = condition.copy()
condition.resize(a.shape[axis])
slc = ((slice(None),) * axis + (condition, ) +
(slice(None),) * (a.ndim - axis - 1))
return a[slc]
@wraps(np.extract)
def extract(condition, arr):
if not isinstance(condition, Array):
condition = np.array(condition, dtype=bool)
return compress(condition.ravel(), arr.ravel())
@wraps(np.take)
def take(a, indices, axis=0):
axis = validate_axis(axis, a.ndim)
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(lambda block: np.take(a, block, axis),
chunks=indices.chunks,
dtype=a.dtype)
@wraps(np.around)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def _asarray_isnull(values):
import pandas as pd
return np.asarray(pd.isnull(values))
def isnull(values):
""" pandas.isnull for dask arrays """
# eagerly raise ImportError, if pandas isn't available
import pandas as pd # noqa
return elemwise(_asarray_isnull, values, dtype='bool')
def notnull(values):
""" pandas.notnull for dask arrays """
return ~isnull(values)
@wraps(np.isclose)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype='bool')
@wraps(np.allclose)
def allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
def variadic_choose(a, *choices):
return np.choose(a, choices)
@wraps(np.choose)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
def _isnonzero_vec(v):
return bool(np.count_nonzero(v))
_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])
def isnonzero(a):
try:
np.zeros(tuple(), dtype=a.dtype).astype(bool)
except ValueError:
######################################################
# Handle special cases where conversion to bool does #
# not work correctly. #
# #
# xref: https://github.com/numpy/numpy/issues/9479 #
######################################################
return a.map_blocks(_isnonzero_vec, dtype=bool)
else:
return a.astype(bool)
@wraps(np.argwhere)
def argwhere(a):
from .creation import indices
a = asarray(a)
nz = isnonzero(a).flatten()
ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)
if ind.ndim > 1:
ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)
ind = compress(nz, ind, axis=0)
return ind
@wraps(np.where)
def where(condition, x=None, y=None):
if (x is None) != (y is None):
raise ValueError("either both or neither of x and y should be given")
if (x is None) and (y is None):
return nonzero(condition)
if np.isscalar(condition):
dtype = result_type(x, y)
x = asarray(x)
y = asarray(y)
shape = broadcast_shapes(x.shape, y.shape)
out = x if condition else y
return broadcast_to(out, shape).astype(dtype)
else:
return elemwise(np.where, condition, x, y)
@wraps(np.count_nonzero)
def count_nonzero(a, axis=None):
return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)
@wraps(np.flatnonzero)
def flatnonzero(a):
return argwhere(asarray(a).ravel())[:, 0]
@wraps(np.nonzero)
def nonzero(a):
ind = argwhere(a)
if ind.ndim > 1:
return tuple(ind[:, i] for i in range(ind.shape[1]))
else:
return (ind,)
def _int_piecewise(x, *condlist, **kwargs):
return np.piecewise(
x, list(condlist), kwargs["funclist"],
*kwargs["func_args"], **kwargs["func_kw"]
)
@wraps(np.piecewise)
def piecewise(x, condlist, funclist, *args, **kw):
return map_blocks(
_int_piecewise,
x, *condlist,
dtype=x.dtype,
name="piecewise",
funclist=funclist, func_args=args, func_kw=kw
)
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False):
if (not trim_excess and
not all(bd % div == 0 for i, div in axes.items()
for bd in x.chunks[i])):
msg = "Coarsening factor does not align with block dimensions"
raise ValueError(msg)
if 'dask' in inspect.getfile(reduction):
reduction = getattr(np, reduction.__name__)
name = 'coarsen-' + tokenize(reduction, x, axes, trim_excess)
dsk = {(name,) + key[1:]: (chunk.coarsen, reduction, key, axes, trim_excess)
for key in flatten(x.__dask_keys__())}
chunks = tuple(tuple(int(bd // axes.get(i, 1)) for bd in bds)
for i, bds in enumerate(x.chunks))
dt = reduction(np.empty((1,) * x.ndim, dtype=x.dtype)).dtype
return Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=dt)
def split_at_breaks(array, breaks, axis=0):
""" Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@wraps(np.insert)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
    axis = validate_axis(axis, arr.ndim)
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
'da.insert only implemented for monotonic ``obj`` argument')
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, 'ndim', 0) == 0:
# we need to turn values into a dask array
name = 'values-' + tokenize(values)
dtype = getattr(values, 'dtype', type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(len(obj) if axis == n else s
for n, s in enumerate(arr.shape))
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd)
in enumerate(zip(arr.chunks,
values.chunks)))
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
@wraps(np.average)
def average(a, axis=None, weights=None, returned=False):
# This was minimally modified from numpy.average
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
a = asanyarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size / avg.size)
else:
wgt = asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=result_dtype)
avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl
if returned:
if scl.shape != avg.shape:
scl = broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
| gpl-3.0 |
Dbastos1710/binary_fibonacci | columns.py | 1 | 1424 | import numpy as np
def fib_iterative(n):
a, b = 0, 1
while n > 0:
a, b = b, a + b
n -= 1
return a
def format_bin(number):
return format(number, '080b')
n = 100
fib_list = [fib_iterative(i) for i in range(n + 1)]
fib_list_bin = [format_bin(i) for i in fib_list]
fib_list_bin_splited = []
for bin_number in fib_list_bin:
bin_list = []
for i in bin_number:
bin_list.append(int(i))
fib_list_bin_splited.append(bin_list)
splited_binary_table = np.array(fib_list_bin_splited)
def get_column(binary_table, column_num):
    # total number of columns; the -1 is because shape returns the count, and with -1 the first column is treated as index 0
total_columns = binary_table.shape[1] - 1
    # total_columns - column_num inverts the lookup index into the matrix
column = binary_table[:, total_columns - column_num]
    # np.trim_zeros(column, 'f') removes the leading zeros from the column
return np.trim_zeros(column, 'f')
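# For example, with the '080b' formatting above the table has 80 columns, so
# get_column(splited_binary_table, 0) returns the column of least-significant
# bits (matrix column 79), with any leading zeros trimmed off the front.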
def get_first_digits_from_column(binary_table, column_num):
column = get_column(binary_table, column_num)
column_bin_number = 2 ** column_num
total_patterns_to_get = 3 * column_bin_number
return column[:total_patterns_to_get]
line_patterns = get_first_digits_from_column(splited_binary_table, 4)
x = np.arange(len(line_patterns))
y = line_patterns
# Plot the binary table
import matplotlib.pyplot as plt
plt.bar(x, y, width=0.6)
plt.show()
| mit |
sjvasquez/AIChallenge | skip-thought/eval_trec.py | 2 | 3280 | '''
Evaluation code for the TREC dataset
'''
import numpy as np
import skipthoughts
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold
from sklearn.utils import shuffle
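# Typical usage (a sketch; it assumes the accompanying skipthoughts module
# exposes load_model() as in the original skip-thoughts release):
#
#   import skipthoughts
#   model = skipthoughts.load_model()
#   evaluate(model, k=10, evalcv=True, evaltest=True)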
def evaluate(model, k=10, seed=1234, evalcv=True, evaltest=False):
"""
Run experiment
k: number of CV folds
    evaltest: whether to evaluate on the test set
"""
print 'Preparing data...'
traintext, testtext = load_data()
train, train_labels = prepare_data(traintext)
test, test_labels = prepare_data(testtext)
train_labels = prepare_labels(train_labels)
test_labels = prepare_labels(test_labels)
train, train_labels = shuffle(train, train_labels, random_state=seed)
print 'Computing training skipthoughts...'
trainF = skipthoughts.encode(model, train, verbose=False, use_eos=False)
if evalcv:
print 'Running cross-validation...'
interval = [2**t for t in range(0,9,1)] # coarse-grained
C = eval_kfold(trainF, train_labels, k=k, scan=interval, seed=seed)
if evaltest:
if not evalcv:
C = 128 # Best parameter found from CV
print 'Computing testing skipthoughts...'
testF = skipthoughts.encode(model, test, verbose=False, use_eos=False)
print 'Evaluating...'
clf = LogisticRegression(C=C)
clf.fit(trainF, train_labels)
yhat = clf.predict(testF)
print 'Test accuracy: ' + str(clf.score(testF, test_labels))
def load_data(loc='./data/'):
"""
Load the TREC question-type dataset
"""
train, test = [], []
with open(loc + 'train_5500.label', 'rb') as f:
for line in f:
train.append(line.strip())
with open(loc + 'TREC_10.label', 'rb') as f:
for line in f:
test.append(line.strip())
return train, test
def prepare_data(text):
"""
Prepare data
"""
labels = [t.split()[0] for t in text]
labels = [l.split(':')[0] for l in labels]
X = [t.split()[1:] for t in text]
X = [' '.join(t) for t in X]
return X, labels
def prepare_labels(labels):
"""
Process labels to numerical values
"""
d = {}
count = 0
setlabels = set(labels)
for w in setlabels:
d[w] = count
count += 1
idxlabels = np.array([d[w] for w in labels])
return idxlabels
def eval_kfold(features, labels, k=10, scan=[2**t for t in range(0,9,1)], seed=1234):
"""
Perform k-fold cross validation
"""
npts = len(features)
kf = KFold(npts, n_folds=k, random_state=seed)
scores = []
for s in scan:
scanscores = []
for train, test in kf:
# Split data
X_train = features[train]
y_train = labels[train]
X_test = features[test]
y_test = labels[test]
# Train classifier
clf = LogisticRegression(C=s)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
scanscores.append(score)
print (s, score)
# Append mean score
scores.append(np.mean(scanscores))
print scores
# Get the index of the best score
s_ind = np.argmax(scores)
s = scan[s_ind]
print (s_ind, s)
return s
| mit |
diydrones/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 32 | 14968 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
# correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_TYPE to 10 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
    # 8) Enter the following to run the analysis: 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
            max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
            min_num_points = 100 # minimum number of points required for a curve fit - this is a necessary, but not sufficient, condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
            # taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
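            # Worked example of the update rule above (illustrative numbers only):
            # with an existing FLOW_FXSCALER of 0 and a fitted slope of 0.8, the
            # new value is int(1000 * ((1 + 0.0) / 0.8 - 1)) = 250, i.e. the flow
            # output is scaled up by 25% to restore a unity slope against gyro rate.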
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
| gpl-3.0 |
MIT-LCP/mimic-code | mimic-cxr/dcm/export_metadata.py | 1 | 5987 | # This script sxtracts meta-data from DICOMs and place it into two files:
# (1) for sequence data, output it as a json
# (2) for tabular data, output it as a CSV (readable by pandas)
# Note that the function does *not* output values if they are longer than 100 characters.
# This avoids outputting look up tables.
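# Example invocation (the paths below are placeholders for your own data):
#
#   python export_metadata.py --data ./files --out dicom-metadata.csv.gz --number 100
#
# This limits parsing to 100 DICOMs and writes dicom-metadata.csv.gz plus a
# dicom-metadata.json file holding the sequence data.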
import os
import argparse
import sys
import gzip
from pathlib import Path
import json
import pandas as pd
from tqdm import tqdm
import pydicom
parser = argparse.ArgumentParser(description='Extract meta-data from DICOMs')
parser.add_argument('--data', '-d',
default='./files',
help='path to DICOM format images')
parser.add_argument('--out', '-o', default='dicom-metadata.csv.gz',
help=('name out dataframe output, '
'(default: dicom-metadata.csv.gz), '
'note: this is a compressed format.'))
parser.add_argument('--json', '-j', default=None,
help=('name of the output json file, '
'(default: <output-stem>.json)'))
parser.add_argument('--number', '-n', type=int, default=None,
help=('limit the number of DICOMs to process '
' (default: None).'))
def recurse(ds):
"""
Recurses through sequences and adds them to a dictionary
    Does not save element values longer than 100 items, but
notes their existence in the final dictionary.
"""
tmp_dict = dict()
for elem in ds:
if elem.VR == 'SQ':
# do not include look up tables
if 'LUT' not in elem.name:
[recurse(item) for item in elem]
else:
e = elem.tag.group << 16 | elem.tag.element
# Save element value to a dictionary
# *unless* it is huge - these are usually images
if hasattr(elem.value, '__len__'):
if elem.value.__len__() > 100:
tmp_dict[e] = None
else:
if type(elem.value) is pydicom.multival.MultiValue:
tmp_dict[e] = list(elem.value)
else:
tmp_dict[e] = elem.value
else:
if type(elem.value) is pydicom.multival.MultiValue:
tmp_dict[e] = list(elem.value)
else:
tmp_dict[e] = elem.value
return tmp_dict
if __name__ == "__main__":
args = parser.parse_args()
base_path = Path(args.data)
out_filename = args.out
if args.json is not None:
json_filename = args.json
else:
json_filename = out_filename
if json_filename.endswith('.gz'):
json_filename = json_filename[0:-3]
if json_filename.endswith('.csv'):
json_filename = json_filename[0:-4]
json_filename += '.json'
# get list of all dicoms under the given path
files = list()
for h in os.listdir(base_path):
for pt in os.listdir(base_path / h):
for st in os.listdir(base_path / f'{h}{os.sep}{pt}'):
dcm_path = f'{base_path}{os.sep}{h}{os.sep}{pt}{os.sep}{st}'
dcms = os.listdir(dcm_path)
files.extend([f'{dcm_path}{os.sep}{d}' for d in dcms])
files.sort()
N = len(files)
print(f'Found {N} files.')
if args.number is not None:
if args.number < N:
# limit number of dicoms
print(f'Limiting parsing to {args.number} of {N} DICOMs.')
N = args.number
if N == 0:
print('No files to process. Exiting.')
sys.exit()
dicom_tabular_data = list()
with open(json_filename, 'w') as fp:
# initialize the array in the json file
fp.write('[\n')
for i in tqdm(range(N)):
if i > 0:
fp.write(',\n')
dicom_full_path = files[i]
# dicom filename is the last name in filepath
fn = dicom_full_path.split('/')[-1].split('.')[0]
# prepare the json output as a dictionary with this dicom fn as key
fp.write('{')
fp.write(f'"{fn}": ')
# load info from dicom
with open(dicom_full_path, 'rb') as dcm_fp:
plan = pydicom.dcmread(dcm_fp, stop_before_pixels=True)
field_dict = dict()
dicom_json = dict()
# go through each element
for elem in plan:
# index the dictionary using a long value of group, element
e = (elem.tag.group << 16) | elem.tag.element
# sequence data goes into JSON
if elem.VR == 'SQ':
# store number of items in the structured/flat data
field_dict[e] = elem.value.__len__()
# make a dict for the sequence, which will go into json
# don't store look up tables because
# they're huge and not human readable
if 'LUT' not in elem.name:
dicom_json[e] = [recurse(item) for item in elem]
else:
# three "real" data-types: number, string, or list of things
field_dict[e] = elem.value
field_dict['dicom'] = fn
dicom_tabular_data.append(field_dict)
# convert dictionary to json
js = json.dumps(dicom_json)
# write to json file
fp.write(js)
# finish the dicom dictionary
fp.write('}')
# end of array in json file
fp.write('\n]')
# combine list of dictionary into a dataframe
df = pd.DataFrame(dicom_tabular_data)
# make the dicom filename the index
df.set_index('dicom', inplace=True)
# write to file
if out_filename.endswith('.gz'):
df.to_csv(out_filename, sep=',', compression='gzip')
else:
df.to_csv(out_filename, sep=',')
| mit |
piskvorky/gensim | docs/src/auto_examples/tutorials/run_word2vec.py | 6 | 25525 | r"""
Word2Vec Model
==============
Introduces Gensim's Word2Vec model and demonstrates its use on the `Lee Evaluation Corpus
<https://hekyll.services.adelaide.edu.au/dspace/bitstream/2440/28910/1/hdl_28910.pdf>`_.
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
###############################################################################
# In case you missed the buzz, Word2Vec is a widely used algorithm based on neural
# networks, commonly referred to as "deep learning" (though word2vec itself is rather shallow).
# Using large amounts of unannotated plain text, word2vec learns relationships
# between words automatically. The output are vectors, one vector per word,
# with remarkable linear relationships that allow us to do things like:
#
# * vec("king") - vec("man") + vec("woman") =~ vec("queen")
# * vec("Montreal Canadiens") – vec("Montreal") + vec("Toronto") =~ vec("Toronto Maple Leafs").
#
# Word2vec is very useful in `automatic text tagging
# <https://github.com/RaRe-Technologies/movie-plots-by-genre>`_\ , recommender
# systems and machine translation.
#
# This tutorial:
#
# #. Introduces ``Word2Vec`` as an improvement over traditional bag-of-words
# #. Shows off a demo of ``Word2Vec`` using a pre-trained model
# #. Demonstrates training a new model from your own data
# #. Demonstrates loading and saving models
# #. Introduces several training parameters and demonstrates their effect
# #. Discusses memory requirements
# #. Visualizes Word2Vec embeddings by applying dimensionality reduction
#
# Review: Bag-of-words
# --------------------
#
# .. Note:: Feel free to skip these review sections if you're already familiar with the models.
#
# You may be familiar with the `bag-of-words model
# <https://en.wikipedia.org/wiki/Bag-of-words_model>`_ from the
# :ref:`core_concepts_vector` section.
# This model transforms each document to a fixed-length vector of integers.
# For example, given the sentences:
#
# - ``John likes to watch movies. Mary likes movies too.``
# - ``John also likes to watch football games. Mary hates football.``
#
# The model outputs the vectors:
#
# - ``[1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]``
# - ``[1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 1]``
#
# Each vector has 10 elements, where each element counts the number of times a
# particular word occurred in the document.
# The order of elements is arbitrary.
# In the example above, the order of the elements corresponds to the words:
# ``["John", "likes", "to", "watch", "movies", "Mary", "too", "also", "football", "games", "hates"]``.
#
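# As a minimal sketch (punctuation dropped for simplicity), the two vectors
# above can be reproduced with the standard library::
#
#   from collections import Counter
#   vocab = ["John", "likes", "to", "watch", "movies", "Mary", "too",
#            "also", "football", "games", "hates"]
#   doc = "John likes to watch movies Mary likes movies too".split()
#   print([Counter(doc)[w] for w in vocab])  # [1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]
#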
# Bag-of-words models are surprisingly effective, but have several weaknesses.
#
# First, they lose all information about word order: "John likes Mary" and
# "Mary likes John" correspond to identical vectors. There is a solution: bag
# of `n-grams <https://en.wikipedia.org/wiki/N-gram>`__
# models consider word phrases of length n to represent documents as
# fixed-length vectors to capture local word order but suffer from data
# sparsity and high dimensionality.
#
# Second, the model does not attempt to learn the meaning of the underlying
# words, and as a consequence, the distance between vectors doesn't always
# reflect the difference in meaning. The ``Word2Vec`` model addresses this
# second problem.
#
# Introducing: the ``Word2Vec`` Model
# -----------------------------------
#
# ``Word2Vec`` is a more recent model that embeds words in a lower-dimensional
# vector space using a shallow neural network. The result is a set of
# word-vectors where vectors close together in vector space have similar
# meanings based on context, and word-vectors distant to each other have
# differing meanings. For example, ``strong`` and ``powerful`` would be close
# together and ``strong`` and ``Paris`` would be relatively far.
#
# There are two versions of this model, and the :py:class:`~gensim.models.word2vec.Word2Vec`
# class implements them both:
#
# 1. Skip-grams (SG)
# 2. Continuous-bag-of-words (CBOW)
#
# .. Important::
# Don't let the implementation details below scare you.
# They're advanced material: if it's too much, then move on to the next section.
#
# The `Word2Vec Skip-gram <http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model>`__
# model, for example, takes in pairs (word1, word2) generated by moving a
# window across text data, and trains a 1-hidden-layer neural network based on
# the synthetic task of given an input word, giving us a predicted probability
# distribution of nearby words to the input. A virtual `one-hot
# <https://en.wikipedia.org/wiki/One-hot>`__ encoding of words
# goes through a 'projection layer' to the hidden layer; these projection
# weights are later interpreted as the word embeddings. So if the hidden layer
# has 300 neurons, this network will give us 300-dimensional word embeddings.
#
# Continuous-bag-of-words Word2vec is very similar to the skip-gram model. It
# is also a 1-hidden-layer neural network. The synthetic training task now uses
# the average of multiple input context words, rather than a single word as in
# skip-gram, to predict the center word. Again, the projection weights that
# turn one-hot words into averageable vectors, of the same width as the hidden
# layer, are interpreted as the word embeddings.
#
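# In Gensim, both variants live in the same class: the ``sg`` parameter of
# :py:class:`~gensim.models.word2vec.Word2Vec` selects the algorithm
# (a short sketch; CBOW is the default)::
#
#   model_cbow = gensim.models.Word2Vec(sentences, sg=0)  # continuous-bag-of-words
#   model_sg = gensim.models.Word2Vec(sentences, sg=1)    # skip-gram
#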
###############################################################################
# Word2Vec Demo
# -------------
#
# To see what ``Word2Vec`` can do, let's download a pre-trained model and play
# around with it. We will fetch the Word2Vec model trained on part of the
# Google News dataset, covering approximately 3 million words and phrases. Such
# a model can take hours to train, but since it's already available,
# downloading and loading it with Gensim takes minutes.
#
# .. Important::
# The model is approximately 2GB, so you'll need a decent network connection
# to proceed. Otherwise, skip ahead to the "Training Your Own Model" section
# below.
#
# You may also check out an `online word2vec demo
# <http://radimrehurek.com/2014/02/word2vec-tutorial/#app>`_ where you can try
# this vector algebra for yourself. That demo runs ``word2vec`` on the
# **entire** Google News dataset, of **about 100 billion words**.
#
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')
###############################################################################
# A common operation is to retrieve the vocabulary of a model. That is trivial:
for index, word in enumerate(wv.index_to_key):
if index == 10:
break
print(f"word #{index}/{len(wv.index_to_key)} is {word}")
###############################################################################
# We can easily obtain vectors for terms the model is familiar with:
#
vec_king = wv['king']
###############################################################################
# Unfortunately, the model is unable to infer vectors for unfamiliar words.
# This is one limitation of Word2Vec: if this limitation matters to you, check
# out the FastText model.
#
try:
vec_cameroon = wv['cameroon']
except KeyError:
print("The word 'cameroon' does not appear in this model")
###############################################################################
# Moving on, ``Word2Vec`` supports several word similarity tasks out of the
# box. You can see how the similarity intuitively decreases as the words get
# less and less similar.
#
pairs = [
('car', 'minivan'), # a minivan is a kind of car
('car', 'bicycle'), # still a wheeled vehicle
('car', 'airplane'), # ok, no wheels, but still a vehicle
('car', 'cereal'), # ... and so on
('car', 'communism'),
]
for w1, w2 in pairs:
print('%r\t%r\t%.2f' % (w1, w2, wv.similarity(w1, w2)))
###############################################################################
# Print the 5 most similar words to "car" or "minivan"
print(wv.most_similar(positive=['car', 'minivan'], topn=5))
###############################################################################
# Which of the below does not belong in the sequence?
print(wv.doesnt_match(['fire', 'water', 'land', 'sea', 'air', 'car']))
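###############################################################################
# The analogy from the introduction can be reproduced with the same API
# (a quick sketch; the exact neighbours depend on the model):
print(wv.most_similar(positive=['king', 'woman'], negative=['man'], topn=1))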
###############################################################################
# Training Your Own Model
# -----------------------
#
# To start, you'll need some data for training the model. For the following
# examples, we'll use the `Lee Evaluation Corpus
# <https://hekyll.services.adelaide.edu.au/dspace/bitstream/2440/28910/1/hdl_28910.pdf>`_
# (which you `already have
# <https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/test/test_data/lee_background.cor>`_
# if you've installed Gensim).
#
# This corpus is small enough to fit entirely in memory, but we'll implement a
# memory-friendly iterator that reads it line-by-line to demonstrate how you
# would handle a larger corpus.
#
from gensim.test.utils import datapath
from gensim import utils
class MyCorpus:
"""An iterator that yields sentences (lists of str)."""
def __iter__(self):
corpus_path = datapath('lee_background.cor')
for line in open(corpus_path):
# assume there's one document per line, tokens separated by whitespace
yield utils.simple_preprocess(line)
###############################################################################
# If we wanted to do any custom preprocessing, e.g. decode a non-standard
# encoding, lowercase, remove numbers, extract named entities... All of this can
# be done inside the ``MyCorpus`` iterator and ``word2vec`` doesn’t need to
# know. All that is required is that the input yields one sentence (list of
# utf8 words) after another.
#
# Let's go ahead and train a model on our corpus. Don't worry about the
# training parameters much for now, we'll revisit them later.
#
import gensim.models
sentences = MyCorpus()
model = gensim.models.Word2Vec(sentences=sentences)
###############################################################################
# Once we have our model, we can use it in the same way as in the demo above.
#
# The main part of the model is ``model.wv``\ , where "wv" stands for "word vectors".
#
vec_king = model.wv['king']
###############################################################################
# Retrieving the vocabulary works the same way:
for index, word in enumerate(model.wv.index_to_key):
    if index == 10:
        break
    print(f"word #{index}/{len(model.wv.index_to_key)} is {word}")
###############################################################################
# Storing and loading models
# --------------------------
#
# You'll notice that training non-trivial models can take time. Once you've
# trained your model and it works as expected, you can save it to disk. That
# way, you don't have to spend time training it all over again later.
#
# You can store/load models using the standard gensim methods:
#
import tempfile
with tempfile.NamedTemporaryFile(prefix='gensim-model-', delete=False) as tmp:
temporary_filepath = tmp.name
model.save(temporary_filepath)
#
# The model is now safely stored in the filepath.
# You can copy it to other machines, share it with others, etc.
#
# To load a saved model:
#
new_model = gensim.models.Word2Vec.load(temporary_filepath)
###############################################################################
# which uses pickle internally, optionally ``mmap``\ ‘ing the model’s internal
# large NumPy matrices into virtual memory directly from disk files, for
# inter-process memory sharing.
#
# In addition, you can load models created by the original C tool, both using
# its text and binary formats::
#
# model = gensim.models.KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False)
# # using gzipped/bz2 input works too, no need to unzip
# model = gensim.models.KeyedVectors.load_word2vec_format('/tmp/vectors.bin.gz', binary=True)
#
###############################################################################
# Training Parameters
# -------------------
#
# ``Word2Vec`` accepts several parameters that affect both training speed and quality.
#
# min_count
# ---------
#
# ``min_count`` is for pruning the internal dictionary. Words that appear only
# once or twice in a billion-word corpus are probably uninteresting typos and
# garbage. In addition, there’s not enough data to make any meaningful training
# on those words, so it’s best to ignore them:
#
# default value of min_count=5
model = gensim.models.Word2Vec(sentences, min_count=10)
###############################################################################
#
# vector_size
# -----------
#
# ``vector_size`` is the number of dimensions (N) of the N-dimensional space that
# gensim Word2Vec maps the words onto.
#
# Bigger size values require more training data, but can lead to better (more
# accurate) models. Reasonable values are in the tens to hundreds.
#
# The default value of vector_size is 100.
model = gensim.models.Word2Vec(sentences, vector_size=200)
###############################################################################
# workers
# -------
#
# ``workers`` , the last of the major parameters (full list `here
# <http://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec>`_)
# is for training parallelization, to speed up training:
#
# default value of workers=3 (tutorial says 1...)
model = gensim.models.Word2Vec(sentences, workers=4)
###############################################################################
# The ``workers`` parameter only has an effect if you have `Cython
# <http://cython.org/>`_ installed. Without Cython, you’ll only be able to use
# one core because of the `GIL
# <https://wiki.python.org/moin/GlobalInterpreterLock>`_ (and ``word2vec``
# training will be `miserably slow
# <http://rare-technologies.com/word2vec-in-python-part-two-optimizing/>`_\ ).
#
###############################################################################
# Memory
# ------
#
# At its core, ``word2vec`` model parameters are stored as matrices (NumPy
# arrays). Each array is **#vocabulary** (controlled by the ``min_count`` parameter)
# times **vector size** (the ``vector_size`` parameter) of floats (single precision aka 4 bytes).
#
# Three such matrices are held in RAM (work is underway to reduce that number
# to two, or even one). So if your input contains 100,000 unique words, and you
# asked for layer ``vector_size=200``\ , the model will require approx.
# ``100,000*200*4*3 bytes = ~229MB``.
#
# There’s a little extra memory needed for storing the vocabulary tree (100,000 words would
# take a few megabytes), but unless your words are extremely loooong strings, memory
# footprint will be dominated by the three matrices above.
#
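# A rough back-of-the-envelope check of the figure above (illustration only):
vocab_size, vector_size = 100_000, 200
bytes_per_float, num_matrices = 4, 3
print(vocab_size * vector_size * bytes_per_float * num_matrices / 1024 ** 2)  # ~228.9 MB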
###############################################################################
# Evaluating
# ----------
#
# ``Word2Vec`` training is an unsupervised task, so there’s no good way to
# objectively evaluate the result. Evaluation depends on your end application.
#
# Google has released their testing set of about 20,000 syntactic and semantic
# test examples, following the “A is to B as C is to D” task. It is provided in
# the 'datasets' folder.
#
# For example, a syntactic analogy of the comparative type is ``bad:worse;good:?``.
# There are a total of 9 types of syntactic comparisons in the dataset, such as
# plural nouns and nouns of opposite meaning.
#
# The semantic questions contain five types of semantic analogies, such as
# capital cities (``Paris:France;Tokyo:?``) or family members
# (``brother:sister;dad:?``).
#
###############################################################################
# Gensim supports the same evaluation set, in exactly the same format:
#
model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
###############################################################################
#
# This ``evaluate_word_analogies`` method takes an `optional parameter
# <http://radimrehurek.com/gensim/models/keyedvectors.html#gensim.models.keyedvectors.KeyedVectors.evaluate_word_analogies>`_
# ``restrict_vocab`` which limits which test examples are to be considered.
#
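# For example, restricting the evaluation to the 10,000 most frequent words
# (a quick sketch) could look like::
#
#   model.wv.evaluate_word_analogies(datapath('questions-words.txt'), restrict_vocab=10000)
#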
###############################################################################
# In the December 2016 release of Gensim we added a better way to evaluate semantic similarity.
#
# By default it uses the academic WS-353 dataset, but you can create a dataset
# specific to your business based on it. It contains word pairs together with
# human-assigned similarity judgments. It measures the relatedness or
# co-occurrence of two words. For example, 'coast' and 'shore' are very similar
# as they appear in the same context. At the same time 'clothes' and 'closet'
# are less similar because they are related but not interchangeable.
#
model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))
###############################################################################
# .. Important::
# Good performance on Google's or WS-353 test set doesn’t mean word2vec will
# work well in your application, or vice versa. It’s always best to evaluate
# directly on your intended task. For an example of how to use word2vec in a
# classifier pipeline, see this `tutorial
# <https://github.com/RaRe-Technologies/movie-plots-by-genre>`_.
#
###############################################################################
# Online training / Resuming training
# -----------------------------------
#
# Advanced users can load a model and continue training it with more sentences
# and `new vocabulary words <online_w2v_tutorial.ipynb>`_:
#
model = gensim.models.Word2Vec.load(temporary_filepath)
more_sentences = [
['Advanced', 'users', 'can', 'load', 'a', 'model',
'and', 'continue', 'training', 'it', 'with', 'more', 'sentences'],
]
model.build_vocab(more_sentences, update=True)
model.train(more_sentences, total_examples=model.corpus_count, epochs=model.epochs)
# cleaning up temporary file
import os
os.remove(temporary_filepath)
###############################################################################
# You may need to tweak the ``total_words`` parameter to ``train()``,
# depending on what learning rate decay you want to simulate.
#
# Note that it’s not possible to resume training with models generated by the C
# tool, ``KeyedVectors.load_word2vec_format()``. You can still use them for
# querying/similarity, but information vital for training (the vocab tree) is
# missing there.
#
###############################################################################
# Training Loss Computation
# -------------------------
#
# The parameter ``compute_loss`` can be used to toggle computation of loss
# while training the Word2Vec model. The computed loss is stored in the model
# attribute ``running_training_loss`` and can be retrieved using the function
# ``get_latest_training_loss`` as follows :
#
# instantiating and training the Word2Vec model
model_with_loss = gensim.models.Word2Vec(
sentences,
min_count=1,
compute_loss=True,
hs=0,
sg=1,
seed=42,
)
# getting the training loss value
training_loss = model_with_loss.get_latest_training_loss()
print(training_loss)
###############################################################################
# Benchmarks
# ----------
#
# Let's run some benchmarks to see the effect of the training loss computation code
# on training time.
#
# We'll use the following data for the benchmarks:
#
# #. Lee Background corpus: included in gensim's test data
# #. Text8 corpus. To demonstrate the effect of corpus size, we'll look at the
# first 1MB, 10MB, 50MB of the corpus, as well as the entire thing.
#
import io
import os
import gensim.models.word2vec
import gensim.downloader as api
import smart_open
def head(path, size):
with smart_open.open(path) as fin:
return io.StringIO(fin.read(size))
def generate_input_data():
lee_path = datapath('lee_background.cor')
ls = gensim.models.word2vec.LineSentence(lee_path)
ls.name = '25kB'
yield ls
text8_path = api.load('text8').fn
labels = ('1MB', '10MB', '50MB', '100MB')
sizes = (1024 ** 2, 10 * 1024 ** 2, 50 * 1024 ** 2, 100 * 1024 ** 2)
for l, s in zip(labels, sizes):
ls = gensim.models.word2vec.LineSentence(head(text8_path, s))
ls.name = l
yield ls
input_data = list(generate_input_data())
###############################################################################
# We now compare the training time taken for different combinations of input
# data and model training parameters like ``hs`` and ``sg``.
#
# For each combination, we repeat the test several times to obtain the mean and
# standard deviation of the test duration.
#
# Temporarily reduce logging verbosity
logging.root.level = logging.ERROR
import time
import numpy as np
import pandas as pd
train_time_values = []
seed_val = 42
sg_values = [0, 1]
hs_values = [0, 1]
fast = True
if fast:
input_data_subset = input_data[:3]
else:
input_data_subset = input_data
for data in input_data_subset:
for sg_val in sg_values:
for hs_val in hs_values:
for loss_flag in [True, False]:
time_taken_list = []
for i in range(3):
start_time = time.time()
w2v_model = gensim.models.Word2Vec(
data,
compute_loss=loss_flag,
sg=sg_val,
hs=hs_val,
seed=seed_val,
)
time_taken_list.append(time.time() - start_time)
time_taken_list = np.array(time_taken_list)
time_mean = np.mean(time_taken_list)
time_std = np.std(time_taken_list)
model_result = {
'train_data': data.name,
'compute_loss': loss_flag,
'sg': sg_val,
'hs': hs_val,
'train_time_mean': time_mean,
'train_time_std': time_std,
}
print("Word2vec model #%i: %s" % (len(train_time_values), model_result))
train_time_values.append(model_result)
train_times_table = pd.DataFrame(train_time_values)
train_times_table = train_times_table.sort_values(
by=['train_data', 'sg', 'hs', 'compute_loss'],
ascending=[False, False, True, False],
)
print(train_times_table)
###############################################################################
#
# Visualising Word Embeddings
# ---------------------------
#
# The word embeddings made by the model can be visualised by reducing the
# dimensionality of the word vectors to 2 dimensions using t-SNE.
#
# Visualisations can be used to notice semantic and syntactic trends in the data.
#
# Example:
#
# * Semantic: words like cat, dog, cow, etc. have a tendency to lie close by
# * Syntactic: words like run, running or cut, cutting lie close together.
#
# Vector relations like vKing - vMan = vQueen - vWoman can also be noticed.
#
# .. Important::
# The model used for the visualisation is trained on a small corpus. Thus
# some of the relations might not be so clear.
#
from sklearn.decomposition import IncrementalPCA    # initial reduction
from sklearn.manifold import TSNE # final reduction
import numpy as np # array handling
def reduce_dimensions(model):
num_dimensions = 2 # final num dimensions (2D, 3D, etc)
# extract the words & their vectors, as numpy arrays
vectors = np.asarray(model.wv.vectors)
labels = np.asarray(model.wv.index_to_key) # fixed-width numpy strings
# reduce using t-SNE
tsne = TSNE(n_components=num_dimensions, random_state=0)
vectors = tsne.fit_transform(vectors)
x_vals = [v[0] for v in vectors]
y_vals = [v[1] for v in vectors]
return x_vals, y_vals, labels
x_vals, y_vals, labels = reduce_dimensions(model)
def plot_with_plotly(x_vals, y_vals, labels, plot_in_notebook=True):
from plotly.offline import init_notebook_mode, iplot, plot
import plotly.graph_objs as go
trace = go.Scatter(x=x_vals, y=y_vals, mode='text', text=labels)
data = [trace]
if plot_in_notebook:
init_notebook_mode(connected=True)
iplot(data, filename='word-embedding-plot')
else:
plot(data, filename='word-embedding-plot.html')
def plot_with_matplotlib(x_vals, y_vals, labels):
import matplotlib.pyplot as plt
import random
random.seed(0)
plt.figure(figsize=(12, 12))
plt.scatter(x_vals, y_vals)
#
# Label randomly subsampled 25 data points
#
indices = list(range(len(labels)))
selected_indices = random.sample(indices, 25)
for i in selected_indices:
plt.annotate(labels[i], (x_vals[i], y_vals[i]))
try:
get_ipython()
except Exception:
plot_function = plot_with_matplotlib
else:
plot_function = plot_with_plotly
plot_function(x_vals, y_vals, labels)
###############################################################################
# Conclusion
# ----------
#
# In this tutorial we learned how to train word2vec models on your custom data
# and also how to evaluate them. Hope that you too will find this popular tool
# useful in your Machine Learning tasks!
#
# Links
# -----
#
# - API docs: :py:mod:`gensim.models.word2vec`
# - `Original C toolkit and word2vec papers by Google <https://code.google.com/archive/p/word2vec/>`_.
#
| lgpl-2.1 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/linear_model/ransac.py | 16 | 17217 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
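# Illustrative check of the formula above (a sketch, not part of the library API):
# with 50 inliers out of 100 samples, min_samples=2 and probability=0.99,
# _dynamic_max_trials returns ceil(log(0.01) / log(1 - 0.5 ** 2)) = 17 trials.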
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight : array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' was deprecated in version 0.18 and "
"will be removed in version 0.20. Use 'loss' instead.",
DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
| bsd-3-clause |
AlertaDengue/InfoDenguePredict | infodenguepredict/analysis/distance.py | 1 | 2802 | import numpy as np;
import pandas as pd
import seaborn as sns
import scipy.spatial.distance as spd
from infodenguepredict.predict_settings import *
# from scipy.signal import correlate
from infodenguepredict.data.infodengue import get_alerta_table, combined_data
def get_cities_from_state(state):
alerta_table = get_alerta_table(state=state)
cities_list = alerta_table.municipio_geocodigo.unique()
return cities_list
def alocate_data(state):
cities_list = list(get_cities_from_state(state))
bad_cities = []
for city in cities_list:
try:
full_city = combined_data(city, data_types=DATA_TYPES)
full_city.to_pickle('{}/city_{}.pkl'.format(TMP_PATH, city))
except TypeError as e:
print("Skipping: ", city)
bad_cities.append(city)
continue
for c in bad_cities:
cities_list.remove(c)
return cities_list
def correlation(df_1, df_2):
corr_list = []
for col in df_1.columns:
df = pd.concat((df_1[col], df_2[col]), axis=1).fillna(method='ffill')
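        # scipy's 'correlation' metric is 1 - Pearson correlation,
        # so a distance of 0 means the two columns are perfectly correlated.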
corr = spd.pdist(df.T.as_matrix(), metric='correlation')
corr_list.append(corr[0])
return np.nanmean(corr_list)
def cross_correlation(df_1, df_2, max_lag=5):
corr_list = []
for col in df_1.columns:
corrs = [np.correlate(df_1[col], df_2[col].shift(lag)) for lag in range(max_lag)]
        corr = np.max(corrs)
        lag = int(np.argmax(corrs))  # lag (in steps) with the highest correlation
corr_list.append(corr)
return np.nanmean(corr_list)
def fix_distance_matrix(dists):
to_drop=[]
for pos, col in enumerate(dists.columns):
if len(dists[col].dropna())==pos+1:
to_drop.append(col)
dists.drop(to_drop, axis=1, inplace=True)
dists.drop(to_drop, axis=0, inplace=True)
return dists
def distance(cities_list, cols):
"""
Returns the correlation distance matrix for a list of cities.
:param cities_list: List of geocodes
:param cols: columns to calculate the correlation
:return:
"""
state_distances = pd.DataFrame(index=cities_list)
for pos, city_1 in enumerate(cities_list):
print("Calculating distance Matrix for ", city_1)
full_city_1 = pd.read_pickle('{}/city_{}.pkl'.format(TMP_PATH, city_1))[cols]
new_col = list(np.zeros(pos + 1))
for city_2 in cities_list[pos + 1:]:
full_city_2 = pd.read_pickle('{}/city_{}.pkl'.format(TMP_PATH, city_2))[cols]
dist = correlation(full_city_1, full_city_2)
new_col.append(dist)
state_distances[city_1] = new_col
state_distances = fix_distance_matrix(state_distances)
return state_distances
if __name__ == "__main__":
cities_list = get_cities_from_state(STATE)
distance(cities_list, CLUSTER_VARS)
| gpl-3.0 |
petercerno/good-morning | good_morning/good_morning.py | 1 | 23934 | # Copyright (c) 2015 Peter Cerno
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for downloading financial data from financials.morningstar.com.
"""
import csv
import json
import numpy as np
import pandas as pd
import re
import urllib.request
from bs4 import BeautifulSoup
from datetime import date
class KeyRatiosDownloader(object):
u"""Downloads key ratios from http://financials.morningstar.com/
"""
def __init__(self, table_prefix = u'morningstar_'):
u"""Constructs the KeyRatiosDownloader instance.
:param table_prefix: Prefix of the MySQL tables.
"""
self._table_prefix = table_prefix
def download(self, ticker, conn = None, region = 'GBR', culture = 'en_US', currency = 'USD'):
u"""Downloads and returns key ratios for the given Morningstar ticker.
Downloads and returns an array of pandas.DataFrames containing the key
ratios for the given Morningstar ticker. If the MySQL connection is
specified then the downloaded key ratios are uploaded to the MySQL
database.
:param ticker: Morningstar ticker.
:param conn: MySQL connection.
:param region: Sets the region.
:param culture: Sets culture.
:param currency: Sets currency.
:return: List of pandas.DataFrames containing the key ratios.
"""
url = (r'http://financials.morningstar.com/ajax/exportKR2CSV.html?' +
r'&callback=?&t={t}®ion={reg}&culture={cult}&cur={cur}'.format(
t=ticker, reg=region, cult=culture, cur=currency))
with urllib.request.urlopen(url) as response:
tables = self._parse_tables(response)
response_structure = [
# Original Name, New pandas.DataFrame Name
(u'Financials', u'Key Financials'),
(u'Key Ratios -> Profitability', u'Key Margins % of Sales'),
(u'Key Ratios -> Profitability', u'Key Profitability'),
(u'Key Ratios -> Growth', None),
(u'Revenue %', u'Key Revenue %'),
(u'Operating Income %', u'Key Operating Income %'),
(u'Net Income %', u'Key Net Income %'),
(u'EPS %', u'Key EPS %'),
(u'Key Ratios -> Cash Flow', u'Key Cash Flow Ratios'),
(u'Key Ratios -> Financial Health',
u'Key Balance Sheet Items (in %)'),
(u'Key Ratios -> Financial Health',
u'Key Liquidity/Financial Health'),
(u'Key Ratios -> Efficiency Ratios', u'Key Efficiency Ratios')]
frames = self._parse_frames(tables, response_structure)
############################
# Error Handling for Ratios
############################
# Empty String
if len(ticker) == 0:
raise ValueError("You did not enter a ticker symbol. Please"
" try again.")
# Wrong ticker symbol
elif frames == "MorningStar could not find the ticker":
raise ValueError("MorningStar cannot find the ticker symbol "
"you entered or it is INVALID. Please try "
"again.")
currency = re.match(u'^.* ([A-Z]+) Mil$',
frames[0].index[0]).group(1)
frames[0].index.name += u' ' + currency
if conn:
self._upload_frames_to_db(ticker, frames, conn)
return frames
@staticmethod
def _parse_tables(response):
u"""Parses the given csv response from financials.morningstar.com.
:param response: Response from financials.morningstar.com.
:return: List of pairs, where the first item is the name of the table
(extracted from the response) and the second item is the corresponding
pandas.DataFrame table containing the data.
"""
# Regex pattern used to recognize csv lines containing financial data.
num_commas = 5
pat_commas = r'(.*,){%d,}' % num_commas
# Resulting array of pairs (table_name, table_frame).
tables = []
table_name = None
table_rows = None
for line in response:
line = line.decode(u'utf-8').strip()
match = re.match(pat_commas, line)
if match:
for row in csv.reader([line]):
table_rows.append(row)
else:
if table_name and table_rows:
tables.append([table_name, pd.DataFrame(table_rows)])
if line != u'':
table_name = line
table_rows = []
if table_name and table_rows:
tables.append([table_name, pd.DataFrame(table_rows)])
return tables
@staticmethod
def _parse_frames(tables, response_structure):
u"""Returns an array of processed pandas.DataFrames based on the
original list of tables and the special response_structure list.
:param tables: Original list of tables (obtained from _parse_tables).
:param response_structure: List of pairs (expected table name, new name
assigned to the corresponding (processed) pandas.DataFrame).
"""
#############################
# Error Handling
#############################
# Fail Early on Empty String
if len(tables) == 0:
return ("MorningStar could not find the ticker")
period_start = tables[0][1].ix[0][1]
period_month = pd.datetime.strptime(period_start, u'%Y-%m').month
#period_freq = pd.datetools.YearEnd(month=period_month)
period_freq = pd.tseries.offsets.YearEnd(month=period_month)
frames = []
for index, (check_name, frame_name) in enumerate(response_structure):
if frame_name and tables[index][0] == check_name:
frame = KeyRatiosDownloader._process_frame(
tables[index][1], frame_name, period_start, period_freq)
if frame is not None and frame.index.size > 0:
frames.append(frame)
return frames
@staticmethod
def _process_frame(frame, frame_name, period_start,
period_freq):
u"""Returns a processed pandas.DataFrame based on the original frame.
:param frame: Original pandas.DataFrame to be processed.
:param frame_name: New name assigned to the processed pandas.DataFrame.
:param period_start: Start of the period.
:param period_freq: Frequency of the period.
:return Processed pandas.DataFrame based on the original frame.
"""
output_frame = frame.set_index(frame[0])
del output_frame[0]
output_frame.index.name = frame_name
output_frame.columns = pd.period_range(period_start,
periods=len(output_frame.ix[0]),
freq=period_freq)
output_frame.columns.name = u'Period'
if re.match(r'^\d{4}-\d{2}$', output_frame.ix[0][0]):
output_frame.drop(output_frame.index[0], inplace=True)
output_frame.replace(u',', u'', regex=True, inplace=True)
output_frame.replace(u'^\s*$', u'NaN', regex=True, inplace=True)
return output_frame.astype(float)
def _upload_frames_to_db(self, ticker, frames,
conn):
u"""Uploads the given array of pandas.DataFrames to the MySQL database.
:param ticker: Morningstar ticker.
:param frames: Array of pandas.DataFrames to be uploaded.
:param conn: MySQL connection.
"""
for frame in frames:
table_name = self._get_db_table_name(frame)
if not _db_table_exists(table_name, conn):
_db_execute(self._get_db_create_table(frame), conn)
_db_execute(self._get_db_replace_values(ticker, frame), conn)
@staticmethod
def _get_db_name(name):
u"""Returns a new (cleaned) name that can be used in a MySQL database.
:param name: Original name.
:return Name that can be used in a MySQL database.
"""
name = (name.lower()
.replace(u'/', u' per ')
.replace(u'&', u' and ')
.replace(u'%', u' percent '))
name = re.sub(r'[^a-z0-9]', u' ', name)
name = re.sub(r'\s+', u' ', name).strip()
return name.replace(u' ', u'_')
def _get_db_table_name(self, frame):
u"""Returns the MySQL TABLE name for the given pandas.DataFrame.
:param frame: pandas.DataFrame.
:return MySQL TABLE name.
"""
return self._table_prefix + self._get_db_name(frame.index.name)
def _get_db_create_table(self, frame):
u"""Returns the MySQL CREATE TABLE statement for the given
pandas.DataFrame.
:param frame: pandas.DataFrame.
:return MySQL CREATE TABLE statement.
"""
columns = (u',\n'.
join([u' `%s` DECIMAL(20,5) DEFAULT NULL COMMENT "%s"' %
(self._get_db_name(name), name) for name in
frame.index.values]))
table_name = self._get_db_table_name(frame)
return (
u'CREATE TABLE `%s` (\n' % table_name +
u' `ticker` VARCHAR(50) NOT NULL COMMENT "Exchange:Ticker",\n' +
u' `period` DATE NOT NULL COMMENT "Period",\n' +
u'%s,\n' % columns +
u' PRIMARY KEY USING BTREE (`ticker`, `period`),\n' +
u' KEY `ix_ticker` USING BTREE (`ticker`))\n' +
u'ENGINE=MyISAM DEFAULT CHARSET=utf8\n' +
u'COMMENT = "%s"' % frame.index.name)
def _get_db_replace_values(self, ticker, frame):
u"""Returns the MySQL REPLACE INTO statement for the given
Morningstar ticker and the corresponding pandas.DataFrame.
:param ticker: Morningstar ticker.
:param frame: pandas.DataFrame.
:return MySQL REPLACE INTO statement.
"""
columns = ([u'`ticker`', u'`period`'] +
[u'`%s`' % self._get_db_name(name) for name in
frame.index.values])
return (
u'REPLACE INTO `%s`\n' % self._get_db_table_name(frame) +
u' (%s)\nVALUES\n' % u',\n '.join(columns) +
u',\n'.join([u'("' + ticker + u'", "' + column.strftime(u'%Y-%m-%d') +
u'", ' +
u', '.join([u'NULL' if np.isnan(x) else u'%.5f' % x
for x in frame[column].values]) +
u')' for column in frame.columns]))
class FinancialsDownloader(object):
u"""Downloads financials from http://financials.morningstar.com/
"""
def __init__(self, table_prefix = u'morningstar_'):
u"""Constructs the FinancialsDownloader instance.
:param table_prefix: Prefix of the MySQL tables.
"""
self._table_prefix = table_prefix
def download(self, ticker, conn = None):
u"""Downloads and returns a dictionary containing pandas.DataFrames
representing the financials (i.e. income statement, balance sheet,
cash flow) for the given Morningstar ticker. If the MySQL connection
is specified then the downloaded financials are uploaded to the MySQL
database.
:param ticker: Morningstar ticker.
:param conn: MySQL connection.
:return Dictionary containing pandas.DataFrames representing the
financials for the given Morningstar ticker.
"""
result = {}
##########################
# Error Handling
##########################
# Empty String
if len(ticker) == 0:
raise ValueError("You did not enter a ticker symbol. Please"
" try again.")
for report_type, table_name in [
(u'is', u'income_statement'),
(u'bs', u'balance_sheet'),
(u'cf', u'cash_flow')]:
frame = self._download(ticker, report_type)
result[table_name] = frame
if conn:
self._upload_frame(
frame, ticker, self._table_prefix + table_name, conn)
if conn:
self._upload_unit(ticker, self._table_prefix + u'unit', conn)
result[u'period_range'] = self._period_range
result[u'fiscal_year_end'] = self._fiscal_year_end
result[u'currency'] = self._currency
return result
def _download(self, ticker, report_type):
u"""Downloads and returns a pandas.DataFrame corresponding to the
given Morningstar ticker and the given type of the report.
:param ticker: Morningstar ticker.
:param report_type: Type of the report ('is', 'bs', 'cf').
:return pandas.DataFrame corresponding to the given Morningstar ticker
and the given type of the report.
"""
url = (r'http://financials.morningstar.com/ajax/' +
r'ReportProcess4HtmlAjax.html?&t=' + ticker +
r'®ion=usa&culture=en-US&cur=USD' +
r'&reportType=' + report_type + r'&period=12' +
r'&dataType=A&order=asc&columnYear=5&rounding=3&view=raw')
with urllib.request.urlopen(url) as response:
json_text = response.read().decode(u'utf-8')
##############################
# Error Handling
##############################
# Wrong ticker
if len(json_text)==0:
raise ValueError("MorningStar cannot find the ticker symbol "
"you entered or it is INVALID. Please try "
"again.")
json_data = json.loads(json_text)
result_soup = BeautifulSoup(json_data[u'result'],u'html.parser')
return self._parse(result_soup)
def _parse(self, soup):
u"""Extracts and returns a pandas.DataFrame corresponding to the
given parsed HTML response from financials.morningstar.com.
:param soup: Parsed HTML response by BeautifulSoup.
:return pandas.DataFrame corresponding to the given parsed HTML response
from financials.morningstar.com.
"""
# Left node contains the labels.
left = soup.find(u'div', u'left').div
# Main node contains the (raw) data.
main = soup.find(u'div', u'main').find(u'div', u'rf_table')
year = main.find(u'div', {u'id': u'Year'})
self._year_ids = [node.attrs[u'id'] for node in year]
period_month = pd.datetime.strptime(year.div.text, u'%Y-%m').month
self._period_range = pd.period_range(
year.div.text, periods=len(self._year_ids),
# freq=pd.datetools.YearEnd(month=period_month))
freq = pd.tseries.offsets.YearEnd(month=period_month))
unit = left.find(u'div', {u'id': u'unitsAndFiscalYear'})
self._fiscal_year_end = int(unit.attrs[u'fyenumber'])
self._currency = unit.attrs[u'currency']
self._data = []
self._label_index = 0
self._read_labels(left)
self._data_index = 0
self._read_data(main)
return pd.DataFrame(self._data,
columns=[u'parent_index', u'title'] + list(
self._period_range))
def _read_labels(self, root_node, parent_label_index = None):
u"""Recursively reads labels from the parsed HTML response.
"""
for node in root_node:
if node.has_attr(u'class') and u'r_content' in node.attrs[u'class']:
self._read_labels(node, self._label_index - 1)
if (node.has_attr(u'id') and
node.attrs[u'id'].startswith(u'label') and
not node.attrs[u'id'].endswith(u'padding') and
(not node.has_attr(u'style') or
u'display:none' not in node.attrs[u'style'])):
label_id = node.attrs[u'id'][6:]
label_title = (node.div.attrs[u'title']
if node.div.has_attr(u'title')
else node.div.text)
self._data.append({
u'id': label_id,
u'index': self._label_index,
u'parent_index': (parent_label_index
if parent_label_index is not None
else self._label_index),
u'title': label_title})
self._label_index += 1
def _read_data(self, root_node):
u"""Recursively reads data from the parsed HTML response.
"""
for node in root_node:
if node.has_attr(u'class') and u'r_content' in node.attrs[u'class']:
self._read_data(node)
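# Data rows carry ids of the form 'data_<id>' that match the label ids
# collected by _read_labels; some labels have no data row, hence the
# linear scan over self._data below.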
if (node.has_attr(u'id') and
node.attrs[u'id'].startswith(u'data') and
not node.attrs[u'id'].endswith(u'padding') and
(not node.has_attr(u'style') or
u'display:none' not in node.attrs[u'style'])):
data_id = node.attrs[u'id'][5:]
while (self._data_index < len(self._data) and
self._data[self._data_index][u'id'] != data_id):
# In some cases we do not have data for all labels.
self._data_index += 1
assert(self._data_index < len(self._data) and
self._data[self._data_index][u'id'] == data_id)
for (i, child) in enumerate(node.children):
try:
value = float(child.attrs[u'rawvalue'])
except ValueError:
value = None
self._data[self._data_index][
self._period_range[i]] = value
self._data_index += 1
def _upload_frame(self, frame, ticker, table_name,
conn):
u"""Uploads the given pandas.DataFrame to the MySQL database.
:param frame: pandas.DataFrames to be uploaded.
:param ticker: Morningstar ticker.
:param table_name: Name of the MySQL table.
:param conn: MySQL connection.
"""
if not _db_table_exists(table_name, conn):
_db_execute(self._get_db_create_table(table_name), conn)
_db_execute(self._get_db_replace_values(
ticker, frame, table_name), conn)
def _upload_unit(self, ticker, table_name,
conn):
u"""Uploads the fiscal_year_end and the currency to the MySQL database.
:param ticker: Morningstar ticker.
:param table_name: Name of the MySQL table.
:param conn: MySQL connection.
"""
if not _db_table_exists(table_name, conn):
_db_execute(
u'CREATE TABLE `%s` (\n' % table_name +
u' `ticker` varchar(50) NOT NULL\n' +
u' COMMENT "Exchange:Ticker",\n' +
u' `fiscal_year_end` int(10) unsigned NOT NULL\n' +
u' COMMENT "Fiscal Year End Month",\n' +
u' `currency` varchar(50) NOT NULL\n' +
u' COMMENT "Currency",\n' +
u' PRIMARY KEY USING BTREE (`ticker`))\n' +
u'ENGINE=MyISAM DEFAULT CHARSET=utf8', conn)
_db_execute(
u'REPLACE INTO `%s`\n' % table_name +
u' (`ticker`, `fiscal_year_end`, `currency`)\nVALUES\n' +
u'("%s", %d, "%s")' % (
ticker, self._fiscal_year_end, self._currency), conn)
@staticmethod
def _get_db_create_table(table_name):
u"""Returns the MySQL CREATE TABLE statement for the given table_name.
:param table_name: Name of the MySQL table.
:return MySQL CREATE TABLE statement.
"""
year = date.today().year
year_range = range(year - 6, year + 2)
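# The year columns span from six years back through next year, which
# leaves slack around the five annual columns Morningstar returns and any
# fiscal-year offsets.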
columns = u',\n'.join(
[u' `year_%d` DECIMAL(20,5) DEFAULT NULL ' % year +
u'COMMENT "Year %d"' % year
for year in year_range])
return (
u'CREATE TABLE `%s` (\n' % table_name +
u' `ticker` VARCHAR(50) NOT NULL COMMENT "Exchange:Ticker",\n' +
u' `id` int(10) unsigned NOT NULL COMMENT "Id",\n' +
u' `parent_id` int(10) unsigned NOT NULL COMMENT "Parent Id",\n' +
u' `item` varchar(500) NOT NULL COMMENT "Item",\n' +
u'%s,\n' % columns +
u' PRIMARY KEY USING BTREE (`ticker`, `id`),\n' +
u' KEY `ix_ticker` USING BTREE (`ticker`))\n' +
u'ENGINE=MyISAM DEFAULT CHARSET=utf8')
@staticmethod
def _get_db_replace_values(ticker, frame,
table_name):
u"""Returns the MySQL REPLACE INTO statement for the given
Morningstar ticker and the corresponding pandas.DataFrame.
:param ticker: Morningstar ticker.
:param frame: pandas.DataFrame.
:param table_name: Name of the MySQL table.
:return MySQL REPLACE INTO statement.
"""
columns = [u'`ticker`', u'`id`, `parent_id`, `item`'] + \
[u'`year_%d`' % period.year for period in
frame.columns[2:]]
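# The generated statement has roughly the shape:
#   REPLACE INTO `<table>` (`ticker`, `id`, `parent_id`, `item`, `year_...`)
#   VALUES ("<ticker>", <id>, <parent_id>, "<item>", <value or NULL>, ...), ...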
return (
u'REPLACE INTO `%s`\n' % table_name +
u' (%s)\nVALUES\n' % u', '.join(columns) +
u',\n'.join([u'("' + ticker + u'", %d, %d, "%s", ' %
(index, frame.ix[index, u'parent_index'],
frame.ix[index, u'title']) +
u', '.join(
[u'NULL' if np.isnan(frame.ix[index, period])
else u'%.5f' % frame.ix[index, period]
for period in frame.columns[2:]]) + u')'
for index in frame.index]))
def _db_table_exists(table_name, conn):
u"""Helper method for checking whether the given MySQL table exists.
:param table_name: Name of the MySQL table to be checked.
:param conn: MySQL connection.
:return True iff the given MySQL table exists.
"""
cursor = conn.cursor()
cursor.execute(u"""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = '{0}'""".format(table_name))
table_exists = cursor.fetchone()[0] == 1
cursor.close()
return table_exists
def _db_execute(query, conn):
u"""Helper method for executing the given MySQL non-query.
:param query: MySQL query to be executed.
:param conn: MySQL connection.
"""
cursor = conn.cursor()
cursor.execute(query)
cursor.close()
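# A minimal usage sketch (hypothetical names, not part of this module):
# assuming the enclosing downloader class exposes a public `download` method
# that takes a ticker and an optional MySQL connection, fetching a ticker's
# annual statements without uploading them might look like:
#
#     downloader = FinancialsDownloader()        # class name assumed
#     frames = downloader.download('XNAS:AAPL')  # conn omitted -> no upload
#     print(frames['period_range'], frames['currency'])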
| mit |