repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values |
---|---|---|---|---|---|
bnaul/scikit-learn | sklearn/compose/tests/test_column_transformer.py | 2 | 52309 | """
Test the ColumnTransformer.
"""
import re
import pickle
import warnings
import numpy as np
from scipy import sparse
import pytest
from numpy.testing import assert_allclose
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_almost_equal
from sklearn.base import BaseEstimator
from sklearn.compose import (
ColumnTransformer, make_column_transformer, make_column_selector
)
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler, Normalizer, OneHotEncoder
from sklearn.feature_extraction import DictVectorizer
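# Lightweight transformer stubs used throughout these tests. They implement
# only fit/transform so that column selection, output stacking and remainder
# handling can be exercised without relying on real estimators.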
class Trans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# 1D Series -> 2D DataFrame
if hasattr(X, 'to_frame'):
return X.to_frame()
# 1D array -> 2D array
if X.ndim == 1:
return np.atleast_2d(X).T
return X
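# Doubles every value; used to check the behaviour of a ``remainder`` estimator.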
class DoubleTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X):
return 2*X
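# Ignores the column values and returns an (n_samples, n_samples) sparse
# identity matrix, which is useful for testing sparse stacking.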
class SparseMatrixTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
n_samples = len(X)
return sparse.eye(n_samples, n_samples).tocsr()
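# Returns its input unchanged, so a scalar column selection stays 1D and
# triggers the 2D-output validation error.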
class TransNo2D(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
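# Raises in both fit and transform so tests can check that transformer
# errors propagate with their original message.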
class TransRaise(BaseEstimator):
def fit(self, X, y=None):
raise ValueError("specific message")
def transform(self, X, y=None):
raise ValueError("specific message")
def test_column_transformer():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first1D = np.array([0, 1, 2])
X_res_second1D = np.array([2, 4, 6])
X_res_first = X_res_first1D.reshape(-1, 1)
X_res_both = X_array
cases = [
# single column 1D / 2D
(0, X_res_first),
([0], X_res_first),
# list-like
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), res)
assert_array_equal(ct.fit(X_array).transform(X_array), res)
# callable that returns any of the allowed specifiers
ct = ColumnTransformer([('trans', Trans(), lambda x: selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), res)
assert_array_equal(ct.fit(X_array).transform(X_array), res)
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_res_first1D,
transformer_weights['trans2'] * X_res_second1D]).T
assert_array_equal(both.fit_transform(X_array), res)
assert_array_equal(both.fit(X_array).transform(X_array), res)
assert len(both.transformers_) == 2
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
assert len(both.transformers_) == 1
def test_column_transformer_dataframe():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_both = X_array
cases = [
# String keys: label based
# scalar
('first', X_res_first),
# list
(['first'], X_res_first),
(['first', 'second'], X_res_both),
# slice
(slice('first', 'second'), X_res_both),
# int keys: positional
# scalar
(0, X_res_first),
# list
([0], X_res_first),
([0, 1], X_res_both),
(np.array([0, 1]), X_res_both),
# slice
(slice(0, 1), X_res_first),
(slice(0, 2), X_res_both),
# boolean mask
(np.array([True, False]), X_res_first),
(pd.Series([True, False], index=['first', 'second']), X_res_first),
]
for selection, res in cases:
ct = ColumnTransformer([('trans', Trans(), selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), res)
assert_array_equal(ct.fit(X_df).transform(X_df), res)
# callable that returns any of the allowed specifiers
ct = ColumnTransformer([('trans', Trans(), lambda X: selection)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), res)
assert_array_equal(ct.fit(X_df).transform(X_df), res)
ct = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
ct = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', Trans(), [1])])
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# test with transformer_weights
transformer_weights = {'trans1': .1, 'trans2': 10}
both = ColumnTransformer([('trans1', Trans(), ['first']),
('trans2', Trans(), ['second'])],
transformer_weights=transformer_weights)
res = np.vstack([transformer_weights['trans1'] * X_df['first'],
transformer_weights['trans2'] * X_df['second']]).T
assert_array_equal(both.fit_transform(X_df), res)
assert_array_equal(both.fit(X_df).transform(X_df), res)
assert len(both.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# test multiple columns
both = ColumnTransformer([('trans', Trans(), ['first', 'second'])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
assert len(both.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
both = ColumnTransformer([('trans', Trans(), [0, 1])],
transformer_weights={'trans': .1})
assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
assert len(both.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
# ensure pandas object is passed through
class TransAssert(BaseEstimator):
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
assert isinstance(X, (pd.DataFrame, pd.Series))
if isinstance(X, pd.Series):
X = X.to_frame()
return X
ct = ColumnTransformer([('trans', TransAssert(), 'first')],
remainder='drop')
ct.fit_transform(X_df)
ct = ColumnTransformer([('trans', TransAssert(), ['first', 'second'])])
ct.fit_transform(X_df)
# integer column spec + integer column names -> still use positional
X_df2 = X_df.copy()
X_df2.columns = [1, 0]
ct = ColumnTransformer([('trans', Trans(), 0)], remainder='drop')
assert_array_equal(ct.fit_transform(X_df2), X_res_first)
assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'drop'
assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("pandas", [True, False], ids=['pandas', 'numpy'])
@pytest.mark.parametrize("column", [[], np.array([False, False])],
ids=['list', 'bool'])
def test_column_transformer_empty_columns(pandas, column):
# test case that ensures that the column transformer does also work when
# a given transformer doesn't have any columns to work on
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_both = X_array
if pandas:
pd = pytest.importorskip('pandas')
X = pd.DataFrame(X_array, columns=['first', 'second'])
else:
X = X_array
ct = ColumnTransformer([('trans1', Trans(), [0, 1]),
('trans2', Trans(), column)])
assert_array_equal(ct.fit_transform(X), X_res_both)
assert_array_equal(ct.fit(X).transform(X), X_res_both)
assert len(ct.transformers_) == 2
assert isinstance(ct.transformers_[1][1], Trans)
ct = ColumnTransformer([('trans1', Trans(), column),
('trans2', Trans(), [0, 1])])
assert_array_equal(ct.fit_transform(X), X_res_both)
assert_array_equal(ct.fit(X).transform(X), X_res_both)
assert len(ct.transformers_) == 2
assert isinstance(ct.transformers_[0][1], Trans)
ct = ColumnTransformer([('trans', Trans(), column)],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X), X_res_both)
assert_array_equal(ct.fit(X).transform(X), X_res_both)
assert len(ct.transformers_) == 2 # including remainder
assert isinstance(ct.transformers_[0][1], Trans)
fixture = np.array([[], [], []])
ct = ColumnTransformer([('trans', Trans(), column)],
remainder='drop')
assert_array_equal(ct.fit_transform(X), fixture)
assert_array_equal(ct.fit(X).transform(X), fixture)
assert len(ct.transformers_) == 2 # including remainder
assert isinstance(ct.transformers_[0][1], Trans)
def test_column_transformer_sparse_array():
X_sparse = sparse.eye(3, 2).tocsr()
# no distinction between 1D and 2D
X_res_first = X_sparse[:, 0]
X_res_both = X_sparse
for col in [0, [0], slice(0, 1)]:
for remainder, res in [('drop', X_res_first),
('passthrough', X_res_both)]:
ct = ColumnTransformer([('trans', Trans(), col)],
remainder=remainder,
sparse_threshold=0.8)
assert sparse.issparse(ct.fit_transform(X_sparse))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
res)
for col in [[0, 1], slice(0, 2)]:
ct = ColumnTransformer([('trans', Trans(), col)],
sparse_threshold=0.8)
assert sparse.issparse(ct.fit_transform(X_sparse))
assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
X_res_both)
def test_column_transformer_list():
X_list = [
[1, float('nan'), 'a'],
[0, 0, 'b']
]
expected_result = np.array([
[1, float('nan'), 1, 0],
[-1, 0, 0, 1],
])
ct = ColumnTransformer([
('numerical', StandardScaler(), [0, 1]),
('categorical', OneHotEncoder(), [2]),
])
assert_array_equal(ct.fit_transform(X_list), expected_result)
assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
def test_column_transformer_sparse_stacking():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
col_trans = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', SparseMatrixTrans(), 1)],
sparse_threshold=0.8)
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
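# SparseMatrixTrans returns an (n_samples, n_samples) identity, so the stacked
# output has n_samples rows and n_samples + 1 columns.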
assert sparse.issparse(X_trans)
assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
assert len(col_trans.transformers_) == 2
assert col_trans.transformers_[-1][0] != 'remainder'
col_trans = ColumnTransformer([('trans1', Trans(), [0]),
('trans2', SparseMatrixTrans(), 1)],
sparse_threshold=0.1)
col_trans.fit(X_array)
X_trans = col_trans.transform(X_array)
assert not sparse.issparse(X_trans)
assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_mixed_cols_sparse():
df = np.array([['a', 1, True],
['b', 2, False]],
dtype='O')
ct = make_column_transformer(
(OneHotEncoder(), [0]),
('passthrough', [1, 2]),
sparse_threshold=1.0
)
# this shouldn't fail, since booleans can be coerced into a numeric dtype
# See: https://github.com/scikit-learn/scikit-learn/issues/11912
X_trans = ct.fit_transform(df)
assert X_trans.getformat() == 'csr'
assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1],
[0, 1, 2, 0]]))
ct = make_column_transformer(
(OneHotEncoder(), [0]),
('passthrough', [0]),
sparse_threshold=1.0
)
with pytest.raises(ValueError,
match="For a sparse output, all columns should"):
# this fails since strings `a` and `b` cannot be
# coerced into a numeric dtype.
ct.fit_transform(df)
def test_column_transformer_sparse_threshold():
X_array = np.array([['a', 'b'], ['A', 'B']], dtype=object).T
# above data has sparsity of 4 / 8 = 0.5
# apply threshold even if all sparse
col_trans = ColumnTransformer([('trans1', OneHotEncoder(), [0]),
('trans2', OneHotEncoder(), [1])],
sparse_threshold=0.2)
res = col_trans.fit_transform(X_array)
assert not sparse.issparse(res)
assert not col_trans.sparse_output_
# mixed -> sparsity of (4 + 2) / 8 = 0.75
for thres in [0.75001, 1]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=True), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert sparse.issparse(res)
assert col_trans.sparse_output_
for thres in [0.75, 0]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=True), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert not sparse.issparse(res)
assert not col_trans.sparse_output_
# if nothing is sparse -> no sparse
for thres in [0.33, 0, 1]:
col_trans = ColumnTransformer(
[('trans1', OneHotEncoder(sparse=False), [0]),
('trans2', OneHotEncoder(sparse=False), [1])],
sparse_threshold=thres)
res = col_trans.fit_transform(X_array)
assert not sparse.issparse(res)
assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
col_trans = ColumnTransformer([('trans', StandardScaler(), 0)])
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit, X_array)
assert_raise_message(ValueError, "1D data passed to a transformer",
col_trans.fit_transform, X_array)
col_trans = ColumnTransformer([('trans', TransRaise(), 0)])
for func in [col_trans.fit, col_trans.fit_transform]:
assert_raise_message(ValueError, "specific message", func, X_array)
def test_2D_transformer_output():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
# if one transformer is dropped, test that name is still correct
ct = ColumnTransformer([('trans1', 'drop', 0),
('trans2', TransNo2D(), 1)])
assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
ct.fit_transform, X_array)
# because fit is also doing transform, this raises already on fit
assert_raise_message(ValueError, "the 'trans2' transformer should be 2D",
ct.fit, X_array)
def test_2D_transformer_output_pandas():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['col1', 'col2'])
# if one transformer is dropped, test that name is still correct
ct = ColumnTransformer([('trans1', TransNo2D(), 'col1')])
assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
ct.fit_transform, X_df)
# because fit is also doing transform, this raises already on fit
assert_raise_message(ValueError, "the 'trans1' transformer should be 2D",
ct.fit, X_df)
@pytest.mark.parametrize("remainder", ['drop', 'passthrough'])
def test_column_transformer_invalid_columns(remainder):
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
# general invalid
for col in [1.5, ['string', 1], slice(1, 's'), np.array([1.])]:
ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
assert_raise_message(ValueError, "No valid specification",
ct.fit, X_array)
# invalid for arrays
for col in ['string', ['string', 'other'], slice('a', 'b')]:
ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
assert_raise_message(ValueError, "Specifying the columns",
ct.fit, X_array)
# transformed n_features does not match fitted n_features
col = [0, 1]
ct = ColumnTransformer([('trans', Trans(), col)], remainder=remainder)
ct.fit(X_array)
X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
msg = ("Given feature/column names or counts do not match the ones for "
"the data given during fit.")
with pytest.warns(FutureWarning, match=msg):
ct.transform(X_array_more) # Should accept added columns, for now
X_array_fewer = np.array([[0, 1, 2], ]).T
err_msg = 'Number of features'
with pytest.raises(ValueError, match=err_msg):
ct.transform(X_array_fewer)
def test_column_transformer_invalid_transformer():
class NoTrans(BaseEstimator):
def fit(self, X, y=None):
return self
def predict(self, X):
return X
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
ct = ColumnTransformer([('trans', NoTrans(), [0])])
assert_raise_message(TypeError,
"All estimators should implement fit and transform",
ct.fit, X_array)
def test_make_column_transformer():
scaler = StandardScaler()
norm = Normalizer()
ct = make_column_transformer((scaler, 'first'), (norm, ['second']))
names, transformers, columns = zip(*ct.transformers)
assert names == ("standardscaler", "normalizer")
assert transformers == (scaler, norm)
assert columns == ('first', ['second'])
def test_make_column_transformer_pandas():
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
norm = Normalizer()
ct1 = ColumnTransformer([('norm', Normalizer(), X_df.columns)])
ct2 = make_column_transformer((norm, X_df.columns))
assert_almost_equal(ct1.fit_transform(X_df),
ct2.fit_transform(X_df))
def test_make_column_transformer_kwargs():
scaler = StandardScaler()
norm = Normalizer()
ct = make_column_transformer((scaler, 'first'), (norm, ['second']),
n_jobs=3, remainder='drop',
sparse_threshold=0.5)
assert ct.transformers == make_column_transformer(
(scaler, 'first'), (norm, ['second'])).transformers
assert ct.n_jobs == 3
assert ct.remainder == 'drop'
assert ct.sparse_threshold == 0.5
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
"make_column_transformer() got an unexpected "
"keyword argument 'transformer_weights'",
make_column_transformer, (scaler, 'first'), (norm, ['second']),
transformer_weights={'pca': 10, 'Transf': 1}
)
def test_make_column_transformer_remainder_transformer():
scaler = StandardScaler()
norm = Normalizer()
remainder = StandardScaler()
ct = make_column_transformer((scaler, 'first'), (norm, ['second']),
remainder=remainder)
assert ct.remainder == remainder
def test_column_transformer_get_set_params():
ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
('trans2', StandardScaler(), [1])])
exp = {'n_jobs': None,
'remainder': 'drop',
'sparse_threshold': 0.3,
'trans1': ct.transformers[0][1],
'trans1__copy': True,
'trans1__with_mean': True,
'trans1__with_std': True,
'trans2': ct.transformers[1][1],
'trans2__copy': True,
'trans2__with_mean': True,
'trans2__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None,
'verbose': False}
assert ct.get_params() == exp
ct.set_params(trans1__with_mean=False)
assert not ct.get_params()['trans1__with_mean']
ct.set_params(trans1='passthrough')
exp = {'n_jobs': None,
'remainder': 'drop',
'sparse_threshold': 0.3,
'trans1': 'passthrough',
'trans2': ct.transformers[1][1],
'trans2__copy': True,
'trans2__with_mean': True,
'trans2__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None,
'verbose': False}
assert ct.get_params() == exp
def test_column_transformer_named_estimators():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans1', StandardScaler(), [0]),
('trans2', StandardScaler(with_std=False), [1])])
assert not hasattr(ct, 'transformers_')
ct.fit(X_array)
assert hasattr(ct, 'transformers_')
assert isinstance(ct.named_transformers_['trans1'], StandardScaler)
assert isinstance(ct.named_transformers_.trans1, StandardScaler)
assert isinstance(ct.named_transformers_['trans2'], StandardScaler)
assert isinstance(ct.named_transformers_.trans2, StandardScaler)
assert not ct.named_transformers_.trans2.with_std
# check these are fitted transformers
assert ct.named_transformers_.trans1.mean_ == 1.
def test_column_transformer_cloning():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans', StandardScaler(), [0])])
ct.fit(X_array)
assert not hasattr(ct.transformers[0][1], 'mean_')
assert hasattr(ct.transformers_[0][1], 'mean_')
ct = ColumnTransformer([('trans', StandardScaler(), [0])])
ct.fit_transform(X_array)
assert not hasattr(ct.transformers[0][1], 'mean_')
assert hasattr(ct.transformers_[0][1], 'mean_')
def test_column_transformer_get_feature_names():
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer([('trans', Trans(), [0, 1])])
# raise correct error when not fitted
with pytest.raises(NotFittedError):
ct.get_feature_names()
# raise correct error when no feature names are available
ct.fit(X_array)
assert_raise_message(AttributeError,
"Transformer trans (type Trans) does not provide "
"get_feature_names", ct.get_feature_names)
# working example
X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],
[{'c': 5}, {'c': 6}]], dtype=object).T
ct = ColumnTransformer(
[('col' + str(i), DictVectorizer(), i) for i in range(2)])
ct.fit(X)
assert ct.get_feature_names() == ['col0__a', 'col0__b', 'col1__c']
# drop transformer
ct = ColumnTransformer(
[('col0', DictVectorizer(), 0), ('col1', 'drop', 1)])
ct.fit(X)
assert ct.get_feature_names() == ['col0__a', 'col0__b']
# passthrough transformer
ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])
ct.fit(X)
assert ct.get_feature_names() == ['x0', 'x1']
ct = ColumnTransformer([('trans', DictVectorizer(), 0)],
remainder='passthrough')
ct.fit(X)
assert ct.get_feature_names() == ['trans__a', 'trans__b', 'x1']
ct = ColumnTransformer([('trans', 'passthrough', [1])],
remainder='passthrough')
ct.fit(X)
assert ct.get_feature_names() == ['x1', 'x0']
ct = ColumnTransformer([('trans', 'passthrough', lambda x: [1])],
remainder='passthrough')
ct.fit(X)
assert ct.get_feature_names() == ['x1', 'x0']
ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))],
remainder='passthrough')
ct.fit(X)
assert ct.get_feature_names() == ['x1', 'x0']
ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))],
remainder='passthrough')
ct.fit(X)
assert ct.get_feature_names() == ['x1', 'x0']
def test_column_transformer_get_feature_names_dataframe():
# passthrough transformer with a dataframe
pd = pytest.importorskip('pandas')
X = np.array([[{'a': 1, 'b': 2}, {'a': 3, 'b': 4}],
[{'c': 5}, {'c': 6}]], dtype=object).T
X_df = pd.DataFrame(X, columns=['col0', 'col1'])
ct = ColumnTransformer([('trans', 'passthrough', ['col0', 'col1'])])
ct.fit(X_df)
assert ct.get_feature_names() == ['col0', 'col1']
ct = ColumnTransformer([('trans', 'passthrough', [0, 1])])
ct.fit(X_df)
assert ct.get_feature_names() == ['col0', 'col1']
ct = ColumnTransformer([('col0', DictVectorizer(), 0)],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col0__a', 'col0__b', 'col1']
ct = ColumnTransformer([('trans', 'passthrough', ['col1'])],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col1', 'col0']
ct = ColumnTransformer([('trans', 'passthrough',
lambda x: x[['col1']].columns)],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col1', 'col0']
ct = ColumnTransformer([('trans', 'passthrough', np.array([False, True]))],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col1', 'col0']
ct = ColumnTransformer([('trans', 'passthrough', slice(1, 2))],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col1', 'col0']
ct = ColumnTransformer([('trans', 'passthrough', [1])],
remainder='passthrough')
ct.fit(X_df)
assert ct.get_feature_names() == ['col1', 'col0']
def test_column_transformer_special_strings():
# one 'drop' -> ignore
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', 'drop', [1])])
exp = np.array([[0.], [1.], [2.]])
assert_array_equal(ct.fit_transform(X_array), exp)
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# all 'drop' -> return shape 0 array
ct = ColumnTransformer(
[('trans1', 'drop', [0]), ('trans2', 'drop', [1])])
assert_array_equal(ct.fit(X_array).transform(X_array).shape, (3, 0))
assert_array_equal(ct.fit_transform(X_array).shape, (3, 0))
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# 'passthrough'
X_array = np.array([[0., 1., 2.], [2., 4., 6.]]).T
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', 'passthrough', [1])])
exp = X_array
assert_array_equal(ct.fit_transform(X_array), exp)
assert_array_equal(ct.fit(X_array).transform(X_array), exp)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] != 'remainder'
# None itself / other string is not valid
for val in [None, 'other']:
ct = ColumnTransformer(
[('trans1', Trans(), [0]), ('trans2', val, [1])])
assert_raise_message(TypeError, "All estimators should implement",
ct.fit_transform, X_array)
assert_raise_message(TypeError, "All estimators should implement",
ct.fit, X_array)
def test_column_transformer_remainder():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
X_res_both = X_array
# default drop
ct = ColumnTransformer([('trans1', Trans(), [0])])
assert_array_equal(ct.fit_transform(X_array), X_res_first)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'drop'
assert_array_equal(ct.transformers_[-1][2], [1])
# specify passthrough
ct = ColumnTransformer([('trans', Trans(), [0])], remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
# column order is not preserved (passed-through columns are appended at the end)
ct = ColumnTransformer([('trans1', Trans(), [1])],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both[:, ::-1])
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both[:, ::-1])
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [0])
# passthrough when all actual transformers are skipped
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_second)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_second)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
# error on invalid arg
ct = ColumnTransformer([('trans1', Trans(), [0])], remainder=1)
assert_raise_message(
ValueError,
"remainder keyword needs to be one of \'drop\', \'passthrough\', "
"or estimator.", ct.fit, X_array)
assert_raise_message(
ValueError,
"remainder keyword needs to be one of \'drop\', \'passthrough\', "
"or estimator.", ct.fit_transform, X_array)
# check default for make_column_transformer
ct = make_column_transformer((Trans(), [0]))
assert ct.remainder == 'drop'
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
np.array([True, False])])
def test_column_transformer_remainder_numpy(key):
# test different ways that columns are specified with passthrough
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_both = X_array
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize(
"key", [[0], slice(0, 1), np.array([True, False]), ['first'], 'pd-index',
np.array(['first']), np.array(['first'], dtype=object),
slice(None, 'first'), slice('first', 'first')])
def test_column_transformer_remainder_pandas(key):
# test different ways that columns are specified with passthrough
pd = pytest.importorskip('pandas')
if isinstance(key, str) and key == 'pd-index':
key = pd.Index(['first'])
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
X_res_both = X_array
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder='passthrough')
assert_array_equal(ct.fit_transform(X_df), X_res_both)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][1] == 'passthrough'
assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("key", [[0], np.array([0]), slice(0, 1),
np.array([True, False, False])])
def test_column_transformer_remainder_transformer(key):
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
X_res_both = X_array.copy()
# second and third columns are doubled when remainder = DoubleTrans
X_res_both[:, 1:3] *= 2
ct = ColumnTransformer([('trans1', Trans(), key)],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_no_remaining_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', Trans(), [0, 1, 2])],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_array)
assert_array_equal(ct.fit(X_array).transform(X_array), X_array)
assert len(ct.transformers_) == 1
assert ct.transformers_[-1][0] != 'remainder'
def test_column_transformer_drops_all_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
# columns are doubled when remainder = DoubleTrans
X_res_both = 2 * X_array.copy()[:, 1:3]
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder=DoubleTrans())
assert_array_equal(ct.fit_transform(X_array), X_res_both)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], DoubleTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_sparse_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', Trans(), [0])],
remainder=SparseMatrixTrans(),
sparse_threshold=0.8)
X_trans = ct.fit_transform(X_array)
assert sparse.issparse(X_trans)
# SparseMatrixTrans creates 3 features for each column. There is
# one column in ``transformers``, thus:
assert X_trans.shape == (3, 3 + 1)
exp_array = np.hstack(
(X_array[:, 0].reshape(-1, 1), np.eye(3)))
assert_array_equal(X_trans.toarray(), exp_array)
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_drop_all_sparse_remainder_transformer():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).T
ct = ColumnTransformer([('trans1', 'drop', [0])],
remainder=SparseMatrixTrans(),
sparse_threshold=0.8)
X_trans = ct.fit_transform(X_array)
assert sparse.issparse(X_trans)
# SparseMatrixTrans creates 3 features for each column, thus:
assert X_trans.shape == (3, 3)
assert_array_equal(X_trans.toarray(), np.eye(3))
assert len(ct.transformers_) == 2
assert ct.transformers_[-1][0] == 'remainder'
assert isinstance(ct.transformers_[-1][1], SparseMatrixTrans)
assert_array_equal(ct.transformers_[-1][2], [1, 2])
def test_column_transformer_get_set_params_with_remainder():
ct = ColumnTransformer([('trans1', StandardScaler(), [0])],
remainder=StandardScaler())
exp = {'n_jobs': None,
'remainder': ct.remainder,
'remainder__copy': True,
'remainder__with_mean': True,
'remainder__with_std': True,
'sparse_threshold': 0.3,
'trans1': ct.transformers[0][1],
'trans1__copy': True,
'trans1__with_mean': True,
'trans1__with_std': True,
'transformers': ct.transformers,
'transformer_weights': None,
'verbose': False}
assert ct.get_params() == exp
ct.set_params(remainder__with_std=False)
assert not ct.get_params()['remainder__with_std']
ct.set_params(trans1='passthrough')
exp = {'n_jobs': None,
'remainder': ct.remainder,
'remainder__copy': True,
'remainder__with_mean': True,
'remainder__with_std': False,
'sparse_threshold': 0.3,
'trans1': 'passthrough',
'transformers': ct.transformers,
'transformer_weights': None,
'verbose': False}
assert ct.get_params() == exp
def test_column_transformer_no_estimators():
X_array = np.array([[0, 1, 2],
[2, 4, 6],
[8, 6, 4]]).astype('float').T
ct = ColumnTransformer([], remainder=StandardScaler())
params = ct.get_params()
assert params['remainder__with_mean']
X_trans = ct.fit_transform(X_array)
assert X_trans.shape == X_array.shape
assert len(ct.transformers_) == 1
assert ct.transformers_[-1][0] == 'remainder'
assert ct.transformers_[-1][2] == [0, 1, 2]
@pytest.mark.parametrize(
['est', 'pattern'],
[(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],
remainder=DoubleTrans()),
(r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n'
r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$'
)),
(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],
remainder='passthrough'),
(r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n'
r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$'
)),
(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', 'drop', [1])],
remainder='passthrough'),
(r'\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$'
)),
(ColumnTransformer([('trans1', Trans(), [0]),
('trans2', 'passthrough', [1])],
remainder='passthrough'),
(r'\[ColumnTransformer\].*\(1 of 3\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 3\) Processing trans2.* total=.*\n'
r'\[ColumnTransformer\].*\(3 of 3\) Processing remainder.* total=.*\n$'
)),
(ColumnTransformer([('trans1', Trans(), [0])], remainder='passthrough'),
(r'\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 2\) Processing remainder.* total=.*\n$'
)),
(ColumnTransformer([('trans1', Trans(), [0]), ('trans2', Trans(), [1])],
remainder='drop'),
(r'\[ColumnTransformer\].*\(1 of 2\) Processing trans1.* total=.*\n'
r'\[ColumnTransformer\].*\(2 of 2\) Processing trans2.* total=.*\n$')),
(ColumnTransformer([('trans1', Trans(), [0])], remainder='drop'),
(r'\[ColumnTransformer\].*\(1 of 1\) Processing trans1.* total=.*\n$'))])
@pytest.mark.parametrize('method', ['fit', 'fit_transform'])
def test_column_transformer_verbose(est, pattern, method, capsys):
X_array = np.array([[0, 1, 2], [2, 4, 6], [8, 6, 4]]).T
func = getattr(est, method)
est.set_params(verbose=False)
func(X_array)
assert not capsys.readouterr().out, 'Got output for verbose=False'
est.set_params(verbose=True)
func(X_array)
assert re.match(pattern, capsys.readouterr()[0])
def test_column_transformer_no_estimators_set_params():
ct = ColumnTransformer([]).set_params(n_jobs=2)
assert ct.n_jobs == 2
def test_column_transformer_callable_specifier():
# assert that function gets the full array
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([[0, 1, 2]]).T
def func(X):
assert_array_equal(X, X_array)
return [0]
ct = ColumnTransformer([('trans', Trans(), func)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_array), X_res_first)
assert_array_equal(ct.fit(X_array).transform(X_array), X_res_first)
assert callable(ct.transformers[0][2])
assert ct.transformers_[0][2] == [0]
def test_column_transformer_callable_specifier_dataframe():
# assert that function gets the full dataframe
pd = pytest.importorskip('pandas')
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([[0, 1, 2]]).T
X_df = pd.DataFrame(X_array, columns=['first', 'second'])
def func(X):
assert_array_equal(X.columns, X_df.columns)
assert_array_equal(X.values, X_df.values)
return ['first']
ct = ColumnTransformer([('trans', Trans(), func)],
remainder='drop')
assert_array_equal(ct.fit_transform(X_df), X_res_first)
assert_array_equal(ct.fit(X_df).transform(X_df), X_res_first)
assert callable(ct.transformers[0][2])
assert ct.transformers_[0][2] == ['first']
def test_column_transformer_negative_column_indexes():
X = np.random.randn(2, 2)
X_categories = np.array([[1], [2]])
X = np.concatenate([X, X_categories], axis=1)
ohe = OneHotEncoder()
tf_1 = ColumnTransformer([('ohe', ohe, [-1])], remainder='passthrough')
tf_2 = ColumnTransformer([('ohe', ohe, [2])], remainder='passthrough')
assert_array_equal(tf_1.fit_transform(X), tf_2.fit_transform(X))
@pytest.mark.parametrize("explicit_colname", ['first', 'second'])
def test_column_transformer_reordered_column_names_remainder(explicit_colname):
"""Regression test for issue #14223: 'Named col indexing fails with
ColumnTransformer remainder on changing DataFrame column ordering'
Should raise error on changed order combined with remainder.
Should allow for added columns in `transform` input DataFrame
as long as all preceding columns match.
"""
pd = pytest.importorskip('pandas')
X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_fit_df = pd.DataFrame(X_fit_array, columns=['first', 'second'])
X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
X_trans_df = pd.DataFrame(X_trans_array, columns=['second', 'first'])
tf = ColumnTransformer([('bycol', Trans(), explicit_colname)],
remainder=Trans())
tf.fit(X_fit_df)
err_msg = 'Column ordering must be equal'
warn_msg = ("Given feature/column names or counts do not match the ones "
"for the data given during fit.")
with pytest.raises(ValueError, match=err_msg):
tf.transform(X_trans_df)
# No error for added columns if ordering is identical
X_extended_df = X_fit_df.copy()
X_extended_df['third'] = [3, 6, 9]
with pytest.warns(FutureWarning, match=warn_msg):
tf.transform(X_extended_df) # No error should be raised, for now
# No 'columns' AttributeError when transform input is a numpy array
X_array = X_fit_array.copy()
err_msg = 'Specifying the columns'
with pytest.raises(ValueError, match=err_msg):
tf.transform(X_array)
def test_feature_name_validation():
"""Tests if the proper warning/error is raised if the columns do not match
during fit and transform."""
pd = pytest.importorskip("pandas")
X = np.ones(shape=(3, 2))
X_extra = np.ones(shape=(3, 3))
df = pd.DataFrame(X, columns=['a', 'b'])
df_extra = pd.DataFrame(X_extra, columns=['a', 'b', 'c'])
tf = ColumnTransformer([('bycol', Trans(), ['a', 'b'])])
tf.fit(df)
msg = ("Given feature/column names or counts do not match the ones for "
"the data given during fit.")
with pytest.warns(FutureWarning, match=msg):
tf.transform(df_extra)
tf = ColumnTransformer([('bycol', Trans(), [0])])
tf.fit(df)
with pytest.warns(FutureWarning, match=msg):
tf.transform(X_extra)
with warnings.catch_warnings(record=True) as warns:
tf.transform(X)
assert not warns
tf = ColumnTransformer([('bycol', Trans(), ['a'])],
remainder=Trans())
tf.fit(df)
with pytest.warns(FutureWarning, match=msg):
tf.transform(df_extra)
tf = ColumnTransformer([('bycol', Trans(), [0, -1])])
tf.fit(df)
msg = "At least one negative column was used to"
with pytest.raises(RuntimeError, match=msg):
tf.transform(df_extra)
tf = ColumnTransformer([('bycol', Trans(), slice(-1, -3, -1))])
tf.fit(df)
with pytest.raises(RuntimeError, match=msg):
tf.transform(df_extra)
with warnings.catch_warnings(record=True) as warns:
tf.transform(df)
assert not warns
@pytest.mark.parametrize("array_type", [np.asarray, sparse.csr_matrix])
def test_column_transformer_mask_indexing(array_type):
# Regression test for #14510
# A boolean array-like does not behave like a boolean array with NumPy < 1.12,
# nor when indexing sparse matrices
X = np.transpose([[1, 2, 3], [4, 5, 6], [5, 6, 7], [8, 9, 10]])
X = array_type(X)
column_transformer = ColumnTransformer(
[('identity', FunctionTransformer(), [False, True, False, True])]
)
X_trans = column_transformer.fit_transform(X)
assert X_trans.shape == (3, 2)
def test_n_features_in():
# make sure n_features_in_ matches the number of features passed to the
# column transformer.
X = [[1, 2], [3, 4], [5, 6]]
ct = ColumnTransformer([('a', DoubleTrans(), [0]),
('b', DoubleTrans(), [1])])
assert not hasattr(ct, 'n_features_in_')
ct.fit(X)
assert ct.n_features_in_ == 2
@pytest.mark.parametrize('cols, pattern, include, exclude', [
(['col_int', 'col_float'], None, np.number, None),
(['col_int', 'col_float'], None, None, object),
(['col_int', 'col_float'], None, [int, float], None),
(['col_str'], None, [object], None),
(['col_str'], None, object, None),
(['col_float'], None, float, None),
(['col_float'], 'at$', [np.number], None),
(['col_int'], None, [int], None),
(['col_int'], '^col_int', [np.number], None),
(['col_float', 'col_str'], 'float|str', None, None),
(['col_str'], '^col_s', None, [int]),
([], 'str$', float, None),
(['col_int', 'col_float', 'col_str'], None, [np.number, object], None),
])
def test_make_column_selector_with_select_dtypes(cols, pattern, include,
exclude):
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame({
'col_int': np.array([0, 1, 2], dtype=int),
'col_float': np.array([0.0, 1.0, 2.0], dtype=float),
'col_str': ["one", "two", "three"],
}, columns=['col_int', 'col_float', 'col_str'])
selector = make_column_selector(
dtype_include=include, dtype_exclude=exclude, pattern=pattern)
assert_array_equal(selector(X_df), cols)
def test_column_transformer_with_make_column_selector():
# Functional test for column transformer + column selector
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame({
'col_int': np.array([0, 1, 2], dtype=int),
'col_float': np.array([0.0, 1.0, 2.0], dtype=float),
'col_cat': ["one", "two", "one"],
'col_str': ["low", "middle", "high"]
}, columns=['col_int', 'col_float', 'col_cat', 'col_str'])
X_df['col_str'] = X_df['col_str'].astype('category')
cat_selector = make_column_selector(dtype_include=['category', object])
num_selector = make_column_selector(dtype_include=np.number)
ohe = OneHotEncoder()
scaler = StandardScaler()
ct_selector = make_column_transformer((ohe, cat_selector),
(scaler, num_selector))
ct_direct = make_column_transformer((ohe, ['col_cat', 'col_str']),
(scaler, ['col_float', 'col_int']))
X_selector = ct_selector.fit_transform(X_df)
X_direct = ct_direct.fit_transform(X_df)
assert_allclose(X_selector, X_direct)
def test_make_column_selector_error():
selector = make_column_selector(dtype_include=np.number)
X = np.array([[0.1, 0.2]])
msg = ("make_column_selector can only be applied to pandas dataframes")
with pytest.raises(ValueError, match=msg):
selector(X)
def test_make_column_selector_pickle():
pd = pytest.importorskip('pandas')
X_df = pd.DataFrame({
'col_int': np.array([0, 1, 2], dtype=int),
'col_float': np.array([0.0, 1.0, 2.0], dtype=float),
'col_str': ["one", "two", "three"],
}, columns=['col_int', 'col_float', 'col_str'])
selector = make_column_selector(dtype_include=[object])
selector_picked = pickle.loads(pickle.dumps(selector))
assert_array_equal(selector(X_df), selector_picked(X_df))
@pytest.mark.parametrize(
'empty_col', [[], np.array([], dtype=int), lambda x: []],
ids=['list', 'array', 'callable']
)
def test_feature_names_empty_columns(empty_col):
pd = pytest.importorskip('pandas')
df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
ct = ColumnTransformer(
transformers=[
("ohe", OneHotEncoder(), ["col1", "col2"]),
("empty_features", OneHotEncoder(), empty_col),
],
)
ct.fit(df)
assert ct.get_feature_names() == ['ohe__x0_a', 'ohe__x0_b', 'ohe__x1_z']
| bsd-3-clause |
samueljackson92/major-project | src/mia/plotting.py | 1 | 14059 | """
Various plotting utility functions.
"""
import logging
import os.path
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import pandas as pd
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from skimage import io, transform
from mia.utils import transform_2d
logger = logging.getLogger(__name__)
def plot_multiple_images(images):
"""Plot a list of images on horizontal subplots
:param images: the list of images to plot
"""
fig = plt.figure()
num_images = len(images)
for i, image in enumerate(images):
fig.add_subplot(1, num_images, i+1)
plt.imshow(image, cmap=cm.Greys_r)
plt.show()
def plot_region_props(image, regions):
"""Plot the output of skimage.regionprops along with the image.
Original code from
http://scikit-image.org/docs/dev/auto_examples/plot_regionprops.html
:param image: image to plot
:param regions: the regions output from the regionprops function
"""
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for props in regions:
minr, minc, maxr, maxc = props
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
ax.plot(bx, by, '-b', linewidth=2.5)
plt.show()
def plot_linear_structure(img, line_image):
"""Plot the line image generated from linear structure detection
:param img: the image that structure was detected in
:param line_image: the line image generated from img
"""
line_image = np.ma.masked_where(line_image == 0, line_image)
fig, ax = plt.subplots()
ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
ax.imshow(line_image, cmap=plt.cm.autumn)
ax.grid(False)
plt.show()
def plot_blobs(img, blobs):
"""Plot the output of blob detection on an image.
:param img: the image to plot
:param blobs: list of blobs found in the image
"""
fig, ax = plt.subplots(1, 1)
ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
ax.set_axis_off()
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color='red', linewidth=2, fill=False)
ax.add_patch(c)
plt.show()
def plot_image_orthotope(image_orthotope, titles=None):
""" Plot an image orthotope
:param image_orthotope: the orthotope of images to plot
:param titles: titles for each of the images in orthotope
"""
fig, ax = plt.subplots(*image_orthotope.shape[:2])
if titles is not None:
title_iter = titles.__iter__()
for i in range(image_orthotope.shape[0]):
for j in range(image_orthotope.shape[1]):
if titles is not None:
ax[i][j].set_title(title_iter.next())
ax[i][j].imshow(image_orthotope[i][j], cmap=plt.cm.gray)
ax[i][j].axis('off')
plt.show()
def plot_risk_classes(data_frame, column_name):
""" Plot a histogram of the selected column for each risk class
:param data_frame: the data frame containing the features
:param column_name: the column to use
"""
groups = data_frame.groupby('class')
fig, axs = plt.subplots(2, 2)
axs = axs.flatten()
for i, (index, frame) in enumerate(groups):
frame.hist(column=column_name, ax=axs[i])
axs[i].set_title("Risk %d" % (index))
axs[i].set_xlabel(column_name)
axs[i].set_ylabel('count')
plt.subplots_adjust(wspace=0.75, hspace=0.75)
def plot_risk_classes_single(data_frame, column_name):
""" Plot a histogram of the selected column for each risk class as a single
histogram
This is essentially the same as the plot_risk_classes function except that
all risk classes are plotted in the same subplot.
:param data_frame: the data frame containing the features
:param column_name: the column to use
"""
blobs = data_frame.groupby('class')
for index, b in blobs:
b[column_name].hist(label=str(index))
plt.legend(loc='upper right')
def plot_scatter_2d(data_frame, columns=[0, 1], labels=None, annotate=False,
**kwargs):
""" Create a scatter plot from a pandas data frame
:param data_frame: data frame containing the data to plot
:param columns: the columns to use for each axis. Must be exactly 2
:param labels: the labels used to colour the dataset by class.
:param annotate: whether to annotate the plot with the index
"""
if len(columns) != 2:
raise ValueError("Number of columns must be exactly 2")
ax = data_frame.plot(kind='scatter', x=columns[0], y=columns[1],
c=labels, cmap=plt.cm.Spectral_r, **kwargs)
if annotate:
def annotate_df(row):
ax.text(row.values[0], row.values[1], row.name, fontsize=10)
data_frame.apply(annotate_df, axis=1)
return ax
def plot_scatter_3d(data_frame, columns=[0, 1, 2], labels=None, ax=None,
**kwargs):
""" Create a 3D scatter plot from a pandas data frame
:param data_frame: data frame containing the data to plot
:param columns: the columns to use for each axis. Must be exactly 3
:param labels: the labels used to colour the dataset by class.
"""
if len(columns) != 3:
raise ValueError("Number of columns must be exactly 3")
df = data_frame[columns]
data = df.as_matrix().T
fig = plt.figure()
if ax is None:
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*data, c=labels, cmap=cm.Spectral_r, **kwargs)
ax.set_xlabel(columns[0])
ax.set_ylabel(columns[1])
ax.set_zlabel(columns[2])
return ax
def plot_mapping_3d(m, real_index, phantom_index, labels):
""" Create a 3D scatter plot from a pandas data frame containing two
datasets
:param m: data frame containing the lower dimensional mapping to plot
:param real_index: indices of the real images.
:param phantom_index: indices of the synthetic images.
:param labels: the labels used to colour the dataset by class.
"""
hologic_map = m.loc[real_index]
phantom_map = m.loc[phantom_index]
hol_labels = labels[hologic_map.index]
syn_labels = labels[phantom_map.index]
ax = plot_scatter_3d(hologic_map, labels=hol_labels, s=10)
ax = plot_scatter_3d(phantom_map, labels=syn_labels, ax=ax,
marker='^', s=50)
return ax
def plot_mapping_2d(m, real_index, phantom_index, labels):
""" Create a 2D scatter plot from a pandas data frame containing two
datasets
:param m: data frame containing the lower dimensional mapping to plot
:param real_index: indices of the real images.
:param phantom_index: indices of the synthetic images.
:param labels: the labels used to colour the dataset by class.
"""
hologic_map = m.loc[real_index]
phantom_map = m.loc[phantom_index]
hol_labels = labels[hologic_map.index]
syn_labels = labels[phantom_map.index]
ax = plot_scatter_2d(hologic_map, labels=hol_labels, s=10)
ax = plot_scatter_2d(phantom_map, labels=syn_labels, ax=ax,
marker='^', s=50)
return ax
def plot_scattermatrix(data_frame, label_name=None):
""" Create a scatter plot matrix from a pandas data frame
:param data_frame: data frame containing the lower dimensional mapping
:param label_name: name of the column containing the class label for the
image
"""
column_names = filter(lambda x: x != label_name, data_frame.columns.values)
sns.pairplot(data_frame, hue=label_name, size=1.5, vars=column_names)
plt.show()
def plot_median_image_matrix(data_frame, img_path, label_name=None,
raw_features_csv=None, output_file=None):
"""Plot images from a dataset according to their position defined by the
lower dimensional mapping
This rebins the points in the mapping using a 2D histogram then takes
the median point in each bin. The image corresponding to this point is
then plotted for that bin.
:param data_frame: data frame defining the lower dimensional mapping
:param img_path: path to find the images in
:param label_name: name of the column in the data frame containing the
class labels
:param output_file: file name to output the resulting image to
"""
blobs = None
if raw_features_csv is not None:
blobs = _load_blobs(raw_features_csv) if raw_features_csv else None
grid = _bin_data_frame_2d(data_frame)
axes_iter = _prepare_figure(len(grid))
transform_2d(_prepare_median_image, grid, img_path, blobs, axes_iter)
logger.info("Saving image")
plt.savefig(output_file, bbox_inches='tight', dpi=3000)
def _load_blobs(raw_features_csv):
"""Load blobs from a raw features CSV file
:param raw_features_csv: name of the CSV file.
:returns: DataFrame containing the blobs.
"""
features = pd.DataFrame.from_csv(raw_features_csv)
return features[['x', 'y', 'radius', 'image_name']]
def _prepare_figure(size):
"""Create a figure of the given size with zero whitespace and return the
axes
:param size: number of rows and columns in the subplot grid
:returns axes: axes for each square in the plot.
"""
fig, axs = plt.subplots(size, size,
gridspec_kw={"wspace": 0, "hspace": 0})
axs = np.array(axs).flatten()
return iter(axs)
def _prepare_median_image(img_name, path, blobs_df, axs_iter):
"""Prepare an image to be shown within a square area of the mapping.
:param img_name: name of the image
:param path: path of the image
:param blobs_df: data frame containing each of the blobs in the image.
:param axs_iter: iterator over the axes in the plot.
"""
scale_factor = 8
ax = axs_iter.next()
ax.set_axis_off()
ax.set_aspect('auto')
if img_name == '':
_add_blank_image_to_axis(ax, scale_factor)
else:
logger.info("Loading image %s" % img_name)
path = os.path.join(path, img_name)
img = _load_image(path, scale_factor)
_add_image_to_axis(img, ax, img_name)
if blobs_df is not None:
blobs = _select_blobs(blobs_df, img_name, scale_factor)
_add_blobs_to_axis(ax, blobs)
def _select_blobs(blobs_df, img_name, scale_factor):
"""Select all of the blobs in a given image and scale them to the correct
size
:param blobs_df: data frame containing the location and radius of the blobs.
:param img_name: name of the current image.
:param scale_factor: size to rescale the blobs to.
:returns: DataFrame -- data frame containing the rescaled blobs.
"""
b = blobs_df[blobs_df['image_name'] == img_name]
b = b[['x', 'y', 'radius']].as_matrix()
b /= scale_factor
return b
def _load_image(path, scale_factor):
"""Load an image and scale it to a given size.
:param path: location of the image on disk.
:param scale_factor: size to rescale the image to.
:returns: ndarray -- the image that was loaded.
"""
img = io.imread(path, as_grey=True)
img = transform.resize(img, (img.shape[0]/scale_factor,
img.shape[1]/scale_factor))
return img
def _add_image_to_axis(img, ax, img_name):
"""Add an image to a specific axis on the plot
:param img: the image to add
:param ax: the axis to add the image to.
:param img_name: the name of the image.
"""
ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
ax.text(20, 0, img_name, style='italic', fontsize=3, color='white')
def _add_blank_image_to_axis(ax, scale_factor):
"""Add a blank image to a specific axis on the plot
:param ax: the axis to add the image to.
:param scale_factor: size to scale the blank image to.
"""
img = np.ones((3328/scale_factor, 2560/scale_factor))
ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
def _add_blobs_to_axis(ax, blobs):
"""Draw blobs on an image in the figure.
:param ax: the axis to add the blobs to.
:param blobs: data frame containing the blobs.
"""
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color='red', linewidth=2, fill=False)
ax.add_patch(c)
def _bin_data_frame_2d(data_frame):
"""Create a 2D histogram of the points in the data frame.
For each bin, find the image corresponding to the median point in both directions.
:param data_frame: the data frame containing the mapping.
:return: ndarray -- with each value containing the image name.
"""
hist, xedges, yedges = np.histogram2d(data_frame['0'], data_frame['1'])
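# Only the bin edges are used below; the histogram counts themselves are not needed.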
grid = []
for x_bounds in zip(xedges, xedges[1:]):
row = []
for y_bounds in zip(yedges, yedges[1:]):
entries = _find_points_in_bounds(data_frame, x_bounds, y_bounds)
name = _find_median_image_name(entries)
row.append(name)
grid.append(row)
return np.rot90(np.array(grid))
def _find_median_image_name(data_frame):
"""Find the median image name from a particular bin
:param data_frame: the data frame containing the mapping.
:return: string -- the image name.
"""
name = ''
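# An empty bin keeps the empty name, which _prepare_median_image renders as a blank tile.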
num_rows = data_frame.shape[0]
if num_rows > 0:
sorted_df = data_frame.sort(['0', '1'])
med_df = sorted_df.iloc[[num_rows/2]]
name = med_df.index.values[0]
return name
def _find_points_in_bounds(data_frame, x_bounds, y_bounds):
"""Find the data points that lie within a given bin
:param data_frame: data frame containing the mapping.
:param x_bounds: the x bounds of this bin
:param y_bounds: the y bounds of this bin
:returns: DataFrame -- data frame containing the values in this bin.
"""
xlower, xupper = x_bounds
ylower, yupper = y_bounds
xbounds = (data_frame['0'] >= xlower) & (data_frame['0'] < xupper)
ybounds = (data_frame['1'] >= ylower) & (data_frame['1'] < yupper)
return data_frame[xbounds & ybounds]
| mit |
tomsilver/nupic | nupic/research/monitor_mixin/monitor_mixin_base.py | 7 | 5503 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
@param metrics (list) Traces to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType)
return plot
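# A minimal sketch of how a concrete mixin built on MonitorMixinBase might be
# used; "MyAlgorithmMonitorMixin" and "MyAlgorithm" are illustrative names for
# a subclass pair where the mixin overrides mmGetDefaultTraces() and
# mmGetDefaultMetrics():
#
# class MonitoredAlgorithm(MyAlgorithmMonitorMixin, MyAlgorithm):
#     pass
#
# algo = MonitoredAlgorithm(mmName="demo")
# # ... feed data to algo ...
# print algo.mmPrettyPrintTraces(algo.mmGetDefaultTraces(verbosity=2))
# print algo.mmPrettyPrintMetrics(algo.mmGetDefaultMetrics())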
| gpl-3.0 |
oncokb/oncokb-annotator | OncoKBPlots.py | 1 | 2808 | #!/usr/bin/python
import argparse
from AnnotatorCore import *
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('OncoKBPlots')
import matplotlib.pyplot as plt
def main(argv):
params = {
"catogerycolumn": argv.catogery_column, # -c
"thresholdcat": argv.threshold_cat, # -n
}
if argv.help:
log.info('\n'
'OncoKBPlots.py -i <annotated clinical file> -o <output PDF file> [-c <categorization column, '
'e.g. CANCER_TYPE>] [-s sample list filter] [-n threshold of # samples in a category] [-l comma separated levels to include]\n'
' Essential clinical columns:\n'
' SAMPLE_ID: sample ID\n'
' HIGHEST_LEVEL: Highest OncoKB levels\n'
' Supported levels (-l): \n'
' LEVEL_1,LEVEL_2,LEVEL_3A,LEVEL_3B,LEVEL_4,ONCOGENIC,VUS')
sys.exit()
if argv.input_file == '' or argv.output_file == '':
log.info('for help: python OncoKBPlots.py -h')
sys.exit(2)
if argv.sample_ids_filter:
setsampleidsfileterfile(argv.sample_ids_filter)
if argv.levels:
params["levels"] = re.split(',', argv.levels)
log.info('annotating %s ...' % argv.input_file)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
plotclinicalactionability(ax1, argv.input_file, argv.output_file, params)
# ax.yaxis.grid(linestyle="dotted", color="lightgray") # horizontal lines
# plt.margins(0.01)
plotimplications(ax2, 'HIGHEST_DX_LEVEL', 'OncoKB Diagnostic Implications', dxLevels, argv.input_file, argv.output_file, params)
plotimplications(ax3, 'HIGHEST_PX_LEVEL', 'OncoKB Prognostic Implications', pxLevels, argv.input_file, argv.output_file, params)
plt.subplots_adjust(left=0.2, bottom=0.3)
plt.gcf().text(0.90, 0.1, "Generated by OncoKB\n[Chakravarty et al., JCO PO 2017]", fontsize=6,
horizontalalignment='right', verticalalignment='bottom')
fig.tight_layout()
fig.savefig(argv.output_file, bbox_inches='tight')
log.info('done!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='help', action="store_true", default=False)
parser.add_argument('-i', dest='input_file', default='', type=str)
parser.add_argument('-o', dest='output_file', default='', type=str)
parser.add_argument('-c', dest='catogery_column', default='CANCER_TYPE', type=str)
parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)
parser.add_argument('-n', dest='threshold_cat', default=0, type=int)
parser.add_argument('-l', dest='levels', default='', type=str)
parser.set_defaults(func=main)
args = parser.parse_args()
args.func(args)
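# Example invocation (hypothetical file names), matching the usage string
# printed for -h above:
# python OncoKBPlots.py -i annotated_clinical.txt -o oncokb_plots.pdf \
#     -c CANCER_TYPE -n 10 -l LEVEL_1,LEVEL_2,LEVEL_3A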
| agpl-3.0 |
lin-credible/scikit-learn | sklearn/svm/classes.py | 37 | 39951 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
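# A minimal usage sketch for LinearSVC on a tiny illustrative dataset (the
# expected label is an assumption, not verified library output):
#
# >>> import numpy as np
# >>> from sklearn.svm import LinearSVC
# >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
# >>> y = np.array([1, 1, 2, 2])
# >>> clf = LinearSVC(C=1.0, loss='squared_hinge').fit(X, y)
# >>> clf.predict([[-0.8, -1]])  # should label this point as class 1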
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
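# A minimal usage sketch for LinearSVR, assuming random illustrative data:
#
# >>> import numpy as np
# >>> from sklearn.svm import LinearSVR
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(10, 5)
# >>> y = rng.randn(10)
# >>> reg = LinearSVR(C=1.0, epsilon=0.1).fit(X, y)
# >>> reg.predict(X[:2])  # fitted values for the first two samples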
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
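# A minimal usage sketch for OneClassSVM, assuming illustrative 2-D data and
# parameter values:
#
# >>> import numpy as np
# >>> from sklearn.svm import OneClassSVM
# >>> rng = np.random.RandomState(0)
# >>> X_train = 0.3 * rng.randn(100, 2)
# >>> clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
# >>> clf.predict(X_train[:5])            # +1 for inliers, -1 for outliers
# >>> clf.decision_function(X_train[:5])  # signed distance to the boundary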
| bsd-3-clause |
tseaver/google-cloud-python | automl/google/cloud/automl_v1beta1/tables/gcs_client.py | 3 | 5631 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps the Google Cloud Storage client library for use in tables helper."""
import logging
import time
from google.api_core import exceptions
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
from google.cloud import storage
except ImportError: # pragma: NO COVER
storage = None
_LOGGER = logging.getLogger(__name__)
_PANDAS_REQUIRED = "pandas is required to verify type DataFrame."
_STORAGE_REQUIRED = (
"google-cloud-storage is required to create a Google Cloud Storage client."
)
class GcsClient(object):
"""Uploads Pandas DataFrame to a bucket in Google Cloud Storage."""
def __init__(self, bucket_name=None, client=None, credentials=None, project=None):
"""Constructor.
Args:
bucket_name (Optional[str]): The name of Google Cloud Storage
bucket for this client to send requests to.
client (Optional[storage.Client]): A Google Cloud Storage Client
instance.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
project (Optional[str]): The project ID of the GCP project to
attach to the underlying storage client. If none is specified,
the client will attempt to ascertain the credentials from the
environment.
"""
if storage is None:
raise ImportError(_STORAGE_REQUIRED)
if client is not None:
self.client = client
elif credentials is not None:
self.client = storage.Client(credentials=credentials, project=project)
else:
self.client = storage.Client()
self.bucket_name = bucket_name
def ensure_bucket_exists(self, project, region):
"""Checks if a bucket named '{project}-automl-tables-staging' exists.
If this bucket doesn't exist, creates one.
If this bucket already exists in `project`, do nothing.
If this bucket exists in a different project that we don't have
access to, creates a bucket named
'{project}-automl-tables-staging-{create_timestamp}' because bucket's
name must be globally unique.
Save the created bucket's name and reuse this for future requests.
Args:
project (str): The ID of the project that stores the bucket.
region (str): The region of the bucket.
Returns:
A string representing the created bucket name.
"""
if self.bucket_name is None:
self.bucket_name = "{}-automl-tables-staging".format(project)
try:
self.client.get_bucket(self.bucket_name)
except (exceptions.Forbidden, exceptions.NotFound) as e:
if isinstance(e, exceptions.Forbidden):
used_bucket_name = self.bucket_name
self.bucket_name = used_bucket_name + "-{}".format(int(time.time()))
_LOGGER.warning(
"Created a bucket named {} because a bucket named {} already exists in a different project.".format(
self.bucket_name, used_bucket_name
)
)
bucket = self.client.bucket(self.bucket_name)
bucket.create(project=project, location=region)
return self.bucket_name
def upload_pandas_dataframe(self, dataframe, uploaded_csv_name=None):
"""Uploads a Pandas DataFrame as CSV to the bucket.
Args:
dataframe (pandas.DataFrame): The Pandas Dataframe to be uploaded.
uploaded_csv_name (Optional[str]): The name for the uploaded CSV.
Returns:
A string representing the GCS URI of the uploaded CSV.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
if not isinstance(dataframe, pandas.DataFrame):
raise ValueError("'dataframe' must be a pandas.DataFrame instance.")
if self.bucket_name is None:
raise ValueError("Must ensure a bucket exists before uploading data.")
if uploaded_csv_name is None:
uploaded_csv_name = "automl-tables-dataframe-{}.csv".format(
int(time.time())
)
# Setting index to False to ignore exporting the data index:
# 1. The resulting column name for the index column is empty, AutoML
# Tables does not allow empty column name
# 2. The index is not useful training information
csv_string = dataframe.to_csv(index=False)
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.blob(uploaded_csv_name)
blob.upload_from_string(csv_string)
return "gs://{}/{}".format(self.bucket_name, uploaded_csv_name)
| apache-2.0 |
ClimbsRocks/scikit-learn | sklearn/tests/test_common.py | 6 | 9522 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.decomposition import ProjectedGradientNMF
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check, name, Estimator
else:
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators that have a max_iter
# attribute report an n_iter_ attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
if issubclass(Estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
estimator = Estimator()
else:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
if isinstance(estimator, ProjectedGradientNMF):
# The ProjectedGradientNMF class is deprecated
with ignore_warnings():
yield check_transformer_n_iter, name, estimator
else:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False,
include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
# If class is deprecated, ignore deprecated warnings
if hasattr(Estimator.__init__, "deprecated_original"):
with ignore_warnings():
yield check_get_params_invariance, name, Estimator
else:
yield check_get_params_invariance, name, Estimator
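# A minimal sketch of how these nose-style generator tests are consumed when
# run by hand (outside a test runner); each yielded tuple is a check callable
# followed by its arguments:
#
# for check, name, Estimator in test_non_meta_estimators():
#     check(name, Estimator)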
| bsd-3-clause |
crichardson17/AGN_SED | scripts/Infrared_Temp_Dens.py | 1 | 7137 |
#Import required modules
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import pandas as pd
import os
#List all the subdirectories within directory that have the data that we want
def filelist(directory):
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.lin'):
print (file)
rootdirectory='C:/Users/chris_000/Documents/GitHub/AGN_SED/Cloudy_Data'
filelist(rootdirectory)
#Generate a CSV file containing all the relevant data points
Output_File=r'C:/Users/chris_000/Documents/GitHub/AGN_SED/All_Emissions.csv' #Create the output file
normSource=os.path.normpath(rootdirectory)
dfs=[] #Create an empty array for our Data
d=pd.DataFrame()
d=d.reset_index()
d2=pd.DataFrame({'Temperature': [10**4,10**5, 10**6, 10**7]},dtype=float) #Create a Dataframe of labels for each file used
for root, dirs, files in os.walk(rootdirectory, topdown=False):
for name in files:
if name.startswith('Linear_Fit_ax219') and name.endswith('.lin'):
print(name)
#only read columns from list cols
dfs.append(pd.read_csv(os.path.join(root, name), delimiter="\t", usecols=['TOTL 4861A','O 3 5007A', 'NE 5 3426A', 'NE 3 3869A',
'TOTL 4363A', 'O 1 6300A', 'H 1 6563A','N 2 6584A','S 2 6720A' , 'HE 2 4686A','TOTL 3727A', 'S II 6716A', 'S II 6731A',
'NE 3 3869A','AR 3 7135A','HE 1 5876A','TOTL 4363A','O 3 4959A','O II 3726A', 'O II 3729A','O 4 25.88m','NE 2 12.81m','NE 5 14.32m','NE 5 24.31m','NE 3 15.55m','S 4 10.51m']))
d = pd.concat(dfs, ignore_index=True)
elif name.startswith('Linear_Fit_ax117') and name.endswith('.lin'):
print(name)
#only read columns from list cols
dfs.append(pd.read_csv(os.path.join(root, name), delimiter="\t", usecols=['TOTL 4861A','O 3 5007A', 'NE 5 3426A', 'NE 3 3869A',
'TOTL 4363A', 'O 1 6300A', 'H 1 6563A','N 2 6584A','S 2 6720A' , 'HE 2 4686A','TOTL 3727A', 'S II 6716A', 'S II 6731A',
'NE 3 3869A','AR 3 7135A','HE 1 5876A','TOTL 4363A','O 3 4959A','O II 3726A', 'O II 3729A','S 4 10.51m','O 4 25.88m','NE 2 12.81m','NE 5 14.32m','NE 5 24.31m','NE 3 15.55m']))
d = pd.concat(dfs, ignore_index=True)
elif name.startswith('Hden25') and name.endswith('.lin'):
print(name)
#only read columns from list cols
dfs.append(pd.read_csv(os.path.join(root, name), delimiter="\t", usecols=['TOTL 4861A','O 3 5007A', 'NE 5 3426A', 'NE 3 3869A',
'TOTL 4363A', 'O 1 6300A', 'H 1 6563A','N 2 6584A','S 2 6720A' , 'HE 2 4686A','TOTL 3727A', 'S II 6716A', 'S II 6731A',
'NE 3 3869A','AR 3 7135A','HE 1 5876A','TOTL 4363A','O 3 4959A','O II 3726A', 'S 4 10.51m','O II 3729A','O 4 25.88m','NE 2 12.81m','NE 5 14.32m','NE 5 24.31m','NE 3 15.55m']))
d = pd.concat(dfs, ignore_index=True)
d['Temperature']=d2
d['O IV / Ne II'] = np.log10(d['O 4 25.88m'] / d['NE 2 12.81m'])
d['Ne V / Ne II'] = np.log10(d['NE 5 14.32m'] / d['NE 2 12.81m'])
d['Ne V / Ne III'] = np.log10(d['NE 5 14.32m'] / d['NE 3 15.55m'])
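# Write the assembled emission-line table to the output CSV declared above
# (assumption: the file is meant to hold every column of d, including the
# derived infrared line ratios).
d.to_csv(Output_File, index=False)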
Dasyra2011_Data=np.genfromtxt('C:/Users/chris_000/Documents/GitHub/AGN_SED/ir_data/dasyra2011/dasyra2011_Type1.csv', skip_header=1, delimiter = ',',dtype=float,unpack=True)
Dasyra2011_2_Data = np.genfromtxt('C:/Users/chris_000/Documents/GitHub/AGN_SED/ir_data/dasyra2011/dasyra2011_Type2.csv', skip_header=1, delimiter = ',',dtype=float,unpack=True)
Weaver2010_Data = np.genfromtxt('C:/Users/chris_000/Documents/GitHub/AGN_SED/ir_data/weaver2010/weaver2010.csv', skip_header=1, delimiter = ',',dtype=float,unpack=True)
fig = plt.figure()
ax1 = plt.subplot()
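#Note on the get_value indices used below (inferred from the scatter labels, not stated in the
#original script): rows 0,3,6,9 of d are the base models at T = 10^4..10^7 K, rows 1,4,7,10
#the ax=1.17 runs and rows 2,5,8,11 the ax=2.19 runs.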
baseNe52431 = (np.log10((d['NE 5 24.31m'].get_value(0),d['NE 5 24.31m'].get_value(3),d['NE 5 24.31m'].get_value(6),d['NE 5 24.31m'].get_value(9))))
linNe52431 = (np.log10((d['NE 5 24.31m'].get_value(1),d['NE 5 24.31m'].get_value(4),d['NE 5 24.31m'].get_value(7),d['NE 5 24.31m'].get_value(10))))
lin21Ne52431 = (np.log10((d['NE 5 24.31m'].get_value(2),d['NE 5 24.31m'].get_value(5),d['NE 5 24.31m'].get_value(8),d['NE 5 24.31m'].get_value(11))))
baseNe51432 = (np.log10(d['NE 5 14.32m'].get_value(0)),np.log10(d['NE 5 14.32m'].get_value(3)),np.log10(d['NE 5 14.32m'].get_value(6)),np.log10(d['NE 5 14.32m'].get_value(9)))
linNe51432 = (np.log10(d['NE 5 14.32m'].get_value(1)),np.log10(d['NE 5 14.32m'].get_value(4)),np.log10(d['NE 5 14.32m'].get_value(7)),np.log10(d['NE 5 14.32m'].get_value(10)))
lin21Ne51432 = (np.log10(d['NE 5 14.32m'].get_value(2)),np.log10(d['NE 5 14.32m'].get_value(5)),np.log10(d['NE 5 14.32m'].get_value(8)),np.log10(d['NE 5 14.32m'].get_value(11)))
ax1.scatter(np.log10(Weaver2010_Data[11]),np.log10(Weaver2010_Data[7]),edgecolor = '', marker = '^')
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(0)),np.log10(d['NE 5 24.31m']).get_value(0), marker = "s",c='#FF5D5D', s = 30, label = "10^4")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(3)),np.log10(d['NE 5 24.31m']).get_value(3), marker = "s",c='#FF0000', s = 30, label = "10^5")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(6)),np.log10(d['NE 5 24.31m']).get_value(6), marker = "s",c='#C60000', s = 30, label = "10^6")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(9)),np.log10(d['NE 5 24.31m']).get_value(9), marker = "s",c='#9B0000', s = 30, label = "10^7")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(1)),np.log10(d['NE 5 24.31m']).get_value(1), marker = "s",c='#7056C5', s = 30, label = "10^4 ax=1.17")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(4)),np.log10(d['NE 5 24.31m']).get_value(4), marker = "s",c='#3914AF', s = 30, label = "10^5 ax=1.17")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(7)),np.log10(d['NE 5 24.31m']).get_value(7), marker = "s",c='#2B0E87', s = 30, label = "10^6 ax=1.17")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(10)),np.log10(d['NE 5 24.31m']).get_value(10), marker = "s",c='#200969', s = 30, label = "10^7 ax=1.17")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(2)),np.log10(d['NE 5 24.31m']).get_value(2), marker = "s",c='#50DA50', s = 30, label = "10^4 ax=2.19")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(5)),np.log10(d['NE 5 24.31m']).get_value(5), marker = "s",c='#00CC00', s = 30, label = "10^5 ax=2.19")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(8)),np.log10(d['NE 5 24.31m']).get_value(8), marker = "s",c='#009D00', s = 30, label = "10^6 ax=2.19")
ax1.scatter(np.log10(d['NE 5 14.32m'].get_value(11)),np.log10(d['NE 5 24.31m']).get_value(11), marker = "s",c='#007A00', s = 30, label = "10^7 ax=2.19")
ax1.plot(baseNe51432,baseNe52431, c = '0')
ax1.plot(linNe51432,linNe52431, c = '0')
ax1.plot(lin21Ne51432,lin21Ne52431, c = '0')
ax1.set_xlabel(r'Log$_{10}$([Ne V] $\lambda$ 14.32 $\mu$m)')
ax1.set_ylabel(r'Log$_{10}$([Ne V] $\lambda$ 24.32 $\mu$m)')
ax1.legend(loc = 'upper left', prop = {'size': 12})
plt.suptitle('AGN Infrared Temperature/Density Diagnostic Plots: Metallicity = 1.5, Efrac = 0.01, Phi(h) = 10.4771, n(h) = 2.5')
plt.show()
| gpl-3.0 |
mmottahedi/neuralnilm_prototype | scripts/e569.py | 2 | 14563 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation, MultiSource)
from neuralnilm.experiment import (run_experiment, init_experiment,
change_dir, configure_logger)
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer, BLSTMLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import (sigmoid, rectify, tanh, identity, softmax)
from lasagne.objectives import squared_error, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer,
DimshuffleLayer, DropoutLayer, ConcatLayer, PadLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
# PATH = "/home/jack/experiments/neuralnilm/figures"
UKDALE_FILENAME = '/data/dk3810/ukdale.h5'
SKIP_PROBABILITY_FOR_TARGET = 0.5
INDEPENDENTLY_CENTER_INPUTS = True
WINDOW_PER_BUILDING = {
1: ("2013-04-12", "2014-12-15"),
2: ("2013-05-22", "2013-10-03 06:16:00"),
3: ("2013-02-27", "2013-04-01 06:15:05"),
4: ("2013-03-09", "2013-09-24 06:15:14"),
5: ("2014-06-29", "2014-09-01")
}
INPUT_STATS = {
'mean': np.array([297.87216187], dtype=np.float32),
'std': np.array([374.43884277], dtype=np.float32)
}
def get_source(appliance, logger, target_is_start_and_end_and_mean=False,
is_rnn=False, window_per_building=WINDOW_PER_BUILDING,
source_type='multisource',
filename=UKDALE_FILENAME):
"""
Parameters
----------
source_type : {'multisource', 'real_appliance_source'}
Returns
-------
Source
"""
N_SEQ_PER_BATCH = 64
TRAIN_BUILDINGS_REAL = None
if appliance == 'microwave':
SEQ_LENGTH = 288
TRAIN_BUILDINGS = [1, 2]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'microwave',
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
['washer dryer', 'washing machine']
]
MAX_APPLIANCE_POWERS = [3000, 300, 2500, 3100, 2500]
ON_POWER_THRESHOLDS = [ 200, 50, 10, 2000, 20]
MIN_ON_DURATIONS = [ 12, 60, 1800, 12, 1800]
MIN_OFF_DURATIONS = [ 30, 12, 1800, 0, 160]
elif appliance == 'washing machine':
SEQ_LENGTH = 1024
TRAIN_BUILDINGS = [1, 5]
VALIDATION_BUILDINGS = [2]
APPLIANCES = [
['washer dryer', 'washing machine'],
['fridge freezer', 'fridge', 'freezer'],
'dish washer',
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [2500, 300, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 20, 50, 10, 2000, 200]
MIN_ON_DURATIONS = [1800, 60, 1800, 12, 12]
MIN_OFF_DURATIONS = [ 160, 12, 1800, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
elif appliance == 'fridge':
SEQ_LENGTH = 512
TRAIN_BUILDINGS = [1, 2, 4]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'dish washer',
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [ 300, 2500, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 50, 20, 10, 2000, 200]
MIN_ON_DURATIONS = [ 60, 1800, 1800, 12, 12]
MIN_OFF_DURATIONS = [ 12, 160, 1800, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
elif appliance == 'kettle':
SEQ_LENGTH = 128
TRAIN_BUILDINGS = [1, 2, 3, 4]
# House 3's mains often doesn't include kettle!
TRAIN_BUILDINGS_REAL = [1, 2, 4]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'kettle',
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'dish washer',
'microwave'
]
MAX_APPLIANCE_POWERS = [3100, 300, 2500, 2500, 3000]
ON_POWER_THRESHOLDS = [2000, 50, 20, 10, 200]
MIN_ON_DURATIONS = [ 12, 60, 1800, 1800, 12]
MIN_OFF_DURATIONS = [ 0, 12, 160, 1800, 30]
elif appliance == 'dish washer':
SEQ_LENGTH = 1024 + 512
TRAIN_BUILDINGS = [1, 2]
VALIDATION_BUILDINGS = [5]
APPLIANCES = [
'dish washer',
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'kettle',
'microwave'
]
MAX_APPLIANCE_POWERS = [2500, 300, 2500, 3100, 3000]
ON_POWER_THRESHOLDS = [ 10, 50, 20, 2000, 200]
MIN_ON_DURATIONS = [1800, 60, 1800, 12, 12]
MIN_OFF_DURATIONS = [1800, 12, 160, 0, 30]
if is_rnn:
N_SEQ_PER_BATCH = 16
TARGET_APPLIANCE = APPLIANCES[0]
MAX_TARGET_POWER = MAX_APPLIANCE_POWERS[0]
ON_POWER_THRESHOLD = ON_POWER_THRESHOLDS[0]
MIN_ON_DURATION = MIN_ON_DURATIONS[0]
MIN_OFF_DURATION = MIN_OFF_DURATIONS[0]
if TRAIN_BUILDINGS_REAL is None:
TRAIN_BUILDINGS_REAL = TRAIN_BUILDINGS
real_appliance_source1 = RealApplianceSource(
logger=logger,
filename=filename,
appliances=APPLIANCES,
max_appliance_powers=MAX_APPLIANCE_POWERS,
on_power_thresholds=ON_POWER_THRESHOLDS,
min_on_durations=MIN_ON_DURATIONS,
min_off_durations=MIN_OFF_DURATIONS,
divide_input_by_max_input_power=False,
window_per_building=window_per_building,
seq_length=SEQ_LENGTH,
output_one_appliance=True,
train_buildings=TRAIN_BUILDINGS,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=0.75,
skip_probability_for_first_appliance=SKIP_PROBABILITY_FOR_TARGET,
standardise_input=True,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
target_is_start_and_end_and_mean=target_is_start_and_end_and_mean
)
if source_type != 'multisource':
return real_appliance_source1
same_location_source1 = SameLocation(
logger=logger,
filename=filename,
target_appliance=TARGET_APPLIANCE,
window_per_building=window_per_building,
seq_length=SEQ_LENGTH,
train_buildings=TRAIN_BUILDINGS_REAL,
validation_buildings=VALIDATION_BUILDINGS,
n_seq_per_batch=N_SEQ_PER_BATCH,
skip_probability=SKIP_PROBABILITY_FOR_TARGET,
standardise_input=True,
offset_probability=1,
divide_target_by=MAX_TARGET_POWER,
input_stats=INPUT_STATS,
independently_center_inputs=INDEPENDENTLY_CENTER_INPUTS,
on_power_threshold=ON_POWER_THRESHOLD,
min_on_duration=MIN_ON_DURATION,
min_off_duration=MIN_OFF_DURATION,
include_all=False,
allow_incomplete=False,
target_is_start_and_end_and_mean=target_is_start_and_end_and_mean
)
multi_source = MultiSource(
sources=[
{
'source': real_appliance_source1,
'train_probability': 0.5,
'validation_probability': 0
},
{
'source': same_location_source1,
'train_probability': 0.5,
'validation_probability': 1
}
],
standardisation_source=same_location_source1
)
return multi_source
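# Hedged usage sketch (not part of the original script; assumes the UK-DALE HDF5 file at
# UKDALE_FILENAME is available): build the kettle source the same way main() does below.
# >>> logger = logging.getLogger('e569_example')  # arbitrary logger name
# >>> src = get_source('kettle', logger, is_rnn=False)
# >>> src.sources[0]['source'].seq_length
# 128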
def only_train_on_real_data(net, iteration):
net.logger.info(
"Iteration {}: Now only training on real data.".format(iteration))
net.source.sources[0]['train_probability'] = 0.0
net.source.sources[1]['train_probability'] = 1.0
def net_dict_ae_rnn(seq_length):
NUM_FILTERS = 8
return dict(
epochs=None,
save_plot_interval=1000,
loss_function=lambda x, t: squared_error(x, t).mean(),
updates_func=nesterov_momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
105000: 1e-3
},
do_save_activations=True,
auto_reshape=False,
plotter=Plotter(
n_seq_to_plot=32,
n_training_examples_to_plot=16
),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (-1, (seq_length - 3), NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{ # DeConv
'type': Conv1DLayer,
'num_filters': 1,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1), # back to (batch, time, features)
'label': 'AE_output'
}
],
layer_changes={
100001: {
'new_layers': [
{
'type': ConcatLayer,
'axis': 2,
'incomings': ['input', 'AE_output']
},
{
'type': BLSTMLayer,
'num_units': 128,
'merge_mode': 'concatenate',
'grad_clipping': 10.0,
'gradient_steps': 500
},
{
'type': BLSTMLayer,
'num_units': 256,
'merge_mode': 'concatenate',
'grad_clipping': 10.0,
'gradient_steps': 500
},
{
'type': ReshapeLayer,
'shape': (64 * seq_length, 512)
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': 1,
'nonlinearity': None
}
]
}
}
)
net_dict_ae_rnn.name = 'ae_rnn'
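# Reading of the net_dict above (a description, not additional configuration): a Conv1D /
# dense-bottleneck / "deconv" autoencoder is trained first, and at iteration 100001 its
# 'AE_output' is concatenated with the raw input and fed through two BLSTM layers (128 and
# 256 units) plus a per-timestep dense head.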
def exp_a(name, net_dict, multi_source):
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=multi_source,
))
net = Net(**net_dict_copy)
net.plotter.max_target_power = multi_source.sources[1]['source'].divide_target_by
return net
def main():
for net_dict_func in [net_dict_ae_rnn]:
for appliance in ['microwave']:
full_exp_name = NAME + '_' + appliance + '_' + net_dict_func.name
change_dir(PATH, full_exp_name)
configure_logger(full_exp_name)
logger = logging.getLogger(full_exp_name)
global multi_source
multi_source = get_source(
appliance,
logger,
is_rnn=True
)
seq_length = multi_source.sources[0]['source'].seq_length
net_dict = net_dict_func(seq_length)
epochs = net_dict.pop('epochs')
try:
net = exp_a(full_exp_name, net_dict, multi_source)
net.experiment_name = 'e567_microwave_ae'
net.set_csv_filenames()
net.load_params(iteration=100000, path='/data/dk3810/figures/e567_microwave_ae')
net.experiment_name = full_exp_name
net.set_csv_filenames()
run_experiment(net, epochs=epochs)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e569.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
mjudsp/Tsallis | sklearn/manifold/setup.py | 99 | 1243 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
htygithub/bokeh | bokeh/compat/mplexporter/tools.py | 75 | 1732 | """
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
print('IPython Notebook could not be loaded.')
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
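# Hedged usage sketch (assumes an IPython/Jupyter notebook session; not part of the module):
# >>> from bokeh.compat.mplexporter.tools import ipynb_vega_init
# >>> ipynb_vega_init()  # injects the d3 / topojson / vega <script> loaders into the notebook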
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble.VotingClassifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| mit |
OrkoHunter/networkx | networkx/convert.py | 12 | 13210 | """Functions to convert NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph is through the
graph constructor. The constructor calls the to_networkx_graph() function
which attempts to guess the input type and convert it automatically.
Examples
--------
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
nx_pygraphviz, nx_pydot
"""
# Copyright (C) 2006-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import warnings
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['to_networkx_graph',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist']
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
return nx.Graph()
try:
create_using.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return create_using
def to_networkx_graph(data,create_using=None,multigraph_input=False):
"""Make a NetworkX graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
instead of the equivalent
>>> G=nx.from_dict_of_dicts(d)
Parameters
----------
    data : an object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
         dict-of-lists
list of edges
numpy matrix
numpy ndarray
scipy sparse matrix
pygraphviz agraph
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# NX graph
if hasattr(data,"adj"):
try:
result= from_dict_of_dicts(data.adj,\
create_using=create_using,\
multigraph_input=data.is_multigraph())
if hasattr(data,'graph'): # data.graph should be dict-like
result.graph.update(data.graph)
if hasattr(data,'node'): # data.node should be dict-like
result.node.update( (n,dd.copy()) for n,dd in data.node.items() )
return result
except:
raise nx.NetworkXError("Input is not a correct NetworkX graph.")
# pygraphviz agraph
if hasattr(data,"is_strict"):
try:
return nx.from_agraph(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a correct pygraphviz graph.")
# dict of dicts/lists
if isinstance(data,dict):
try:
return from_dict_of_dicts(data,create_using=create_using,\
multigraph_input=multigraph_input)
except:
try:
return from_dict_of_lists(data,create_using=create_using)
except:
raise TypeError("Input is not known type.")
# list or generator of edges
if (isinstance(data,list)
or isinstance(data,tuple)
or hasattr(data,'next')
or hasattr(data, '__next__')):
try:
return from_edgelist(data,create_using=create_using)
except:
raise nx.NetworkXError("Input is not a valid edge list")
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
try:
return nx.from_pandas_dataframe(data, create_using=create_using)
except:
msg = "Input is not a correct Pandas DataFrame."
raise nx.NetworkXError(msg)
except ImportError:
msg = 'pandas not found, skipping conversion test.'
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data,numpy.matrix) or \
isinstance(data,numpy.ndarray):
try:
return nx.from_numpy_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct numpy matrix or array.")
except ImportError:
warnings.warn('numpy not found, skipping conversion test.',
ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data,"format"):
try:
return nx.from_scipy_sparse_matrix(data,create_using=create_using)
except:
raise nx.NetworkXError(\
"Input is not a correct scipy sparse matrix type.")
except ImportError:
warnings.warn('scipy not found, skipping conversion test.',
ImportWarning)
raise nx.NetworkXError(\
"Input is not a known data type for conversion.")
return
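# Hedged usage sketch (small literal inputs assumed; not from the original docstring):
# >>> G = to_networkx_graph({0: [1, 2]})  # dict-of-lists input
# >>> sorted(G.edges())
# [(0, 1), (0, 2)]
# >>> D = to_networkx_graph([(0, 1), (1, 2)], create_using=nx.DiGraph())  # edge list input
# >>> D.number_of_edges()
# 2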
def convert_to_undirected(G):
"""Return a new undirected representation of the graph G."""
return G.to_undirected()
def convert_to_directed(G):
"""Return a new directed representation of the graph G."""
return G.to_directed()
def to_dict_of_lists(G,nodelist=None):
"""Return adjacency representation of graph as a dictionary of lists.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
Notes
-----
Completely ignores edge data for MultiGraph and MultiDiGraph.
"""
if nodelist is None:
nodelist=G
d = {}
for n in nodelist:
d[n]=[nbr for nbr in G.neighbors(n) if nbr in nodelist]
return d
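# Hedged usage sketch (example graph assumed; not from the original docstring):
# >>> G = nx.Graph([(0, 1), (1, 2)])
# >>> to_dict_of_lists(G)
# {0: [1], 1: [0, 2], 2: [1]}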
def from_dict_of_lists(d,create_using=None):
"""Return a graph from a dictionary of lists.
Parameters
----------
d : dictionary of lists
A dictionary of lists adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> dol= {0:[1]} # single edge (0,1)
>>> G=nx.from_dict_of_lists(dol)
or
>>> G=nx.Graph(dol) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
if G.is_multigraph() and not G.is_directed():
# a dict_of_lists can't show multiedges. BUT for undirected graphs,
# each edge shows up twice in the dict_of_lists.
# So we need to treat this case separately.
seen={}
for node,nbrlist in d.items():
for nbr in nbrlist:
if nbr not in seen:
G.add_edge(node,nbr)
seen[node]=1 # don't allow reverse edge to show up
else:
G.add_edges_from( ((node,nbr) for node,nbrlist in d.items()
for nbr in nbrlist) )
return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
"""Return adjacency representation of graph as a dictionary of dictionaries.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
edge_data : list, optional
If provided, the value of the dictionary will be
set to edge_data for all edges. This is useful to make
an adjacency matrix type representation with 1 as the edge data.
If edgedata is None, the edgedata in G is used to fill the values.
If G is a multigraph, the edgedata is a dict for each pair (u,v).
"""
dod={}
if nodelist is None:
if edge_data is None:
for u,nbrdict in G.adjacency():
dod[u]=nbrdict.copy()
else: # edge_data is not None
for u,nbrdict in G.adjacency():
dod[u]=dod.fromkeys(nbrdict, edge_data)
else: # nodelist is not None
if edge_data is None:
for u in nodelist:
dod[u]={}
for v,data in ((v,data) for v,data in G[u].items() if v in nodelist):
dod[u][v]=data
else: # nodelist and edge_data are not None
for u in nodelist:
dod[u]={}
for v in ( v for v in G[u] if v in nodelist):
dod[u][v]=edge_data
return dod
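# Hedged sketch of the edge_data override described above (example graph assumed):
# >>> G = nx.Graph([(0, 1)])
# >>> to_dict_of_dicts(G)
# {0: {1: {}}, 1: {0: {}}}
# >>> to_dict_of_dicts(G, edge_data=1)  # adjacency-matrix style values
# {0: {1: 1}, 1: {0: 1}}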
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
"""Return a graph from a dictionary of dictionaries.
Parameters
----------
d : dictionary of dictionaries
A dictionary of dictionaries adjacency representation.
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
multigraph_input : bool (default False)
When True, the values of the inner dict are assumed
to be containers of edge data for multiple edges.
Otherwise this routine assumes the edge data are singletons.
Examples
--------
>>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
>>> G=nx.from_dict_of_dicts(dod)
or
>>> G=nx.Graph(dod) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_nodes_from(d)
# is dict a MultiGraph or MultiDiGraph?
if multigraph_input:
# make a copy of the list of edge data (but not the edge data)
if G.is_directed():
if G.is_multigraph():
G.add_edges_from( (u,v,key,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else:
G.add_edges_from( (u,v,data)
for u,nbrs in d.items()
for v,datadict in nbrs.items()
for key,data in datadict.items()
)
else: # Undirected
if G.is_multigraph():
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,key,data)
for key,data in datadict.items()
)
seen.add((v,u))
else:
seen=set() # don't add both directions of undirected graph
for u,nbrs in d.items():
for v,datadict in nbrs.items():
if (u,v) not in seen:
G.add_edges_from( (u,v,data)
for key,data in datadict.items() )
seen.add((v,u))
else: # not a multigraph to multigraph transfer
if G.is_multigraph() and not G.is_directed():
# d can have both representations u-v, v-u in dict. Only add one.
# We don't need this check for digraphs since we add both directions,
# or for Graph() since it is done implicitly (parallel edges not allowed)
seen=set()
for u,nbrs in d.items():
for v,data in nbrs.items():
if (u,v) not in seen:
G.add_edge(u,v,attr_dict=data)
seen.add((v,u))
else:
G.add_edges_from( ( (u,v,data)
for u,nbrs in d.items()
for v,data in nbrs.items()) )
return G
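# Hedged sketch of the multigraph_input branch described above (literal input assumed):
# >>> dodod = {0: {1: {'a': {'weight': 1}, 'b': {'weight': 2}}}}  # two parallel (0, 1) edges
# >>> MG = from_dict_of_dicts(dodod, create_using=nx.MultiGraph(), multigraph_input=True)
# >>> MG.number_of_edges()
# 2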
def to_edgelist(G,nodelist=None):
"""Return a list of edges in the graph.
Parameters
----------
G : graph
A NetworkX graph
nodelist : list
Use only nodes specified in nodelist
"""
if nodelist is None:
return G.edges(data=True)
else:
return G.edges(nodelist,data=True)
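# Hedged usage sketch (example graph assumed; not from the original docstring):
# >>> G = nx.Graph()
# >>> G.add_edge(0, 1, weight=3)
# >>> to_edgelist(G)
# [(0, 1, {'weight': 3})]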
def from_edgelist(edgelist,create_using=None):
"""Return a graph from a list of edges.
Parameters
----------
edgelist : list or iterator
Edge tuples
create_using : NetworkX graph
Use specified graph for result. Otherwise a new graph is created.
Examples
--------
>>> edgelist= [(0,1)] # single edge (0,1)
>>> G=nx.from_edgelist(edgelist)
or
>>> G=nx.Graph(edgelist) # use Graph constructor
"""
G=_prep_create_using(create_using)
G.add_edges_from(edgelist)
return G
| bsd-3-clause |
soulmachine/scikit-learn | sklearn/tests/test_random_projection.py | 7 | 13949 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
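# Hedged illustration of the helper exercised above (the integer follows from the
# 4*log(n_samples)/(eps**2/2 - eps**3/3) bound used by this implementation):
# >>> johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.5)
# 331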
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for random_matrix in all_random_matrix:
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
for random_matrix in all_sparse_random_matrix:
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
"""Check some statical properties of Gaussian random matrix"""
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
"""Check some statical properties of sparse random matrix"""
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
        # Let's say that each element a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(UserWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
awanke/bokeh | bokeh/charts/builder/tests/test_step_builder.py | 33 | 2495 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [ 2., 2., 3., 3., 7., 7., 5., 5., 26.]
y_jython = [ 22., 22.,43., 43., 10., 10., 25., 25., 26.]
y_pypy = [ 12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
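# _resample_model is the shared stability-selection driver: for each of n_resampling runs it
# draws a random row mask (sample_fraction of the samples) and a random 0/1 per-feature
# reweighting scaled by `scaling`, calls estimator_func on the perturbed problem in parallel,
# and averages the returned boolean selection masks into per-feature selection frequencies.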
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article; that parameter corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
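# Hedged usage sketch (synthetic data assumed; complements the one-line Examples section in
# the docstring above):
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(60, 10)
# >>> y = X[:, 0] + 0.1 * rng.randn(60)  # only feature 0 is informative
# >>> scores = RandomizedLasso(random_state=0).fit(X, y).scores_
# >>> scores.shape
# (10,)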
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
    C = np.atleast_1d(np.asarray(C, dtype=np.float64))
    scores = np.zeros((X.shape[1], len(C)), dtype=bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
            np.abs(clf.coef_) > 10 * np.finfo(np.float64).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, features
    selected more often across resamplings are considered good features. This
    procedure is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
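    A minimal illustrative fit over a small grid of ``C`` values (the data
    shapes and the grid itself are editorial choices, not part of the
    original example):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(60, 5)  # illustrative synthetic data
    >>> y = (X[:, 0] + 0.1 * rng.randn(60) > 0).astype(int)
    >>> randomized_logistic = RandomizedLogisticRegression(
    ...     C=[0.1, 1.0], n_resampling=20).fit(X, y)
    >>> randomized_logistic.all_scores_.shape
    (5, 2)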
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
                         eps=4 * np.finfo(np.float64).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
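    Examples
    --------
    A minimal illustrative call (the data shapes and parameter values below
    are editorial choices, not part of the original docstring):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 8)  # illustrative synthetic data
    >>> y = X[:, 0] + 0.1 * rng.randn(40)
    >>> alphas_grid, scores_path = lasso_stability_path(
    ...     X, y, n_resampling=20, random_state=0)
    >>> scores_path.shape[0] == X.shape[1]
    True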
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
bnaul/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 17 | 3334 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1, L2 and Elastic-Net penalty are used for different values of C. We can see
that large values of C give more freedom to the model. Conversely, smaller
values of C constrain the model more. In the L1 penalty case, this leads to
sparser solutions. As expected, the Elastic-Net penalty sparsity is between
that of L1 and L2.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
X, y = datasets.load_digits(return_X_y=True)
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)
l1_ratio = 0.5 # L1 weight in the Elastic-Net regularization
fig, axes = plt.subplots(3, 3)
# Set regularization parameter
for i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01, solver='saga')
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga')
clf_en_LR = LogisticRegression(C=C, penalty='elasticnet', solver='saga',
l1_ratio=l1_ratio, tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
clf_en_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
coef_en_LR = clf_en_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
sparsity_en_LR = np.mean(coef_en_LR == 0) * 100
print("C=%.2f" % C)
print("{:<40} {:.2f}%".format("Sparsity with L1 penalty:", sparsity_l1_LR))
print("{:<40} {:.2f}%".format("Sparsity with Elastic-Net penalty:",
sparsity_en_LR))
print("{:<40} {:.2f}%".format("Sparsity with L2 penalty:", sparsity_l2_LR))
print("{:<40} {:.2f}".format("Score with L1 penalty:",
clf_l1_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with Elastic-Net penalty:",
clf_en_LR.score(X, y)))
print("{:<40} {:.2f}".format("Score with L2 penalty:",
clf_l2_LR.score(X, y)))
if i == 0:
axes_row[0].set_title("L1 penalty")
axes_row[1].set_title("Elastic-Net\nl1_ratio = %s" % l1_ratio)
axes_row[2].set_title("L2 penalty")
for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):
ax.imshow(np.abs(coefs.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
ax.set_xticks(())
ax.set_yticks(())
axes_row[0].set_ylabel('C = %s' % C)
plt.show()
| bsd-3-clause |
elgambitero/FreeCAD_sf_master | src/Mod/Plot/plotSeries/TaskPanel.py | 26 | 17784 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD as App
import FreeCADGui as Gui
from PySide import QtGui, QtCore
import Plot
from plotUtils import Paths
import matplotlib
from matplotlib.lines import Line2D
import matplotlib.colors as Colors
class TaskPanel:
def __init__(self):
self.ui = Paths.modulePath() + "/plotSeries/TaskPanel.ui"
self.skip = False
self.item = 0
self.plt = None
def accept(self):
return True
def reject(self):
return True
def clicked(self, index):
pass
def open(self):
pass
def needsFullSpace(self):
return True
def isAllowedAlterSelection(self):
return False
def isAllowedAlterView(self):
return True
def isAllowedAlterDocument(self):
return False
def helpRequested(self):
pass
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
self.form = form
self.retranslateUi()
self.fillStyles()
self.updateUI()
QtCore.QObject.connect(
form.items,
QtCore.SIGNAL("currentRowChanged(int)"),
self.onItem)
QtCore.QObject.connect(
form.label,
QtCore.SIGNAL("editingFinished()"),
self.onData)
QtCore.QObject.connect(
form.isLabel,
QtCore.SIGNAL("stateChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.style,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.marker,
QtCore.SIGNAL("currentIndexChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.width,
QtCore.SIGNAL("valueChanged(double)"),
self.onData)
QtCore.QObject.connect(
form.size,
QtCore.SIGNAL("valueChanged(int)"),
self.onData)
QtCore.QObject.connect(
form.color,
QtCore.SIGNAL("pressed()"),
self.onColor)
QtCore.QObject.connect(
form.remove,
QtCore.SIGNAL("pressed()"),
self.onRemove)
QtCore.QObject.connect(
Plot.getMdiArea(),
QtCore.SIGNAL("subWindowActivated(QMdiSubWindow*)"),
self.onMdiArea)
return False
def getMainWindow(self):
toplevel = QtGui.qApp.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise RuntimeError("No main window found")
def widget(self, class_id, name):
"""Return the selected widget.
Keyword arguments:
class_id -- Class identifier
name -- Name of the widget
"""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
return form.findChild(class_id, name)
def retranslateUi(self):
"""Set the user interface locale strings."""
self.form.setWindowTitle(QtGui.QApplication.translate(
"plot_series",
"Configure series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"No label",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setText(
QtGui.QApplication.translate(
"plot_series",
"Remove serie",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "styleLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLabel, "markerLabel").setText(
QtGui.QApplication.translate(
"plot_series",
"Marker",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QListWidget, "items").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"List of available series",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QLineEdit, "label").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line title",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QCheckBox, "isLabel").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"If checked serie will not be considered for legend",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "lineStyle").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QComboBox, "markers").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker style",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QDoubleSpinBox, "lineWidth").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line width",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QSpinBox, "markerSize").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Marker size",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "color").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Line and marker color",
None,
QtGui.QApplication.UnicodeUTF8))
self.widget(QtGui.QPushButton, "remove").setToolTip(
QtGui.QApplication.translate(
"plot_series",
"Removes this serie",
None,
QtGui.QApplication.UnicodeUTF8))
def fillStyles(self):
"""Fill the style combo boxes with the availabel ones."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
# Line styles
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
string = "\'" + str(style) + "\'"
string += " (" + Line2D.lineStyles[style] + ")"
form.style.addItem(string)
# Markers
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
string = "\'" + str(marker) + "\'"
string += " (" + Line2D.markers[marker] + ")"
form.marker.addItem(string)
def onItem(self, row):
"""Executed when the selected item is modified."""
if not self.skip:
self.skip = True
self.item = row
self.updateUI()
self.skip = False
def onData(self):
"""Executed when the selected item data is modified."""
if not self.skip:
self.skip = True
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
            # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Set label
serie = Plot.series()[self.item]
if(form.isLabel.isChecked()):
serie.name = None
form.label.setEnabled(False)
else:
serie.name = form.label.text()
form.label.setEnabled(True)
# Set line style and marker
style = form.style.currentIndex()
linestyles = Line2D.lineStyles.keys()
serie.line.set_linestyle(linestyles[style])
marker = form.marker.currentIndex()
markers = Line2D.markers.keys()
serie.line.set_marker(markers[marker])
# Set line width and marker size
serie.line.set_linewidth(form.width.value())
serie.line.set_markersize(form.size.value())
plt.update()
# Regenerate series labels
self.setList()
self.skip = False
def onColor(self):
""" Executed when color pallete is requested. """
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.color = self.widget(QtGui.QPushButton, "color")
        # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
# Show widget to select color
col = QtGui.QColorDialog.getColor()
        # Send color to the widget and the series
if col.isValid():
serie = plt.series[self.item]
form.color.setStyleSheet(
"background-color: rgb({}, {}, {});".format(col.red(),
col.green(),
col.blue()))
serie.line.set_color((col.redF(), col.greenF(), col.blueF()))
plt.update()
def onRemove(self):
"""Executed when the data serie must be removed."""
plt = Plot.getPlot()
if not plt:
self.updateUI()
return
        # Ensure that the selected series exists
if self.item >= len(Plot.series()):
self.updateUI()
return
        # Remove the series
Plot.removeSerie(self.item)
self.setList()
self.updateUI()
plt.update()
def onMdiArea(self, subWin):
"""Executed when a new window is selected on the mdi area.
Keyword arguments:
subWin -- Selected window.
"""
plt = Plot.getPlot()
if plt != subWin:
self.updateUI()
def updateUI(self):
""" Setup UI controls values if possible """
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.label = self.widget(QtGui.QLineEdit, "label")
form.isLabel = self.widget(QtGui.QCheckBox, "isLabel")
form.style = self.widget(QtGui.QComboBox, "lineStyle")
form.marker = self.widget(QtGui.QComboBox, "markers")
form.width = self.widget(QtGui.QDoubleSpinBox, "lineWidth")
form.size = self.widget(QtGui.QSpinBox, "markerSize")
form.color = self.widget(QtGui.QPushButton, "color")
form.remove = self.widget(QtGui.QPushButton, "remove")
plt = Plot.getPlot()
form.items.setEnabled(bool(plt))
form.label.setEnabled(bool(plt))
form.isLabel.setEnabled(bool(plt))
form.style.setEnabled(bool(plt))
form.marker.setEnabled(bool(plt))
form.width.setEnabled(bool(plt))
form.size.setEnabled(bool(plt))
form.color.setEnabled(bool(plt))
form.remove.setEnabled(bool(plt))
if not plt:
self.plt = plt
form.items.clear()
return
self.skip = True
# Refill list
if self.plt != plt or len(Plot.series()) != form.items.count():
self.plt = plt
self.setList()
        # Ensure that we have series
if not len(Plot.series()):
form.label.setEnabled(False)
form.isLabel.setEnabled(False)
form.style.setEnabled(False)
form.marker.setEnabled(False)
form.width.setEnabled(False)
form.size.setEnabled(False)
form.color.setEnabled(False)
form.remove.setEnabled(False)
return
# Set label
serie = Plot.series()[self.item]
if serie.name is None:
form.isLabel.setChecked(True)
form.label.setEnabled(False)
form.label.setText("")
else:
form.isLabel.setChecked(False)
form.label.setText(serie.name)
# Set line style and marker
form.style.setCurrentIndex(0)
linestyles = Line2D.lineStyles.keys()
for i in range(0, len(linestyles)):
style = linestyles[i]
if style == serie.line.get_linestyle():
form.style.setCurrentIndex(i)
form.marker.setCurrentIndex(0)
markers = Line2D.markers.keys()
for i in range(0, len(markers)):
marker = markers[i]
if marker == serie.line.get_marker():
form.marker.setCurrentIndex(i)
# Set line width and marker size
form.width.setValue(serie.line.get_linewidth())
form.size.setValue(serie.line.get_markersize())
# Set color
color = Colors.colorConverter.to_rgb(serie.line.get_color())
form.color.setStyleSheet("background-color: rgb({}, {}, {});".format(
int(color[0] * 255),
int(color[1] * 255),
int(color[2] * 255)))
self.skip = False
def setList(self):
"""Setup the UI control values if it is possible."""
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskPanel")
form.items = self.widget(QtGui.QListWidget, "items")
form.items.clear()
series = Plot.series()
for i in range(0, len(series)):
serie = series[i]
            string = 'series ' + str(i) + ': '
if serie.name is None:
string = string + '\"No label\"'
else:
string = string + serie.name
form.items.addItem(string)
# Ensure that selected item is correct
if len(series) and self.item >= len(series):
self.item = len(series) - 1
form.items.setCurrentIndex(self.item)
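# Module-level factory: shows the series task panel in the FreeCAD task view
# and closes the dialog again if setupUi() reports a truthy value.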
def createTask():
panel = TaskPanel()
Gui.Control.showDialog(panel)
if panel.setupUi():
Gui.Control.closeDialog(panel)
return None
return panel
| lgpl-2.1 |
obarquero/intro_machine_learning_udacity | Projects/ud120-projects-master/choose_your_own/class_vis.py | 23 | 1649 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
def prettyPicture(clf, X_test, y_test):
x_min = 0.0; x_max = 1.0
y_min = 0.0; y_max = 1.0
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)
# Plot also the test points
grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==0]
bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==0]
grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==1]
bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==1]
plt.scatter(grade_sig, bumpy_sig, color = "b", label="fast")
plt.scatter(grade_bkg, bumpy_bkg, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.savefig("test.png")
import base64
import json
import subprocess
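# Serialize an image to stdout as base64-encoded JSON wrapped between sentinel
# markers, presumably so an external tool (e.g. the course grader) can pick it
# out of the program output.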
def output_image(name, format, bytes):
image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
data = {}
data['name'] = name
data['format'] = format
data['bytes'] = base64.encodestring(bytes)
    print(image_start + json.dumps(data) + image_end)
| gpl-2.0 |
sanketloke/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
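    # The 2x2 mixing matrix below is orthogonal (a reflection parameterized by
    # phi), so the observations are an orthogonal mixture of the two sources.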
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
        Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
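    Examples
    --------
    A minimal illustrative fit on toy data (the data and bandwidth below are
    editorial choices, not part of the original docstring):
    >>> import numpy as np
    >>> from sklearn.cluster import MeanShift
    >>> X = np.array([[1., 1.], [1.2, 0.9], [5., 5.], [5.1, 4.9]])
    >>> ms = MeanShift(bandwidth=1.0).fit(X)
    >>> ms.cluster_centers_.shape
    (2, 2)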
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
low-sky/cohrscld | virparam_sfe.py | 1 | 1904 | import numpy as np
from astropy.table import Table
import scipy.stats as ss
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.constants as con
t = Table.read('/Users/erik/code/cohrscld/output_catalog_withsfr.fits')
apix_sr = 8.46159e-10
mlum = t['mlum_msun']
cloud_irlum = (t['ir_luminosity'] - t['bg_lum']) * 6e11 * apix_sr
sfe = cloud_irlum / mlum
R0 = 8.5e3
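# Galactocentric radius from the law of cosines, assuming the Sun lies at
# R0 = 8.5 kpc and that t['x_coor'] holds the Galactic longitude in degrees.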
rgal = (R0**2 + t['distance']**2 -
2 * R0 * t['distance'] * np.cos(t['x_coor'] * np.pi / 180))**0.5
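# Dimensionless virial parameter alpha = 5 * sigma_v^2 * R / (G * M),
# evaluated with astropy units and reduced to a plain number.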
alpha = ((5 * t['sigv_kms']**2 * (u.km / u.s)**2 * t['radius'] * u.pc) *\
(con.G * t['mlum_msun'] * u.M_sun)**(-1)).to(u.dimensionless_unscaled)
x = alpha
y = sfe.data
fig, (ax1) = plt.subplots(1)
fig.set_size_inches(5, 4)
idx = mlum > 1e3
val, edges, _ = ss.binned_statistic(np.log10(x[idx]),
np.log10(y[idx]),
statistic=np.nanmedian, bins=10)
histdata, xedge, yedge = np.histogram2d(np.log10(x[idx]),
np.log10(y[idx]),
range=[[-1, 3], [-2, 2]],
bins=40)
ax1.scatter(x[idx], y[idx], edgecolor='k',
facecolor='none', zorder=-99)
ax1.plot(1e1**(0.5 * (edges[1:] + edges[0:-1])), 1e1**val, color='green', lw=3)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_xlabel(r'$\alpha$', size=16)
ax1.set_ylabel(r'$L_{\mathrm{IR}}/M_{\mathrm{CO}}\ (L_{\odot}/M_{\odot})$',
size=16)
# ax1.set_xlim(1e1**])
ax1.set_ylim([1e-2, 1e2])
ax1.set_xlim([1e-1, 1e3])
histdata[histdata < 3] = np.nan
ax1.grid()
cax = ax1.imshow(histdata.T, extent=(1e-1, 1e3, 1e-2, 1e2), origin='lower',
interpolation='nearest', cmap='inferno', vmin=2, aspect='auto')
cb = fig.colorbar(cax)
cb.set_label(r'Number')
fig.tight_layout()
plt.savefig('sfe_virparam.png', dpi=300)
plt.close(fig)
plt.clf()
| gpl-3.0 |
SEL-Columbia/formhub | utils/bamboo.py | 7 | 5673 |
import StringIO
import unicodecsv
from pybamboo.dataset import Dataset
from pybamboo.connection import Connection
from pybamboo.exceptions import ErrorParsingBambooData
from odk_viewer.models import ParsedInstance
from odk_viewer.pandas_mongo_bridge import (CSVDataFrameBuilder,
NoRecordsFoundError)
from restservice.models import RestService
def get_bamboo_url(xform):
try:
service = list(RestService.objects.filter(xform=xform, name='bamboo')).pop()
except IndexError:
return 'http://bamboo.io'
return service.service_url
def delete_bamboo_dataset(xform):
if not xform.bamboo_dataset:
return False
try:
dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
dataset_id=xform.bamboo_dataset)
return dataset.delete()
except ErrorParsingBambooData:
return False
def ensure_rest_service(xform):
    '''Create the Bamboo RestService if it doesn't already exist.'''
bb_url = get_bamboo_url(xform)
services = RestService.objects.filter(xform=xform, name='bamboo')
# do nothing if there's already a restservice for that.
if services.filter(service_url=bb_url).count():
return True
# there is no service ; let's create a default one.
if not services.count():
RestService.objects.create(xform=xform,
name='bamboo',
service_url=bb_url)
return True
# we have existing services with non-default URL
# do nothing as the user probably knows what to do.
return False
def get_new_bamboo_dataset(xform, force_last=False):
dataset_id = u''
try:
content_data = get_csv_data(xform, force_last=force_last)
dataset = Dataset(connection=Connection(url=get_bamboo_url(xform)),
content=content_data,
na_values=['n/a'])
except NoRecordsFoundError:
return dataset_id
if dataset.id:
return dataset.id
return dataset_id
def get_csv_data(xform, force_last=False):
def getbuff():
return StringIO.StringIO()
def get_headers_from(csv_data):
csv_data.seek(0)
header_row = csv_data.readline()
csv_data.read()
return header_row.split(',')
def get_csv_data_manual(xform,
only_last=False, with_header=True,
headers_to_use=None):
# TODO: find out a better way to handle this
        # when the form has only one submission, CSVDataFrameBuilder is empty.
        # We still want to create the Bamboo dataset with that single row,
        # so we extract it and build the CSV ourselves.
pifilter = ParsedInstance.objects.filter(instance__xform=xform) \
.order_by('-instance__date_modified')
if pifilter.count() == 0:
raise NoRecordsFoundError
else:
# we should only do it for count == 1 but eh.
csv_buf = getbuff()
if only_last:
pifilter = [pifilter[0]]
rows = [pi.to_dict_for_mongo() for pi in pifilter]
if headers_to_use is None:
headers_to_use = [key for key in rows[0].keys()
if not key.startswith('_')]
w = unicodecsv.DictWriter(csv_buf, fieldnames=headers_to_use,
extrasaction='ignore',
lineterminator='\n',
encoding='utf-8')
if with_header:
w.writeheader()
w.writerows(rows)
csv_buf.flush()
if not csv_buf.len:
raise NoRecordsFoundError
return csv_buf.getvalue()
# setup an IO stream
buff = getbuff()
# prepare/generate a standard CSV export.
# note that it omits the current submission (if called from rest)
csv_dataframe_builder = CSVDataFrameBuilder(xform.user.username,
xform.id_string)
try:
csv_dataframe_builder.export_to(buff)
if force_last:
# requested to add last submission to the buffer
buff.write(get_csv_data_manual(xform,
only_last=True, with_header=False,
headers_to_use=
get_headers_from(buff)))
except NoRecordsFoundError:
# verify that we don't have a single submission before giving up
get_csv_data_manual(xform, with_header=True)
if buff.len:
# rewrite CSV header so that meta fields (starting with _ or meta)
# are prefixed to ensure that the dataset will be joinable to
# another formhub dataset
prefix = (u'%(id_string)s_%(id)s'
% {'id_string': xform.id_string, 'id': xform.id})
new_buff = getbuff()
buff.seek(0)
reader = unicodecsv.reader(buff, encoding='utf-8')
writer = unicodecsv.writer(new_buff, encoding='utf-8')
is_header = True
for row in reader:
if is_header:
is_header = False
for idx, col in enumerate(row):
if col.startswith('_') or col.startswith('meta_')\
or col.startswith('meta/'):
row[idx] = (u'%(prefix)s%(col)s'
% {'prefix': prefix, 'col': col})
writer.writerow(row)
return new_buff.getvalue()
else:
raise NoRecordsFoundError
| bsd-2-clause |
ChanChiChoi/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
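# Example invocation (estimator names must be keys of ESTIMATORS above):
# python bench_20newsgroups.py -e logistic_regression naive_bayes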
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
uber/ludwig | tests/integration_tests/utils.py | 1 | 17734 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import multiprocessing
import os
import random
import shutil
import sys
import traceback
import unittest
import uuid
from distutils.util import strtobool
import cloudpickle
import numpy as np
import pandas as pd
from ludwig.api import LudwigModel
from ludwig.constants import VECTOR, COLUMN, NAME, PROC_COLUMN
from ludwig.data.dataset_synthesizer import DATETIME_FORMATS
from ludwig.data.dataset_synthesizer import build_synthetic_dataset
from ludwig.experiment import experiment_cli
from ludwig.features.feature_utils import compute_feature_hash
from ludwig.utils.data_utils import read_csv, replace_file_extension
ENCODERS = [
'embed', 'rnn', 'parallel_cnn', 'cnnrnn', 'stacked_parallel_cnn',
'stacked_cnn', 'transformer'
]
HF_ENCODERS_SHORT = ['distilbert']
HF_ENCODERS = [
'bert',
'gpt',
'gpt2',
##'transformer_xl',
'xlnet',
'xlm',
'roberta',
'distilbert',
'ctrl',
'camembert',
'albert',
't5',
'xlmroberta',
'longformer',
'flaubert',
'electra',
'mt5'
]
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError("If set, {} must be yes or no.".format(key))
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truth value to run them.
"""
if not _run_slow_tests:
test_case = unittest.skip("Skipping: this test is too slow")(test_case)
return test_case
def generate_data(
input_features,
output_features,
filename='test_csv.csv',
num_examples=25,
):
"""
Helper method to generate synthetic data based on input, output feature
specs
:param num_examples: number of examples to generate
:param input_features: schema
:param output_features: schema
:param filename: path to the file where data is stored
    :return: path to the CSV file where the generated data was saved
"""
features = input_features + output_features
df = build_synthetic_dataset(num_examples, features)
data = [next(df) for _ in range(num_examples)]
dataframe = pd.DataFrame(data[1:], columns=data[0])
dataframe.to_csv(filename, index=False)
return filename
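# Illustrative sketch (not part of the original module): generating a small
# synthetic CSV from one input and one output feature spec; the file name and
# feature choices are arbitrary example values.
def _example_generate_data():
    input_features = [numerical_feature(), category_feature(vocab_size=4)]
    output_features = [binary_feature()]
    return generate_data(input_features, output_features,
                         filename='example_dataset.csv', num_examples=10)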
def random_string(length=5):
return uuid.uuid4().hex[:length].upper()
def numerical_feature(normalization=None, **kwargs):
feature = {
'name': 'num_' + random_string(),
'type': 'numerical',
'preprocessing': {
'normalization': normalization
}
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def category_feature(**kwargs):
feature = {
'type': 'category',
'name': 'category_' + random_string(),
'vocab_size': 10,
'embedding_size': 5
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def text_feature(**kwargs):
feature = {
'name': 'text_' + random_string(),
'type': 'text',
'reduce_input': None,
'vocab_size': 5,
'min_len': 7,
'max_len': 7,
'embedding_size': 8,
'state_size': 8
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def set_feature(**kwargs):
feature = {
'type': 'set',
'name': 'set_' + random_string(),
'vocab_size': 10,
'max_len': 5,
'embedding_size': 5
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def sequence_feature(**kwargs):
feature = {
'type': 'sequence',
'name': 'sequence_' + random_string(),
'vocab_size': 10,
'max_len': 7,
'encoder': 'embed',
'embedding_size': 8,
'fc_size': 8,
'state_size': 8,
'num_filters': 8,
'hidden_size': 8
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def image_feature(folder, **kwargs):
feature = {
'type': 'image',
'name': 'image_' + random_string(),
'encoder': 'resnet',
'preprocessing': {
'in_memory': True,
'height': 12,
'width': 12,
'num_channels': 3
},
'resnet_size': 8,
'destination_folder': folder,
'fc_size': 8,
'num_filters': 8
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def audio_feature(folder, **kwargs):
feature = {
'name': 'audio_' + random_string(),
'type': 'audio',
'preprocessing': {
'audio_feature': {
'type': 'fbank',
'window_length_in_s': 0.04,
'window_shift_in_s': 0.02,
'num_filter_bands': 80
},
'audio_file_length_limit_in_s': 3.0
},
'encoder': 'stacked_cnn',
'should_embed': False,
'conv_layers': [
{
'filter_size': 400,
'pool_size': 16,
'num_filters': 32,
'regularize': 'false'
},
{
'filter_size': 40,
'pool_size': 10,
'num_filters': 64,
'regularize': 'false'
}
],
'fc_size': 256,
'destination_folder': folder
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def timeseries_feature(**kwargs):
feature = {
'name': 'timeseries_' + random_string(),
'type': 'timeseries',
'max_len': 7
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def binary_feature(**kwargs):
feature = {
'name': 'binary_' + random_string(),
'type': 'binary'
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def bag_feature(**kwargs):
feature = {
'name': 'bag_' + random_string(),
'type': 'bag',
'max_len': 5,
'vocab_size': 10,
'embedding_size': 5
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def date_feature(**kwargs):
feature = {
'name': 'date_' + random_string(),
'type': 'date',
'preprocessing': {
'datetime_format': random.choice(list(DATETIME_FORMATS.keys()))
}
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def h3_feature(**kwargs):
feature = {
'name': 'h3_' + random_string(),
'type': 'h3'
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
def vector_feature(**kwargs):
feature = {
'type': VECTOR,
'vector_size': 5,
'name': 'vector_' + random_string()
}
feature.update(kwargs)
feature[COLUMN] = feature[NAME]
feature[PROC_COLUMN] = compute_feature_hash(feature)
return feature
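# Illustrative sketch (not part of the original module): the feature helpers
# above all return plain dicts with NAME, COLUMN and PROC_COLUMN filled in,
# and any default can be overridden through keyword arguments.
def _example_feature_specs():
    return [
        text_feature(encoder='parallel_cnn'),
        set_feature(vocab_size=20),
        numerical_feature(normalization='zscore'),
    ]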
def run_experiment(
input_features,
output_features,
skip_save_processed_input=True,
**kwargs
):
"""
Helper method to avoid code repetition in running an experiment. Deletes
the data saved to disk after running the experiment
:param input_features: list of input feature dictionaries
:param output_features: list of output feature dictionaries
    :param kwargs: extra parameters that are passed to the experiment as
        keyword arguments
:return: None
"""
config = None
if input_features is not None and output_features is not None:
# This if is necessary so that the caller can call with
# config_file (and not config)
config = {
'input_features': input_features,
'output_features': output_features,
'combiner': {
'type': 'concat',
'fc_size': 14
},
'training': {'epochs': 2}
}
args = {
'config': config,
'skip_save_training_description': True,
'skip_save_training_statistics': True,
'skip_save_processed_input': skip_save_processed_input,
'skip_save_progress': True,
'skip_save_unprocessed_output': True,
'skip_save_model': True,
'skip_save_predictions': True,
'skip_save_eval_stats': True,
'skip_collect_predictions': True,
'skip_collect_overall_stats': True,
'skip_save_log': True
}
args.update(kwargs)
_, _, _, _, exp_dir_name = experiment_cli(**args)
shutil.rmtree(exp_dir_name, ignore_errors=True)
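# Illustrative sketch (not part of the original module): a typical end-to-end
# call that generates data and runs a short experiment on it; the `dataset`
# keyword is forwarded to experiment_cli through **kwargs, and the file name
# is an arbitrary example value.
def _example_run_experiment():
    input_features = [category_feature(vocab_size=5), numerical_feature()]
    output_features = [binary_feature()]
    data_csv = generate_data(input_features, output_features,
                             filename='example_experiment.csv')
    run_experiment(input_features, output_features, dataset=data_csv)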
def generate_output_features_with_dependencies(main_feature, dependencies):
# helper function to generate multiple output features specifications
    # with dependencies, support for the 'test_experiment_multiple_seq_seq' unit test
# Parameters:
# main_feature: feature identifier, valid values 'feat1', 'feat2', 'feat3'
    # dependencies: list of dependencies for 'main_feature', do not list
    #     the main_feature itself as one of its own dependencies
# Example:
# generate_output_features_with_dependencies('feat2', ['feat1', 'feat3'])
output_features = [
category_feature(vocab_size=2, reduce_input='sum'),
sequence_feature(vocab_size=10, max_len=5),
numerical_feature()
]
# value portion of dictionary is a tuple: (position, feature_name)
# position: location of output feature in the above output_features list
# feature_name: Ludwig generated feature name
feature_names = {
'feat1': (0, output_features[0]['name']),
'feat2': (1, output_features[1]['name']),
'feat3': (2, output_features[2]['name'])
}
# generate list of dependencies with real feature names
generated_dependencies = [feature_names[feat_name][1]
for feat_name in dependencies]
# specify dependencies for the main_feature
output_features[feature_names[main_feature][0]]['dependencies'] = \
generated_dependencies
return output_features
def _subproc_wrapper(fn, queue, *args, **kwargs):
fn = cloudpickle.loads(fn)
try:
results = fn(*args, **kwargs)
except Exception as e:
traceback.print_exc(file=sys.stderr)
results = e
queue.put(results)
def spawn(fn):
def wrapped_fn(*args, **kwargs):
ctx = multiprocessing.get_context('spawn')
queue = ctx.Queue()
p = ctx.Process(
target=_subproc_wrapper,
args=(cloudpickle.dumps(fn), queue, *args),
kwargs=kwargs)
p.start()
p.join()
results = queue.get()
if isinstance(results, Exception):
raise RuntimeError(
f'Spawned subprocess raised {type(results).__name__}, '
f'check log output above for stack trace.')
return results
return wrapped_fn
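# Illustrative sketch (not part of the original module): wrapping a function
# with `spawn` above so it executes in a separate 'spawn'-context process;
# the result comes back through the queue, and a child failure surfaces as a
# RuntimeError in the parent.
@spawn
def _example_spawned_add(a, b):
    return a + b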
def run_api_experiment(input_features, output_features, data_csv):
"""
Helper method to avoid code repetition in running an experiment
:param input_features: input schema
:param output_features: output schema
:param data_csv: path to data
:return: None
"""
config = {
'input_features': input_features,
'output_features': output_features,
'combiner': {'type': 'concat', 'fc_size': 14},
'training': {'epochs': 2}
}
model = LudwigModel(config)
output_dir = None
try:
# Training with csv
_, _, output_dir = model.train(
dataset=data_csv,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True
)
model.predict(dataset=data_csv)
model_dir = os.path.join(output_dir, 'model')
loaded_model = LudwigModel.load(model_dir)
# Necessary before call to get_weights() to materialize the weights
loaded_model.predict(dataset=data_csv)
model_weights = model.model.get_weights()
loaded_weights = loaded_model.model.get_weights()
for model_weight, loaded_weight in zip(model_weights, loaded_weights):
assert np.allclose(model_weight, loaded_weight)
finally:
# Remove results/intermediate data saved to disk
shutil.rmtree(output_dir, ignore_errors=True)
try:
# Training with dataframe
data_df = read_csv(data_csv)
_, _, output_dir = model.train(
dataset=data_df,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True
)
model.predict(dataset=data_df)
finally:
shutil.rmtree(output_dir, ignore_errors=True)
def create_data_set_to_use(data_format, raw_data):
# helper function for generating training and test data with specified format
# handles all data formats except for hdf5
# assumes raw_data is a csv dataset generated by
# tests.integration_tests.utils.generate_data() function
# support for writing to a fwf dataset based on this stackoverflow posting:
# https://stackoverflow.com/questions/16490261/python-pandas-write-dataframe-to-fixed-width-file-to-fwf
from tabulate import tabulate
    def to_fwf(df, fname):
        content = tabulate(df.values.tolist(), list(df.columns),
                           tablefmt="plain")
        with open(fname, "w") as fwf_file:
            fwf_file.write(content)
pd.DataFrame.to_fwf = to_fwf
dataset_to_use = None
if data_format == 'csv':
dataset_to_use = raw_data
elif data_format in {'df', 'dict'}:
dataset_to_use = pd.read_csv(raw_data)
if data_format == 'dict':
dataset_to_use = dataset_to_use.to_dict(orient='list')
elif data_format == 'excel':
dataset_to_use = replace_file_extension(raw_data, 'xlsx')
pd.read_csv(raw_data).to_excel(
dataset_to_use,
index=False
)
elif data_format == 'excel_xls':
dataset_to_use = replace_file_extension(raw_data, 'xls')
pd.read_csv(raw_data).to_excel(
dataset_to_use,
index=False
)
elif data_format == 'feather':
dataset_to_use = replace_file_extension(raw_data, 'feather')
pd.read_csv(raw_data).to_feather(
dataset_to_use
)
elif data_format == 'fwf':
dataset_to_use = replace_file_extension(raw_data, 'fwf')
pd.read_csv(raw_data).to_fwf(
dataset_to_use
)
elif data_format == 'html':
dataset_to_use = replace_file_extension(raw_data, 'html')
pd.read_csv(raw_data).to_html(
dataset_to_use,
index=False
)
elif data_format == 'json':
dataset_to_use = replace_file_extension(raw_data, 'json')
pd.read_csv(raw_data).to_json(
dataset_to_use,
orient='records'
)
elif data_format == 'jsonl':
dataset_to_use = replace_file_extension(raw_data, 'jsonl')
pd.read_csv(raw_data).to_json(
dataset_to_use,
orient='records',
lines=True
)
elif data_format == 'parquet':
dataset_to_use = replace_file_extension(raw_data, 'parquet')
pd.read_csv(raw_data).to_parquet(
dataset_to_use,
index=False
)
elif data_format == 'pickle':
dataset_to_use = replace_file_extension(raw_data, 'pickle')
pd.read_csv(raw_data).to_pickle(
dataset_to_use
)
elif data_format == 'stata':
dataset_to_use = replace_file_extension(raw_data, 'stata')
pd.read_csv(raw_data).to_stata(
dataset_to_use
)
elif data_format == 'tsv':
dataset_to_use = replace_file_extension(raw_data, 'tsv')
pd.read_csv(raw_data).to_csv(
dataset_to_use,
sep='\t',
index=False
)
    else:
        raise ValueError(
            "'{}' is an unrecognized data format".format(data_format)
        )
return dataset_to_use
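# Illustrative sketch (not part of the original module): converting a
# generated CSV into one of the alternative formats handled above; 'parquet'
# and the file names are arbitrary example choices.
def _example_create_data_set():
    raw_csv = generate_data([numerical_feature()], [binary_feature()],
                            filename='example_raw.csv')
    return create_data_set_to_use('parquet', raw_csv)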
| apache-2.0 |
lucidfrontier45/scikit-learn | sklearn/utils/arpack.py | 2 | 64316 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatable.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatable. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error"
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
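# Illustrative sketch (not part of the original wrapper): constructing the
# symmetric solver parameters directly in mode 1 (the standard problem
# A*x = lambda*x) and driving the reverse-communication loop by hand, the
# same way _eigsh below does; the matrix is a made-up example.
def _example_symmetric_mode1():
    A = np.diag([1., 2., 3., 4., 5.])
    params = _SymmetricArpackParams(n=5, k=2, tp='d',
                                    matvec=lambda x: A.dot(x))
    while not params.converged:
        params.iterate()
    return params.extract(return_eigenvectors=True)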
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
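# Illustrative sketch (not part of the original wrapper): exercising the
# shift-invert helper above directly on a small sparse matrix. The matrix and
# the shift sigma are made-up example values; with M=None the returned
# callable applies [A - sigma*I]^-1 to a vector via a sparse LU solve.
def _example_opinv_matvec():
    from scipy.sparse import csc_matrix
    A = csc_matrix(np.diag([1.0, 2.0, 3.0, 4.0]))
    opinv = get_OPinv_matvec(A, M=None, sigma=2.5, symmetric=True)
    return opinv(np.ones(4))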
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real or complex square matrix.
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Other Parameters
----------------
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
import warnings
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Other Parameters
----------------
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
    >>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
import warnings
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
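    Examples
    --------
    A minimal, hedged usage sketch (the input below is random, so concrete
    singular values are not shown):
    >>> X = np.random.randn(9, 6)
    >>> u, s, vh = _svds(X, k=3)  # doctest: +SKIP
    >>> s.shape  # doctest: +SKIP
    (3,)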
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
def matvec_XH_X(x):
return XH.dot(X.dot(x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
u = X.dot(v) / s
vh = herm(v)
else:
u = eigvec
vh = herm(X.dot(u) / s)
return u, s, vh
# check if backport is actually needed:
if LooseVersion(scipy.version.version) >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
anurag313/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
    plt.contour(label == l,
                colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
flacjacket/sympy | sympy/mpmath/visualization.py | 18 | 9212 | """
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from .libmp import NoConvergence
from .libmp.backend import xrange
class VisualizationMethods(object):
plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
singularities=[], axes=None):
r"""
Shows a simple 2D plot of a function `f(x)` or list of functions
`[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
specified by *xlim*. Some examples::
plot(lambda x: exp(x)*li(x), [1, 4])
plot([cos, sin], [-4, 4])
plot([fresnels, fresnelc], [-4, 4])
plot([sqrt, cbrt], [-4, 4])
plot(lambda t: zeta(0.5+t*j), [-20, 20])
plot([floor, ceil, abs, sign], [-5, 5])
Points where the function raises a numerical exception or
returns an infinite value are removed from the graph.
Singularities can also be excluded explicitly
as follows (useful for removing erroneous vertical lines)::
plot(cot, ylim=[-5, 5]) # bad
plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good
For parts where the function assumes complex values, the
real part is plotted with dashes and the imaginary part
is plotted with dots.
.. note :: This function requires matplotlib (pylab).
"""
if file:
axes = None
fig = None
if not axes:
import pylab
fig = pylab.figure()
axes = fig.add_subplot(111)
if not isinstance(f, (tuple, list)):
f = [f]
a, b = xlim
colors = ['b', 'r', 'g', 'm', 'k']
for n, func in enumerate(f):
x = ctx.arange(a, b, (b-a)/float(points))
segments = []
segment = []
in_complex = False
for i in xrange(len(x)):
try:
if i != 0:
for sing in singularities:
if x[i-1] <= sing and x[i] >= sing:
raise ValueError
v = func(x[i])
if ctx.isnan(v) or abs(v) > 1e300:
raise ValueError
if hasattr(v, "imag") and v.imag:
re = float(v.real)
im = float(v.imag)
if not in_complex:
in_complex = True
segments.append(segment)
segment = []
segment.append((float(x[i]), re, im))
else:
if in_complex:
in_complex = False
segments.append(segment)
segment = []
if hasattr(v, "real"):
v = v.real
segment.append((float(x[i]), v))
except ctx.plot_ignore:
if segment:
segments.append(segment)
segment = []
if segment:
segments.append(segment)
for segment in segments:
x = [s[0] for s in segment]
y = [s[1] for s in segment]
if not x:
continue
c = colors[n % len(colors)]
if len(segment[0]) == 3:
z = [s[2] for s in segment]
axes.plot(x, y, '--'+c, linewidth=3)
axes.plot(x, z, ':'+c, linewidth=3)
else:
axes.plot(x, y, c, linewidth=3)
axes.set_xlim([float(_) for _ in xlim])
if ylim:
axes.set_ylim([float(_) for _ in ylim])
axes.set_xlabel('x')
axes.set_ylabel('f(x)')
axes.grid(True)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def default_color_function(ctx, z):
    # Map infinities to white and NaNs to grey.
    if ctx.isinf(z):
        return (1.0, 1.0, 1.0)
    if ctx.isnan(z):
        return (0.5, 0.5, 0.5)
    pi = 3.1415926535898
    # Hue encodes the argument (phase) of z, normalized into [0, 1).
    a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
    a = (a + 0.5) % 1.0
    # Lightness encodes the magnitude: 0 at z = 0, approaching 1 as |z| grows.
    b = 1.0 - float(1/(1.0+abs(z)**0.3))
    return hls_to_rgb(a, b, 0.8)
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
verbose=False, file=None, dpi=None, axes=None):
"""
Plots the given complex-valued function *f* over a rectangular part
of the complex plane specified by the pairs of intervals *re* and *im*.
For example::
cplot(lambda z: z, [-2, 2], [-10, 10])
cplot(exp)
cplot(zeta, [0, 1], [0, 50])
By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is shown as brightness. You can also supply a
custom color function (*color*). This function should take a
complex number as input and return an RGB 3-tuple containing
floats in the range 0.0-1.0.
To obtain a sharp image, the number of points may need to be
increased to 100,000 or thereabout. Since evaluating the
function that many times is likely to be slow, the 'verbose'
option is useful to display progress.
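    As a rough sketch of such a custom function (hue taken from the argument,
    brightness from the magnitude; the name ``my_color`` and the 0.3 exponent
    are arbitrary choices made here, and unlike the default this sketch does
    not special-case infinities or NaNs)::

        from cmath import phase, pi
        from colorsys import hsv_to_rgb

        def my_color(z):
            h = (phase(complex(z)) + pi) / (2*pi)   # hue in [0, 1]
            v = 1.0 - 1.0/(1.0 + abs(z)**0.3)       # brightness grows with |z|
            return hsv_to_rgb(h, 1.0, v)

        cplot(exp, color=my_color)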
.. note :: This function requires matplotlib (pylab).
"""
if color is None:
color = ctx.default_color_function
import pylab
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = fig.add_subplot(111)
rea, reb = re
ima, imb = im
dre = reb - rea
dim = imb - ima
M = int(ctx.sqrt(points*dre/dim)+1)
N = int(ctx.sqrt(points*dim/dre)+1)
x = pylab.linspace(rea, reb, M)
y = pylab.linspace(ima, imb, N)
# Note: we have to be careful to get the right rotation.
# Test with these plots:
# cplot(lambda z: z if z.real < 0 else 0)
# cplot(lambda z: z if z.imag < 0 else 0)
w = pylab.zeros((N, M, 3))
for n in xrange(N):
for m in xrange(M):
z = ctx.mpc(x[m], y[n])
try:
v = color(f(z))
except ctx.plot_ignore:
v = (0.5, 0.5, 0.5)
w[n,m] = v
if verbose:
print(n, "of", N)
rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]]
axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
axes.set_xlabel('Re(z)')
axes.set_ylabel('Im(z)')
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
wireframe=False, file=None, dpi=None, axes=None):
"""
Plots the surface defined by `f`.
If `f` returns a single component, then this plots the surface
defined by `z = f(x,y)` over the rectangular domain with
`x = u` and `y = v`.
If `f` returns three components, then this plots the parametric
surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.
For example, to plot a simple function::
>>> from mpmath import *
>>> f = lambda x, y: sin(x+y)*cos(y)
>>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP
Plotting a donut::
>>> r, R = 1, 2.5
>>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
>>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP
.. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
"""
import pylab
import mpl_toolkits.mplot3d as mplot3d
if file:
axes = None
fig = None
if not axes:
fig = pylab.figure()
axes = mplot3d.axes3d.Axes3D(fig)
ua, ub = u
va, vb = v
du = ub - ua
dv = vb - va
if not isinstance(points, (list, tuple)):
points = [points, points]
M, N = points
u = pylab.linspace(ua, ub, M)
v = pylab.linspace(va, vb, N)
x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
xab, yab, zab = [[0, 0] for i in xrange(3)]
for n in xrange(N):
for m in xrange(M):
fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
try:
x[m,n], y[m,n], z[m,n] = fdata
except TypeError:
x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
if c < cab[0]:
cab[0] = c
if c > cab[1]:
cab[1] = c
if wireframe:
axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
else:
axes.plot_surface(x, y, z, rstride=4, cstride=4)
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
if keep_aspect:
dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
maxd = max(dx, dy, dz)
if dx < maxd:
delta = maxd - dx
axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
if dy < maxd:
delta = maxd - dy
axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
if dz < maxd:
delta = maxd - dz
axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
if fig:
if file:
pylab.savefig(file, dpi=dpi)
else:
pylab.show()
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/resample/conftest.py | 2 | 4226 | from datetime import datetime
import numpy as np
import pytest
from pandas import DataFrame, Series
from pandas.core.indexes.datetimes import date_range
from pandas.core.indexes.period import period_range
# The various methods we support
downsample_methods = [
"min",
"max",
"first",
"last",
"sum",
"mean",
"sem",
"median",
"prod",
"var",
"std",
"ohlc",
"quantile",
]
upsample_methods = ["count", "size"]
series_methods = ["nunique"]
resample_methods = downsample_methods + upsample_methods + series_methods
@pytest.fixture(params=downsample_methods)
def downsample_method(request):
"""Fixture for parametrization of Grouper downsample methods."""
return request.param
@pytest.fixture(params=upsample_methods)
def upsample_method(request):
"""Fixture for parametrization of Grouper upsample methods."""
return request.param
@pytest.fixture(params=resample_methods)
def resample_method(request):
"""Fixture for parametrization of Grouper resample methods."""
return request.param
@pytest.fixture
def simple_date_range_series():
"""
Series with date range index and random data for test purposes.
"""
def _simple_date_range_series(start, end, freq="D"):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
return _simple_date_range_series
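# A hedged sketch of how a test might consume the factory fixture above
# (the test name and the resampling assertion are illustrative only):
#
# def test_daily_to_monthly_mean(simple_date_range_series):
#     ts = simple_date_range_series("1/1/2000", "3/1/2000")
#     result = ts.resample("M").mean()
#     assert len(result) == 3  # Jan, Feb, Mar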
@pytest.fixture
def simple_period_range_series():
"""
Series with period range index and random data for test purposes.
"""
def _simple_period_range_series(start, end, freq="D"):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
return _simple_period_range_series
@pytest.fixture
def _index_start():
"""Fixture for parametrization of index, series and frame."""
return datetime(2005, 1, 1)
@pytest.fixture
def _index_end():
"""Fixture for parametrization of index, series and frame."""
return datetime(2005, 1, 10)
@pytest.fixture
def _index_freq():
"""Fixture for parametrization of index, series and frame."""
return "D"
@pytest.fixture
def _index_name():
"""Fixture for parametrization of index, series and frame."""
return None
@pytest.fixture
def index(_index_factory, _index_start, _index_end, _index_freq, _index_name):
"""Fixture for parametrization of date_range, period_range and
timedelta_range indexes"""
return _index_factory(_index_start, _index_end, freq=_index_freq, name=_index_name)
@pytest.fixture
def _static_values(index):
"""Fixture for parametrization of values used in parametrization of
Series and DataFrames with date_range, period_range and
timedelta_range indexes"""
return np.arange(len(index))
@pytest.fixture
def _series_name():
"""Fixture for parametrization of Series name for Series used with
date_range, period_range and timedelta_range indexes"""
return None
@pytest.fixture
def series(index, _series_name, _static_values):
"""Fixture for parametrization of Series with date_range, period_range and
timedelta_range indexes"""
return Series(_static_values, index=index, name=_series_name)
@pytest.fixture
def empty_series(series):
"""Fixture for parametrization of empty Series with date_range,
period_range and timedelta_range indexes"""
return series[:0]
@pytest.fixture
def frame(index, _series_name, _static_values):
"""Fixture for parametrization of DataFrame with date_range, period_range
and timedelta_range indexes"""
# _series_name is intentionally unused
return DataFrame({"value": _static_values}, index=index)
@pytest.fixture
def empty_frame(series):
"""Fixture for parametrization of empty DataFrame with date_range,
period_range and timedelta_range indexes"""
index = series.index[:0]
return DataFrame(index=index)
@pytest.fixture(params=[Series, DataFrame])
def series_and_frame(request, series, frame):
"""Fixture for parametrization of Series and DataFrame with date_range,
period_range and timedelta_range indexes"""
if request.param == Series:
return series
if request.param == DataFrame:
return frame
| apache-2.0 |
joshua-cogliati-inl/raven | framework/Samplers/AdaptiveDynamicEventTree.py | 1 | 31872 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Adaptive Dynamic Event Tree and
the Adaptive Hybrid Dynamic Event Tree sampling strategies
Created on May 21, 2016
@author: alfoa
  supersedes Samplers.py from alfoa
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import sys
import copy
import numpy as np
from operator import mul
from functools import reduce
import xml.etree.ElementTree as ET
from sklearn import neighbors
import itertools
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .DynamicEventTree import DynamicEventTree
from .LimitSurfaceSearch import LimitSurfaceSearch
from utils import utils
import utils.TreeStructure as ETS
import MessageHandler
#Internal Modules End--------------------------------------------------------------------------------
class AdaptiveDynamicEventTree(DynamicEventTree, LimitSurfaceSearch):
"""
This class is aimed to perform a supervised Adaptive Dynamic Event Tree sampling strategy
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super(AdaptiveDynamicEventTree, cls).getInputSpecification()
return inputSpecification
def __init__(self):
"""
Default Constructor that will initialize member variables with reasonable
defaults or empty lists/dictionaries where applicable.
@ In, None
@ Out, None
"""
DynamicEventTree.__init__(self) # init DET
LimitSurfaceSearch.__init__(self) # init Adaptive
self.detAdaptMode = 1 # Adaptive Dynamic Event Tree method (=1 -> DynamicEventTree as hybridsampler and subsequent LimitSurfaceSearch,=2 -> DynamicEventTree online adaptive)
self.noTransitionStrategy = 1 # Strategy in case no transitions have been found by DET (1 = 'Probability MC', 2 = Increase the grid exploration)
    self.insertAdaptBPb = True # Add probability thresholds requested by the adaptive search into the initial grid
self.startAdaptive = False # Flag to trigger the begin of the adaptive limit surface search
self.adaptiveReady = False # Flag to store the response of the LimitSurfaceSearch.localStillReady method
self.investigatedPoints = [] # List containing the points that have been already investigated
self.completedHistCnt = 1 # Counter of the completed histories
self.hybridDETstrategy = None # Integer flag to turn the hybrid strategy on:
# None -> No hybrid approach,
# 1 -> the epistemic variables are going to be part of the limit surface search
# 2 -> the epistemic variables are going to be treated by a normal hybrid DET approach and the LimitSurface search
# will be performed on each epistemic tree (n LimitSurfaces)
self.foundEpistemicTree = False # flag that testifies if an epistemic tree has been found (Adaptive Hybrid DET)
self.actualHybridTree = '' # name of the root tree used in self.hybridDETstrategy=2 to check which Tree needs to be used for the current LS search
self.sortedListOfHists = [] # sorted list of histories
@staticmethod
def _checkIfRunning(treeValues):
"""
Static method (no self) that checks if a job is running
@ In, treeValues, TreeStructure.Node, the node in which the running info are stored
@ Out, _checkIfRunning, bool, is it running?
"""
return not treeValues['runEnded']
@staticmethod
def _checkEnded(treeValues):
"""
Static method (no self) that checks if a job finished to run
@ In, treeValues, TreeStructure.Node, the node in which the running info are stored
@ Out, _checkEnded, bool, is it finished?
"""
return treeValues['runEnded']
@staticmethod
def _checkCompleteHistory(treeValues):
"""
Static method (no self) that checks if a 'branch' represents a completed history
@ In, treeValues, TreeStructure.Node, the node in which the running info are stored
      @ Out, _checkCompleteHistory, bool, is it a completed history (hit the last threshold?)
"""
return treeValues['completedHistory']
def _localWhatDoINeed(self):
"""
This method is a local mirror of the general whatDoINeed method.
It is implmented by the samplers that need to request special objects
@ In, None
@ Out, needDict, dict, dictionary listing needed objects
"""
#adaptNeedInst = self.limitSurfaceInstances.values()[-1]._localWhatDoINeed()
needDict = dict(itertools.chain(LimitSurfaceSearch._localWhatDoINeed(self).items(),DynamicEventTree._localWhatDoINeed(self).items()))
return needDict
def _checkIfStartAdaptive(self):
"""
Function that checks if the adaptive needs to be started (mode 1)
@ In, None
@ Out, None
"""
if not self.startAdaptive:
self.startAdaptive = True
for treer in self.TreeInfo.values():
for _ in treer.iterProvidedFunction(self._checkIfRunning):
self.startAdaptive = False
break
if not self.startAdaptive:
break
def _checkClosestBranch(self):
"""
Function that checks the closest branch already evaluated
@ In, None
@ Out, returnTuple, tuple, closest branch info:
- if self.hybridDETstrategy and branch found -> returnTuple = (valBranch,cdfValues,treer)
- if self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues,treer)
- if not self.hybridDETstrategy and branch found -> returnTuple = (valBranch,cdfValues)
- if not self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues)
"""
# compute cdf of sampled vars
lowerCdfValues = {}
cdfValues = {}
self.raiseADebug("Check for closest branch:")
self.raiseADebug("_"*50)
for key,value in self.values.items():
self.raiseADebug("Variable name : "+str(key))
self.raiseADebug("Distrbution name: "+str(self.toBeSampled[key]))
if key not in self.epistemicVariables.keys():
cdfValues[key] = self.distDict[key].cdf(value)
lowerCdfValues[key] = utils.find_le(self.branchProbabilities[key],cdfValues[key])[0]
self.raiseADebug("CDF value : "+str(cdfValues[key]))
self.raiseADebug("Lower CDF found : "+str(lowerCdfValues[key]))
self.raiseADebug("_"*50)
#if hybrid DET, we need to find the correct tree that matches the values of the epistemic
if self.hybridDETstrategy is not None:
self.foundEpistemicTree, treer, compareDict = False, None, dict.fromkeys(self.epistemicVariables.keys(),False)
for tree in self.TreeInfo.values():
epistemicVars = tree.getrootnode().get("hybridsamplerCoordinate")[0]['SampledVars']
for key in self.epistemicVariables.keys():
compareDict[key] = utils.compare(epistemicVars[key],self.values[key])
if all(compareDict.values()):
# we found the right epistemic tree
self.foundEpistemicTree, treer = True, tree
break
else:
treer = utils.first(self.TreeInfo.values())
# check if in the adaptive points already explored (if not push into the grid)
if not self.insertAdaptBPb:
candidatesBranch = []
# check if adaptive point is better choice -> TODO: improve efficiency
for invPoint in self.investigatedPoints:
pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]
if all(i <= pbth[cnt] for cnt,i in enumerate(cdfValues.values())):
candidatesBranch.append(invPoint)
if len(candidatesBranch) > 0:
if None in lowerCdfValues.values():
lowerCdfValues = candidatesBranch[0]
for invPoint in candidatesBranch:
pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]
if all(i >= pbth[cnt] for cnt,i in enumerate(lowerCdfValues.values())):
lowerCdfValues = invPoint
# Check if The adaptive point requested is outside the so far run grid; in case return None
# In addition, if Adaptive Hybrid DET, if treer is None, we did not find any tree
# in the epistemic space => we need to create another one
if None in lowerCdfValues.values() or treer is None:
if self.hybridDETstrategy is not None:
returnTuple = None, cdfValues, treer
else:
returnTuple = None, cdfValues
return returnTuple
nntrain, mapping = None, {}
for ending in treer.iterProvidedFunction(self._checkEnded):
#already ended branches, create training set for nearest algorithm (take coordinates <= of cdfValues) -> TODO: improve efficiency
pbth = [ending.get('SampledVarsPb')[key] for key in lowerCdfValues.keys()]
if all(pbth[cnt] <= i for cnt,i in enumerate(lowerCdfValues.values())):
if nntrain is None:
nntrain = np.zeros((1,len(cdfValues.keys())))
nntrain[0,:] = np.array(copy.copy(pbth))
else:
nntrain = np.concatenate((nntrain,np.atleast_2d(np.array(copy.copy(pbth)))),axis=0)
mapping[nntrain.shape[0]] = ending
if nntrain is not None:
neigh = neighbors.NearestNeighbors(n_neighbors=len(mapping.keys()))
neigh.fit(nntrain)
valBranch = self._checkValidityOfBranch(neigh.kneighbors([list(lowerCdfValues.values())]),mapping)
if self.hybridDETstrategy is not None:
returnTuple = valBranch,cdfValues,treer
else:
returnTuple = valBranch,cdfValues
return returnTuple
else:
returnTuple = (None,cdfValues,treer) if self.hybridDETstrategy is not None else (None,cdfValues)
return returnTuple
def _checkValidityOfBranch(self,branchSet,mapping):
"""
Function that checks if the nearest branches found by method _checkClosestBranch are valid
@ In, branchSet, tuple, tuple of branches
@ In, mapping, dict, dictionary of candidated branches
@ Out, validBranch, TreeStructure.Node, most valid branch (if not found, return None)
"""
validBranch = None
idOfBranches = branchSet[1][-1]
for closestBranch in idOfBranches:
if not mapping[closestBranch+1].get('completedHistory') and not mapping[closestBranch+1].get('happenedEvent'):
validBranch = mapping[closestBranch+1]
break
return validBranch
def _retrieveBranchInfo(self,branch):
"""
Function that retrieves the key information from a branch to start a newer calculation
@ In, branch, TreeStructure.Node, the branch to inquire
@ Out, info, dict, the dictionary with information on the inputted branch
"""
info = branch.getValues()
info['actualBranchOnLevel'] = branch.numberBranches()
info['parentNode'] = branch
return info
def _constructEndInfoFromBranch(self,model, myInput, info, cdfValues):
"""
Method to construct the end information from the 'info' inputted
@ In, model, Models object, the model that is used to explore the input space (e.g. a code, like RELAP-7)
@ In, myInput, list, list of inputs for the Models object (passed through the Steps XML block)
@ In, info, dict, dictionary of information at the end of a branch (information collected by the method _retrieveBranchInfo)
@ In, cdfValues, dict, dictionary of CDF thresholds reached by the branch that just ended.
@ Out, None
"""
endInfo = info['parentNode'].get('endInfo')
del self.inputInfo
self.counter += 1
self.branchCountOnLevel = info['actualBranchOnLevel']+1
    # Get Parent node name => the branch name is created by appending a dash and the self.branchCountOnLevel counter to this name
rname = info['parentNode'].get('name') + '-' + str(self.branchCountOnLevel)
info['parentNode'].add('completedHistory', False)
self.raiseADebug(str(rname))
bcnt = self.branchCountOnLevel
while info['parentNode'].isAnActualBranch(rname):
bcnt += 1
rname = info['parentNode'].get('name') + '-' + str(bcnt)
# create a subgroup that will be appended to the parent element in the xml tree structure
subGroup = ETS.HierarchicalNode(self.messageHandler,rname)
subGroup.add('parent', info['parentNode'].get('name'))
subGroup.add('name', rname)
self.raiseADebug('cond pb = '+str(info['parentNode'].get('conditionalPbr')))
condPbC = float(info['parentNode'].get('conditionalPbr'))
# Loop over branchChangedParams (events) and start storing information,
# such as conditional pb, variable values, into the xml tree object
branchChangedParamValue = []
branchChangedParamPb = []
branchParams = []
if endInfo:
for key in endInfo['branchChangedParams'].keys():
branchParams.append(key)
branchChangedParamPb.append(endInfo['branchChangedParams'][key]['associatedProbability'][0])
branchChangedParamValue.append(endInfo['branchChangedParams'][key]['oldValue'][0])
subGroup.add('branchChangedParam',branchParams)
subGroup.add('branchChangedParamValue',branchChangedParamValue)
subGroup.add('branchChangedParamPb',branchChangedParamPb)
else:
pass
# add conditional probability
subGroup.add('conditionalPbr',condPbC)
# add initiator distribution info, start time, etc.
subGroup.add('startTime', info['parentNode'].get('endTime'))
    # initialize the endTime to be equal to the start one... It will be modified at the end of this branch
subGroup.add('endTime', info['parentNode'].get('endTime'))
# add the branchedLevel dictionary to the subgroup
# branch calculation info... running, queue, etc are set here
subGroup.add('runEnded',False)
subGroup.add('running',False)
subGroup.add('queue',True)
subGroup.add('completedHistory', False)
# Append the new branch (subgroup) info to the parentNode in the tree object
info['parentNode'].appendBranch(subGroup)
# Fill the values dictionary that will be passed into the model in order to create an input
# In this dictionary the info for changing the original input is stored
self.inputInfo = {'prefix':rname,'endTimeStep':info['parentNode'].get('actualEndTimeStep'),
'branchChangedParam':subGroup.get('branchChangedParam'),
'branchChangedParamValue':subGroup.get('branchChangedParamValue'),
'conditionalPb':subGroup.get('conditionalPbr'),
'startTime':info['parentNode'].get('endTime'),
'RAVEN_parentID':subGroup.get('parent'),
'RAVEN_isEnding':True}
# add the newer branch name to the map
self.rootToJob[rname] = self.rootToJob[subGroup.get('parent')]
    # check if it is a preconditioned DET sampling; if so, add the related information
    # (it exists only in case a hybridDET strategy is activated)
precSampled = info['parentNode'].get('hybridsamplerCoordinate')
if precSampled:
self.inputInfo['hybridsamplerCoordinate' ] = copy.deepcopy(precSampled)
subGroup.add('hybridsamplerCoordinate', copy.copy(precSampled))
    # The probability thresholds are stored here in the cdfValues dictionary... We are sure that they are within the ones defined in the grid
# check is not needed
self.inputInfo['initiatorDistribution' ] = [self.toBeSampled[key] for key in cdfValues.keys()]
self.inputInfo['PbThreshold' ] = list(cdfValues.values())
self.inputInfo['ValueThreshold' ] = [self.distDict[key].ppf(value) for key,value in cdfValues.items()]
self.inputInfo['SampledVars' ] = {}
self.inputInfo['SampledVarsPb' ] = {}
for varname in self.standardDETvariables:
self.inputInfo['SampledVars' ][varname] = self.distDict[varname].ppf(cdfValues[varname])
self.inputInfo['SampledVarsPb'][varname] = cdfValues[varname]
# constant variables
self._constantVariables()
if precSampled:
for precSample in precSampled:
self.inputInfo['SampledVars' ].update(precSample['SampledVars'])
self.inputInfo['SampledVarsPb'].update(precSample['SampledVarsPb'])
self.inputInfo['PointProbability' ] = reduce(mul, self.inputInfo['SampledVarsPb'].values())*subGroup.get('conditionalPbr')
self.inputInfo['ProbabilityWeight'] = self.inputInfo['PointProbability' ]
self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})
# add additional edits if needed
model.getAdditionalInputEdits(self.inputInfo)
# Add the new input path into the RunQueue system
newInputs = {'args':[str(self.type)], 'kwargs': dict(self.inputInfo)}
self.RunQueue['queue'].append(newInputs)
self.RunQueue['identifiers'].append(self.inputInfo['prefix'])
for key,value in self.inputInfo.items():
subGroup.add(key,copy.copy(value))
if endInfo:
subGroup.add('endInfo',copy.deepcopy(endInfo))
def localStillReady(self,ready): #, lastOutput= None
"""
      First performs some checks to understand what needs to be done, possibly performing an early return.
      The resulting readiness flag is returned.
@ In, ready, bool, a boolean representing whether the caller is prepared for another input.
@ Out, ready, bool, a boolean representing whether the caller is prepared for another input.
"""
if self.counter == 0:
return True
if len(self.RunQueue['queue']) != 0:
detReady = True
else:
detReady = False
# since the RunQueue is empty, let's check if there are still branches running => if not => start the adaptive search
self._checkIfStartAdaptive()
if self.startAdaptive:
#if self._endJobRunnable != 1: self._endJobRunnable = 1
data = self.lastOutput.asDataset()
endingData = data.where(data['RAVEN_isEnding']==True,drop=True)
numCompletedHistories = len(endingData['RAVEN_isEnding'])
      if numCompletedHistories > self.completedHistCnt:
        lastOutDict = {key:endingData[key].values for key in endingData.keys()}
        actualLastOutput = self.lastOutput
        self.lastOutput = copy.deepcopy(lastOutDict)
        ready = LimitSurfaceSearch.localStillReady(self,ready)
        self.lastOutput = actualLastOutput
        self.completedHistCnt = numCompletedHistories
        self.raiseAMessage("Completed full histories are "+str(self.completedHistCnt))
      else:
        ready = False
self.adaptiveReady = ready
if ready or detReady:
return True
else:
return False
return detReady
def localGenerateInput(self,model,myInput):
"""
Function to select the next most informative point for refining the limit
surface search.
After this method is called, the self.inputInfo should be ready to be sent
to the model
@ In, model, model instance, an instance of a model
@ In, myInput, list, a list of the original needed inputs for the model (e.g. list of files, etc.)
@ Out, None
"""
if self.startAdaptive == True and self.adaptiveReady == True:
LimitSurfaceSearch.localGenerateInput(self,model,myInput)
#the adaptive sampler created the next point sampled vars
#find the closest branch
if self.hybridDETstrategy is not None:
closestBranch, cdfValues, treer = self._checkClosestBranch()
else:
closestBranch, cdfValues = self._checkClosestBranch()
if closestBranch is None:
        self.raiseADebug('A usable branch for the next candidate has not been found => create a parallel branch!')
# add pbthresholds in the grid
investigatedPoint = {}
for key,value in cdfValues.items():
ind = utils.find_le_index(self.branchProbabilities[key],value)
if not ind:
ind = 0
if value not in self.branchProbabilities[key]:
self.branchProbabilities[key].insert(ind,value)
self.branchValues[key].insert(ind,self.distDict[key].ppf(value))
investigatedPoint[key] = value
# collect investigated point
self.investigatedPoints.append(investigatedPoint)
if closestBranch:
info = self._retrieveBranchInfo(closestBranch)
self._constructEndInfoFromBranch(model, myInput, info, cdfValues)
else:
# create a new tree, since there are no branches that are close enough to the adaptive request
elm = ETS.HierarchicalNode(self.messageHandler,self.name + '_' + str(len(self.TreeInfo.keys())+1))
elm.add('name', self.name + '_'+ str(len(self.TreeInfo.keys())+1))
elm.add('startTime', 0.0)
# Initialize the endTime to be equal to the start one...
        # It will be modified at the end of each branch
elm.add('endTime', 0.0)
elm.add('runEnded',False)
elm.add('running',True)
elm.add('queue',False)
elm.add('completedHistory', False)
branchedLevel = {}
for key,value in cdfValues.items():
branchedLevel[key] = utils.index(self.branchProbabilities[key],value)
# The dictionary branchedLevel is stored in the xml tree too. That's because
# the advancement of the thresholds must follow the tree structure
elm.add('branchedLevel', branchedLevel)
if self.hybridDETstrategy is not None and not self.foundEpistemicTree:
# adaptive hybrid DET and not found a tree in the epistemic space
# take the first tree and modify the hybridsamplerCoordinate
hybridSampled = copy.deepcopy(utils.first(self.TreeInfo.values()).getrootnode().get('hybridsamplerCoordinate'))
for hybridStrategy in hybridSampled:
for key in self.epistemicVariables.keys():
if key in hybridStrategy['SampledVars'].keys():
self.raiseADebug("epistemic var " + str(key)+" value = "+str(self.values[key]))
hybridStrategy['SampledVars'][key] = copy.copy(self.values[key])
hybridStrategy['SampledVarsPb'][key] = self.distDict[key].pdf(self.values[key])
hybridStrategy['prefix'] = len(self.TreeInfo.values())+1
# TODO: find a strategy to recompute the probability weight here (for now == PointProbability)
hybridStrategy['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())
hybridStrategy['ProbabilityWeight'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())
elm.add('hybridsamplerCoordinate', hybridSampled)
self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})
# Here it is stored all the info regarding the DET => we create the info for all the branchings and we store them
self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys())+1)] = ETS.HierarchicalTree(self.messageHandler,elm)
self._createRunningQueueBeginOne(self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys()))],branchedLevel, model,myInput)
return DynamicEventTree.localGenerateInput(self,model,myInput)
def localInputAndChecks(self,xmlNode, paramInput):
"""
Class specific xml inputs will be read here and checked for validity.
@ In, xmlNode, xml.etree.ElementTree.Element, The xml element node that will be checked against the available options specific to this Sampler.
@ In, paramInput, InputData.ParameterInput, the parsed parameters
@ Out, None
"""
#TODO remove using xmlNode
    #check if the hybrid DET has been activated; in that case remove the nodes and treat them separately
hybridNodes = xmlNode.findall("HybridSampler")
if len(hybridNodes) != 0:
# check the type of hybrid that needs to be performed
limitSurfaceHybrid = False
for elm in hybridNodes:
samplType = elm.attrib['type'] if 'type' in elm.attrib.keys() else None
if samplType == 'LimitSurface':
if len(hybridNodes) != 1:
self.raiseAnError(IOError,'if one of the HybridSampler is of type "LimitSurface", it can not be combined with other strategies. Only one HybridSampler node can be inputted!')
limitSurfaceHybrid = True
if limitSurfaceHybrid == True:
#remove the elements from original xmlNode and check if the types are compatible
for elm in hybridNodes:
xmlNode.remove(elm)
self.hybridDETstrategy = 1
else:
self.hybridDETstrategy = 2
if self.hybridDETstrategy == 2:
self.raiseAnError(IOError, 'The sheaf of LSs for the Adaptive Hybrid DET is not yet available. Use type "LimitSurface"!')
DynamicEventTree.localInputAndChecks(self,xmlNode, paramInput)
# now we put back the nodes into the xmlNode to initialize the LimitSurfaceSearch with those variables as well
for elm in hybridNodes:
for child in elm:
if limitSurfaceHybrid == True:
xmlNode.append(child)
if child.tag in ['variable','Distribution']:
self.epistemicVariables[child.attrib['name']] = None
LimitSurfaceSearch._readMoreXMLbase(self,xmlNode)
LimitSurfaceSearch.localInputAndChecks(self,xmlNode, paramInput)
if 'mode' in xmlNode.attrib.keys():
if xmlNode.attrib['mode'].lower() == 'online':
self.detAdaptMode = 2
elif xmlNode.attrib['mode'].lower() == 'post':
self.detAdaptMode = 1
else:
self.raiseAnError(IOError,'unknown mode ' + xmlNode.attrib['mode'] + '. Available are "online" and "post"!')
if 'noTransitionStrategy' in xmlNode.attrib.keys():
if xmlNode.attrib['noTransitionStrategy'].lower() == 'mc':
self.noTransitionStrategy = 1
elif xmlNode.attrib['noTransitionStrategy'].lower() == 'grid':
self.noTransitionStrategy = 2
else:
self.raiseAnError(IOError,'unknown noTransitionStrategy '+xmlNode.attrib['noTransitionStrategy']+'. Available are "mc" and "grid"!')
if 'updateGrid' in xmlNode.attrib.keys():
if xmlNode.attrib['updateGrid'].lower() in utils.stringsThatMeanTrue():
self.insertAdaptBPb = True
# we add an artificial threshold because I need to find a way to prepend a rootbranch into a Tree object
for val in self.branchProbabilities.values():
if min(val) != 1e-3:
val.insert(0, 1e-3)
def _generateDistributions(self,availableDist,availableFunc):
"""
      Generates the distributions and functions.
      @ In, availableDist, dict, dict of distributions
@ In, availableFunc, dict, dict of functions
@ Out, None
"""
DynamicEventTree._generateDistributions(self,availableDist,availableFunc)
def localInitialize(self,solutionExport = None):
"""
Will perform all initialization specific to this Sampler. For instance,
creating an empty container to hold the identified surface points, error
checking the optionally provided solution export and other preset values,
and initializing the limit surface Post-Processor used by this sampler.
@ In, solutionExport, DataObjects, optional, a PointSet to hold the solution (a list of limit surface points)
@ Out, None
"""
if self.detAdaptMode == 2:
self.startAdaptive = True
# we first initialize the LimitSurfaceSearch sampler
LimitSurfaceSearch.localInitialize(self,solutionExport=solutionExport)
if self.hybridDETstrategy is not None:
# we are running an adaptive hybrid DET and not only an adaptive DET
if self.hybridDETstrategy == 1:
gridVector = self.limitSurfacePP.gridEntity.returnParameter("gridVectors")
# construct an hybrid DET through an XML node
distDict, xmlNode = {}, ET.fromstring('<InitNode> <HybridSampler type="Grid"/> </InitNode>')
for varName, dist in self.distDict.items():
if varName.replace('<distribution>','') in self.epistemicVariables.keys():
# found an epistemic
varNode = ET.Element('Distribution' if varName.startswith('<distribution>') else 'variable',{'name':varName.replace('<distribution>','')})
varNode.append(ET.fromstring("<distribution>"+dist.name.strip()+"</distribution>"))
distDict[dist.name.strip()] = self.distDict[varName]
varNode.append(ET.fromstring('<grid construction="custom" type="value">'+' '.join([str(elm) for elm in utils.first(gridVector.values())[varName.replace('<distribution>','')]])+'</grid>'))
xmlNode.find("HybridSampler").append(varNode)
#TODO, need to pass real paramInput
self._localInputAndChecksHybrid(xmlNode, paramInput=None)
for hybridsampler in self.hybridStrategyToApply.values():
hybridsampler._generateDistributions(distDict, {})
DynamicEventTree.localInitialize(self)
if self.hybridDETstrategy == 2:
self.actualHybridTree = utils.first(self.TreeInfo.keys())
self._endJobRunnable = sys.maxsize
def generateInput(self,model,oldInput):
"""
This method has to be overwritten to provide the specialization for the specific sampler
      The model instance might be needed since, especially for external codes,
only the code interface possesses the dictionary for reading the variable definition syntax
@ In, model, model instance, it is the instance of a RAVEN model
@ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc. etc)
@ Out, generateInput, tuple(0,list), list containing the new inputs -in reality it is the model that returns this; the Sampler generates the value to be placed in the input of the model.
"""
return DynamicEventTree.generateInput(self, model, oldInput)
def localFinalizeActualSampling(self,jobObject,model,myInput):
"""
      General function (available to all samplers) that finalizes the sampling
      calculation just ended. In this case (DET), the function reads the
information from the ended calculation, updates the working variables, and
creates the new inputs for the next branches
@ In, jobObject, instance, an instance of a JobHandler
@ In, model, model instance, it is the instance of a RAVEN model
@ In, myInput, list, the generating input
@ Out, None
"""
returncode = DynamicEventTree.localFinalizeActualSampling(self,jobObject,model,myInput,genRunQueue=False)
forceEvent = True if self.startAdaptive else False
if returncode:
self._createRunningQueue(model,myInput, forceEvent)
| apache-2.0 |
timothydmorton/bokeh | bokeh/server/tests/config/test_blaze_config.py | 29 | 1202 | from __future__ import absolute_import
import numpy as np
import pandas as pd
qty=10000
gauss = {'oneA': np.random.randn(qty),
'oneB': np.random.randn(qty),
'cats': np.random.randint(0,5,size=qty),
'hundredA': np.random.randn(qty)*100,
'hundredB': np.random.randn(qty)*100}
gauss = pd.DataFrame(gauss)
uniform = {'oneA': np.random.rand(qty),
'oneB': np.random.rand(qty),
'hundredA': np.random.rand(qty)*100,
'hundredB': np.random.rand(qty)*100}
uniform = pd.DataFrame(uniform)
bivariate = {'A1': np.hstack([np.random.randn(qty // 2), np.random.randn(qty // 2)+1]),
             'A2': np.hstack([np.random.randn(qty // 2), np.random.randn(qty // 2)+2]),
             'A3': np.hstack([np.random.randn(qty // 2), np.random.randn(qty // 2)+3]),
             'A4': np.hstack([np.random.randn(qty // 2), np.random.randn(qty // 2)+4]),
             'A5': np.hstack([np.random.randn(qty // 2), np.random.randn(qty // 2)+5]),
             'B': np.random.randn(qty),
             'C': np.hstack([np.zeros(qty // 2), np.ones(qty // 2)])}
bivariate = pd.DataFrame(bivariate)
data_dict = dict(uniform=uniform,
gauss=gauss,
bivariate=bivariate)
| bsd-3-clause |
bassio/omicexperiment | omicexperiment/transforms/filters/sample.py | 1 | 1678 | import pandas as pd
from omicexperiment.transforms.transform import Filter, AttributeFilter, GroupByTransform, FlexibleOperatorMixin, AttributeFlexibleOperatorMixin, TransformObjectsProxy
from omicexperiment.transforms.sample import SampleGroupBy, SampleSumCounts
class SampleMinCount(Filter):
def __dapply__(self, experiment):
if self.operator == '__eq__':
assert isinstance(self.value, int)
df = experiment.data_df
criteria = (df.sum() >= self.value)
return df[criteria.index[criteria]]
class SampleMaxCount(Filter):
def __dapply__(self, experiment):
if self.operator == '__eq__':
assert isinstance(self.value, int)
df = experiment.data_df
criteria = (df.sum() <= self.value)
return df[criteria.index[criteria]]
class SampleCount(FlexibleOperatorMixin, Filter):
def __dapply__(self, experiment):
_op = self._op_function(experiment.data_df.sum())
        criteria = _op(self.value)
return experiment.data_df.reindex(columns=criteria.index[criteria])
class SampleAttributeFilter(AttributeFilter, AttributeFlexibleOperatorMixin):
def __dapply__(self, experiment):
_op = self._op_function(experiment.mapping_df)
criteria = _op(self.value)
return experiment.data_df.reindex(columns=criteria.index[criteria])
class Sample(TransformObjectsProxy):
#not_in =
#in_
count = SampleCount()
att = SampleAttributeFilter()
c = SampleAttributeFilter()
groupby = SampleGroupBy()
sum_counts = SampleSumCounts()
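# A hedged usage sketch: the proxy attributes above build filter objects whose
# comparison/attribute protocol is implemented by the mixins in transform.py
# (not shown here), and an experiment object consumes them via __dapply__.
# The column name and threshold below are purely illustrative assumptions.
#
#   gut_only = Sample.c.body_site == 'gut'   # SampleAttributeFilter
#   deep_only = Sample.count >= 1000         # SampleCount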
| bsd-3-clause |
PanDAWMS/panda-bigmon-core-old | core/resource/models.py | 3 | 13831 | """
topology.models -- for Schedconfig and other topology-related objects
"""
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
# Create your models here.
class Schedconfig(models.Model):
name = models.CharField(max_length=180, db_column='NAME')
nickname = models.CharField(max_length=180, primary_key=True, db_column='NICKNAME')
queue = models.CharField(max_length=180, db_column='QUEUE', blank=True)
localqueue = models.CharField(max_length=60, db_column='LOCALQUEUE', blank=True)
system = models.CharField(max_length=180, db_column='SYSTEM')
sysconfig = models.CharField(max_length=60, db_column='SYSCONFIG', blank=True)
environ = models.CharField(max_length=750, db_column='ENVIRON', blank=True)
gatekeeper = models.CharField(max_length=120, db_column='GATEKEEPER', blank=True)
jobmanager = models.CharField(max_length=240, db_column='JOBMANAGER', blank=True)
se = models.CharField(max_length=1200, db_column='SE', blank=True)
ddm = models.CharField(max_length=360, db_column='DDM', blank=True)
jdladd = models.CharField(max_length=1500, db_column='JDLADD', blank=True)
globusadd = models.CharField(max_length=300, db_column='GLOBUSADD', blank=True)
jdl = models.CharField(max_length=180, db_column='JDL', blank=True)
jdltxt = models.CharField(max_length=1500, db_column='JDLTXT', blank=True)
version = models.CharField(max_length=180, db_column='VERSION', blank=True)
site = models.CharField(max_length=180, db_column='SITE')
region = models.CharField(max_length=180, db_column='REGION', blank=True)
gstat = models.CharField(max_length=180, db_column='GSTAT', blank=True)
tags = models.CharField(max_length=600, db_column='TAGS', blank=True)
cmd = models.CharField(max_length=600, db_column='CMD', blank=True)
lastmod = models.DateTimeField(db_column='LASTMOD')
errinfo = models.CharField(max_length=240, db_column='ERRINFO', blank=True)
nqueue = models.IntegerField(db_column='NQUEUE')
comment_field = models.CharField(max_length=1500, db_column='COMMENT_', blank=True) # Field renamed because it was a Python reserved word.
appdir = models.CharField(max_length=1500, db_column='APPDIR', blank=True)
datadir = models.CharField(max_length=240, db_column='DATADIR', blank=True)
tmpdir = models.CharField(max_length=240, db_column='TMPDIR', blank=True)
wntmpdir = models.CharField(max_length=240, db_column='WNTMPDIR', blank=True)
dq2url = models.CharField(max_length=240, db_column='DQ2URL', blank=True)
special_par = models.CharField(max_length=240, db_column='SPECIAL_PAR', blank=True)
python_path = models.CharField(max_length=240, db_column='PYTHON_PATH', blank=True)
nodes = models.IntegerField(db_column='NODES')
status = models.CharField(max_length=30, db_column='STATUS', blank=True)
copytool = models.CharField(max_length=240, db_column='COPYTOOL', blank=True)
copysetup = models.CharField(max_length=600, db_column='COPYSETUP', blank=True)
releases = models.CharField(max_length=1500, db_column='RELEASES', blank=True)
sepath = models.CharField(max_length=1200, db_column='SEPATH', blank=True)
envsetup = models.CharField(max_length=600, db_column='ENVSETUP', blank=True)
copyprefix = models.CharField(max_length=480, db_column='COPYPREFIX', blank=True)
lfcpath = models.CharField(max_length=240, db_column='LFCPATH', blank=True)
seopt = models.CharField(max_length=1200, db_column='SEOPT', blank=True)
sein = models.CharField(max_length=1200, db_column='SEIN', blank=True)
seinopt = models.CharField(max_length=1200, db_column='SEINOPT', blank=True)
lfchost = models.CharField(max_length=240, db_column='LFCHOST', blank=True)
cloud = models.CharField(max_length=180, db_column='CLOUD', blank=True)
siteid = models.CharField(max_length=180, db_column='SITEID', blank=True)
proxy = models.CharField(max_length=240, db_column='PROXY', blank=True)
retry = models.CharField(max_length=30, db_column='RETRY', blank=True)
queuehours = models.IntegerField(db_column='QUEUEHOURS')
envsetupin = models.CharField(max_length=600, db_column='ENVSETUPIN', blank=True)
copytoolin = models.CharField(max_length=540, db_column='COPYTOOLIN', blank=True)
copysetupin = models.CharField(max_length=600, db_column='COPYSETUPIN', blank=True)
seprodpath = models.CharField(max_length=1200, db_column='SEPRODPATH', blank=True)
lfcprodpath = models.CharField(max_length=240, db_column='LFCPRODPATH', blank=True)
copyprefixin = models.CharField(max_length=1080, db_column='COPYPREFIXIN', blank=True)
recoverdir = models.CharField(max_length=240, db_column='RECOVERDIR', blank=True)
memory = models.IntegerField(db_column='MEMORY')
maxtime = models.IntegerField(db_column='MAXTIME')
space = models.IntegerField(db_column='SPACE')
tspace = models.DateTimeField(db_column='TSPACE')
cmtconfig = models.CharField(max_length=750, db_column='CMTCONFIG', blank=True)
setokens = models.CharField(max_length=240, db_column='SETOKENS', blank=True)
glexec = models.CharField(max_length=30, db_column='GLEXEC', blank=True)
priorityoffset = models.CharField(max_length=180, db_column='PRIORITYOFFSET', blank=True)
allowedgroups = models.CharField(max_length=300, db_column='ALLOWEDGROUPS', blank=True)
defaulttoken = models.CharField(max_length=300, db_column='DEFAULTTOKEN', blank=True)
pcache = models.CharField(max_length=300, db_column='PCACHE', blank=True)
validatedreleases = models.CharField(max_length=1500, db_column='VALIDATEDRELEASES', blank=True)
accesscontrol = models.CharField(max_length=60, db_column='ACCESSCONTROL', blank=True)
dn = models.CharField(max_length=300, db_column='DN', blank=True)
email = models.CharField(max_length=180, db_column='EMAIL', blank=True)
allowednode = models.CharField(max_length=240, db_column='ALLOWEDNODE', blank=True)
maxinputsize = models.IntegerField(null=True, db_column='MAXINPUTSIZE', blank=True)
timefloor = models.IntegerField(null=True, db_column='TIMEFLOOR', blank=True)
depthboost = models.IntegerField(null=True, db_column='DEPTHBOOST', blank=True)
idlepilotsupression = models.IntegerField(null=True, db_column='IDLEPILOTSUPRESSION', blank=True)
pilotlimit = models.IntegerField(null=True, db_column='PILOTLIMIT', blank=True)
transferringlimit = models.IntegerField(null=True, db_column='TRANSFERRINGLIMIT', blank=True)
cachedse = models.IntegerField(null=True, db_column='CACHEDSE', blank=True)
corecount = models.IntegerField(null=True, db_column='CORECOUNT', blank=True)
countrygroup = models.CharField(max_length=192, db_column='COUNTRYGROUP', blank=True)
availablecpu = models.CharField(max_length=192, db_column='AVAILABLECPU', blank=True)
availablestorage = models.CharField(max_length=192, db_column='AVAILABLESTORAGE', blank=True)
pledgedcpu = models.CharField(max_length=192, db_column='PLEDGEDCPU', blank=True)
pledgedstorage = models.CharField(max_length=192, db_column='PLEDGEDSTORAGE', blank=True)
statusoverride = models.CharField(max_length=768, db_column='STATUSOVERRIDE', blank=True)
allowdirectaccess = models.CharField(max_length=30, db_column='ALLOWDIRECTACCESS', blank=True)
gocname = models.CharField(max_length=192, db_column='GOCNAME', blank=True)
tier = models.CharField(max_length=45, db_column='TIER', blank=True)
multicloud = models.CharField(max_length=192, db_column='MULTICLOUD', blank=True)
lfcregister = models.CharField(max_length=30, db_column='LFCREGISTER', blank=True)
stageinretry = models.IntegerField(null=True, db_column='STAGEINRETRY', blank=True)
stageoutretry = models.IntegerField(null=True, db_column='STAGEOUTRETRY', blank=True)
fairsharepolicy = models.CharField(max_length=1536, db_column='FAIRSHAREPOLICY', blank=True)
allowfax = models.CharField(null=True, max_length=64, db_column='ALLOWFAX', blank=True)
faxredirector = models.CharField(null=True, max_length=256, db_column='FAXREDIRECTOR', blank=True)
maxwdir = models.IntegerField(null=True, db_column='MAXWDIR', blank=True)
celist = models.CharField(max_length=12000, db_column='CELIST', blank=True)
minmemory = models.IntegerField(null=True, db_column='MINMEMORY', blank=True)
maxmemory = models.IntegerField(null=True, db_column='MAXMEMORY', blank=True)
mintime = models.IntegerField(null=True, db_column='MINTIME', blank=True)
allowjem = models.CharField(null=True, max_length=64, db_column='ALLOWJEM', blank=True)
catchall = models.CharField(null=True, max_length=512, db_column='CATCHALL', blank=True)
faxdoor = models.CharField(null=True, max_length=128, db_column='FAXDOOR', blank=True)
wansourcelimit = models.IntegerField(null=True, db_column='WANSOURCELIMIT', blank=True)
wansinklimit = models.IntegerField(null=True, db_column='WANSINKLIMIT', blank=True)
auto_mcu = models.SmallIntegerField(null=True, db_column='AUTO_MCU', blank=True)
objectstore = models.CharField(null=True, max_length=512, db_column='OBJECTSTORE', blank=True)
allowhttp = models.CharField(null=True, max_length=64, db_column='ALLOWHTTP', blank=True)
httpredirector = models.CharField(null=True, max_length=256, db_column='HTTPREDIRECTOR', blank=True)
multicloud_append = models.CharField(null=True, max_length=64, db_column='MULTICLOUD_APPEND', blank=True)
def __str__(self):
return 'Schedconfig:' + str(self.nickname)
def getFields(self):
return ["name", "nickname", "queue", "localqueue", "system", \
"sysconfig", "environ", "gatekeeper", "jobmanager", "se", "ddm", \
"jdladd", "globusadd", "jdl", "jdltxt", "version", "site", \
"region", "gstat", "tags", "cmd", "lastmod", "errinfo", \
"nqueue", "comment_", "appdir", "datadir", "tmpdir", "wntmpdir", \
"dq2url", "special_par", "python_path", "nodes", "status", \
"copytool", "copysetup", "releases", "sepath", "envsetup", \
"copyprefix", "lfcpath", "seopt", "sein", "seinopt", "lfchost", \
"cloud", "siteid", "proxy", "retry", "queuehours", "envsetupin", \
"copytoolin", "copysetupin", "seprodpath", "lfcprodpath", \
"copyprefixin", "recoverdir", "memory", "maxtime", "space", \
"tspace", "cmtconfig", "setokens", "glexec", "priorityoffset", \
"allowedgroups", "defaulttoken", "pcache", "validatedreleases", \
"accesscontrol", "dn", "email", "allowednode", "maxinputsize", \
"timefloor", "depthboost", "idlepilotsupression", "pilotlimit", \
"transferringlimit", "cachedse", "corecount", "countrygroup", \
"availablecpu", "availablestorage", "pledgedcpu", \
"pledgedstorage", "statusoverride", "allowdirectaccess", \
"gocname", "tier", "multicloud", "lfcregister", "stageinretry", \
"stageoutretry", "fairsharepolicy", "allowfax", "faxredirector", \
"maxwdir", "celist", "minmemory", "maxmemory", "mintime", \
"allowjem", "catchall", "faxdoor", "wansourcelimit", \
"wansinklimit", "auto_mcu", "objectstore", "allowhttp", \
"httpredirector", "multicloud_append" ]
def getValuesList(self):
repre = []
for field in self._meta.fields:
repre.append((field.name, field))
return repre
def get_all_fields(self):
"""Returns a list of all field names on the instance."""
fields = []
kys = {}
for f in self._meta.fields:
kys[f.name] = f
kys1 = sorted(kys.keys())
for k in kys1:
f = kys[k]
fname = f.name
# resolve picklists/choices, with get_xyz_display() function
get_choice = 'get_'+fname+'_display'
if hasattr( self, get_choice):
value = getattr( self, get_choice)()
else:
try :
value = getattr(self, fname)
except User.DoesNotExist:
value = None
# only display fields with values and skip some fields entirely
if f.editable and value :
fields.append(
{
'label':f.verbose_name,
'name':f.name,
'value':value,
}
)
return fields
class Meta:
db_table = u'schedconfig'
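# Example ORM usage (a sketch; assumes a configured Django database connection
# for the 'schedconfig' table and that the 'status' value used here exists):
#   online_queues = Schedconfig.objects.filter(status='online').values('nickname', 'cloud')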
class Schedinstance(models.Model):
name = models.CharField(max_length=180, db_column='NAME')
nickname = models.CharField(max_length=180, db_column='NICKNAME', primary_key=True)
pandasite = models.CharField(max_length=180, db_column='PANDASITE', primary_key=True)
nqueue = models.IntegerField(db_column='NQUEUE')
nqueued = models.IntegerField(db_column='NQUEUED')
nrunning = models.IntegerField(db_column='NRUNNING')
nfinished = models.IntegerField(db_column='NFINISHED')
nfailed = models.IntegerField(db_column='NFAILED')
naborted = models.IntegerField(db_column='NABORTED')
njobs = models.IntegerField(db_column='NJOBS')
tvalid = models.DateTimeField(db_column='TVALID')
lastmod = models.DateTimeField(db_column='LASTMOD')
errinfo = models.CharField(max_length=450, db_column='ERRINFO', blank=True)
ndone = models.IntegerField(db_column='NDONE')
totrunt = models.IntegerField(db_column='TOTRUNT')
comment_field = models.CharField(max_length=1500, db_column='COMMENT_', blank=True) # Field renamed because it was a Python reserved word.
class Meta:
db_table = u'schedinstance'
unique_together = ('nickname', 'pandasite')
| apache-2.0 |
akionakamura/scikit-learn | sklearn/mixture/pgmm.py | 1 | 42564 | """
Parsimonious Gaussian Mixture Models
"""
# Author: Thiago Akio Nakamura <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from sklearn.decomposition import PCA
from .. import cluster
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, min_covar=1.e-7):
"""Compute the log probability under a parsimonious Gaussian mixture distribution.
Parameters
----------
# TODO change descriptions of parameters (especially covars)
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian.
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-7.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def sample_gaussian(mean, covar, n_samples=1, random_state=None):
"""Generate random samples from a parsimonious Gaussian mixture distribution.
Parameters
----------
# TODO change descriptions of parameters (especially covars)
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
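Example (a sketch; only the output shape is deterministic):
    sample_gaussian(np.zeros(2), np.eye(2), n_samples=3).shape  # -> (2, 3)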
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
class PGMM(BaseEstimator):
"""Parsimonious Gaussian Mixture Models
Representation of a parsimonious Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a PGMM distribution.
# TODO Review this comment
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
n_pc : int, optional
Number of principal components on each mixture component.
Defaults to 1.
covariance_type : string, optional
String describing the parsimonious covariance structure to
use. Must be a three-letter combination of 'U' (unrestricted) and 'R' (restricted).
Defaults to 'UUR', equivalent to a Mixture of Probabilistic PCA.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-7.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, 'p' for principal subspace and 'n' for noise.
Defaults to 'wmpn'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, 'p' for principal subspace and 'n' for noise.
Defaults to 'wmpn'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
noise_ : array, shape (`n_components`,)
This attribute stores the isotropic noise for each mixture component.
The shape depends on `covariance_type`:
(1) if 'RRR',
(n_features, ) if 'RRU',
(n_components, ) if 'RUR',
(n_components, n_features) if 'RUU',
(1) if 'URR',
(n_features, ) if 'URU',
(n_components, ) if 'UUR',
(n_components, n_features) if 'UUU'
principal_subspace_ : array, shape (n_components, n_features, n_pc)
The principal subspace matrix for each mixture component.
The shape depends on `covariance_type`:
(n_features, n_pc) if 'RRR',
(n_features, n_pc) if 'RRU',
(n_features, n_pc) if 'RUR',
(n_features, n_pc) if 'RUU',
(n_components, n_features, n_pc) if 'URR',
(n_components, n_features, n_pc) if 'URU',
(n_components, n_features, n_pc) if 'UUR',
(n_components, n_features, n_pc) if 'UUU'
covars_ : array, shape (n_components, n_features, n_features)
Covariance parameters for each mixture component,
defined by [noise_ + (principal_subspace * principal_subspace.T)] for each component
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
# TODO Change for a MPPCA, maybe
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
# TODO example
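A minimal usage sketch (not a verified doctest; the import path is an
assumption and may differ depending on where this module is installed):
    import numpy as np
    from sklearn.mixture.pgmm import PGMM
    X = np.random.RandomState(0).randn(200, 5)
    model = PGMM(n_components=2, n_pc=2, covariance_type='UUR')
    model.fit(X)
    labels = model.predict(X)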
"""
def __init__(self, n_components=1, n_pc=1, covariance_type='UUR',
random_state=None, tol=1e-3, min_covar=1e-7,
n_iter=100, n_init=1, params='wmpn', init_params='wmpn',
verbose=0):
self.n_components = n_components
self.n_pc = n_pc
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['RRR', 'RRU', 'RUR', 'RUU',
'URR', 'URU', 'UUR', 'UUU']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('PGMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component."""
return self.covars_
def _set_covars(self, principal_subspace, noise):
"""Provide values for covariance"""
if self.covariance_type.startswith('R'):
n_features = principal_subspace.shape[0]
else:
n_features = principal_subspace.shape[1]
noises = np.empty((self.n_components, n_features, n_features))
subspaces = np.empty((self.n_components, n_features, self.n_pc))
covars = np.zeros((self.n_components, n_features, n_features))
if self.covariance_type == 'RRR':
# noise: (1)
# principal_subspace: (n_features, n_pc)
noises = np.tile(noise * np.eye(n_features), (self.n_components, 1, 1))
subspaces = np.tile(principal_subspace, (self.n_components, 1, 1))
if self.covariance_type == 'RRU':
# noise: (n_features, )
# principal_subspace: (n_features, n_pc)
noises = np.tile(np.diag(noise), (self.n_components, 1, 1))
subspaces = np.tile(principal_subspace, (self.n_components, 1, 1))
if self.covariance_type == 'RUR':
# noise: (n_components, )
# principal_subspace: (n_features, n_pc)
for idx, n in enumerate(noise):
noises[idx] = n * np.eye(n_features)
subspaces = np.tile(principal_subspace, (self.n_components, 1, 1))
if self.covariance_type == 'RUU':
# noise: (n_components, n_features)
# principal_subspace: (n_features, n_pc)
for idx in np.arange(self.n_components):
noises[idx] = np.diag(noise[idx, :])
subspaces = np.tile(principal_subspace, (self.n_components, 1, 1))
if self.covariance_type == 'URR':
# noise: (1)
# principal_subspace: (n_components, n_features, n_pc)
noises = np.tile(noise * np.eye(n_features), (self.n_components, 1, 1))
subspaces = principal_subspace
if self.covariance_type == 'URU':
# noise: (n_features, )
# principal_subspace: (n_components, n_features, n_pc)
noises = np.tile(np.diag(noise), (self.n_components, 1, 1))
subspaces = principal_subspace
if self.covariance_type == 'UUR':
# noise: (n_components, )
# principal_subspace: (n_components, n_features, n_pc)
for idx, n in enumerate(noise):
noises[idx] = n * np.eye(n_features)
subspaces = principal_subspace
if self.covariance_type == 'UUU':
# noise: (n_components, n_features)
# principal_subspace: (n_components, n_features, n_pc)
for idx in np.arange(self.n_components):
noises[idx] = np.diag(noise[idx, :])
subspaces = principal_subspace
for comp in range(self.n_components):
covars[comp] = subspaces[comp].dot(subspaces[comp].T) + noises[comp]
_validate_covars(covars)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_,
self.covars_, self.min_covar) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def sum_score(self, X, y=None):
"""Compute the sum log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Sum of the Log probabilities of every data point in X
"""
logprob, _ = self.score_samples(X)
return logprob.sum()
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
X[comp_in_X] = sample_gaussian(
self.means_[comp], self.covars_[comp],
num_comp_in_X, random_state=random_state).T
return X
# This assumes only one missing variable, due to the dimensionality dropping when slicing.
def _one_missing_one_model(self, x, missing_idxs, mean, covar):
n, d = x.shape
obs_idxs = [i for i in range(d) if i != missing_idxs]
# Separate the input as observed and missing.
x_o = x[:, obs_idxs]
x_m = x[:, [missing_idxs]]
# Separate the means as observed and missing.
mean_o = mean[obs_idxs]
mean_m = mean[missing_idxs]
# Separate the covariance.
covar_oo = covar[obs_idxs, :][:, obs_idxs]
covar_mo = covar[[missing_idxs], :][:, obs_idxs]
covar_om = covar[obs_idxs, :][:, [missing_idxs]]
covar_mm = covar[[missing_idxs], :][:, [missing_idxs]]
covar_oo_inv = np.linalg.inv(covar_oo)
x_m_given_o = mean_m + covar_mo.dot(covar_oo_inv).dot((x_o - mean_o).T).T
reconstructed_x = np.array(x, copy=True)
reconstructed_x[:, [missing_idxs]] = x_m_given_o
return reconstructed_x
def reconstruct_one_missing(self, x, missing_idxs):
reconstruted_x_per_model = np.empty((self.n_components, x.shape[0], x.shape[1]))
reconstruted_scores_per_model = np.empty((x.shape[0], self.n_components), dtype='float')
comp = 0
for mean, covar in zip(self.means_, self.covars_):
reconstruted_x_per_model[comp, :, :] = self._one_missing_one_model(x, missing_idxs, mean, covar)
reconstruted_scores_per_model[:, comp] = self.score(reconstruted_x_per_model[comp, :, :])
comp = comp + 1
select_from = np.argmax(reconstruted_scores_per_model * self.weights_, axis=1)
reconstruted_x = np.zeros(x.shape)
for idx, component_idx in enumerate(select_from):
reconstruted_x[idx] = reconstruted_x_per_model[component_idx, idx, :]
return reconstruted_x, self.sum_score(reconstruted_x)
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the PGMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
n_features = X.shape[1]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'n' in self.init_params or not hasattr(self, 'noise_'):
if self.covariance_type.endswith('RR'):
self.noise_ = np.tile(1.0, 1)
elif self.covariance_type.endswith('RU'):
self.noise_ = np.tile(1.0, n_features)
elif self.covariance_type.endswith('UR'):
self.noise_ = np.tile(1.0, self.n_components)
elif self.covariance_type.endswith('UU'):
self.noise_ = np.tile(1.0, (self.n_components, n_features))
else:
raise ValueError('Invalid value for covariance_type: %s' %
self.covariance_type)
if self.verbose > 1:
print('\tNoise value have been initialized.')
if 'p' in self.init_params or not hasattr(self, 'principal_subspace_'):
pca = PCA(n_components=self.n_pc)
pca.fit(X)
ps = pca.components_.T
self.principal_subspace_ = \
distribute_covar_matrix_to_match_covariance_type(
ps, self.covariance_type, self.n_components)
self._set_covars(self.principal_subspace_, self.noise_)
if self.verbose > 1:
print('\tPrincipal sub-space have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = self.tol
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'principal_subspace': self.principal_subspace_,
'noise': self.noise_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.principal_subspace_ = best_params['principal_subspace']
self.noise_ = best_params['noise']
self._set_covars(self.principal_subspace_, self.noise_)
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the PGMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
new_subspace, new_noise = covar_mstep_func(self, X, responsibilities, weighted_X_sum,
inverse_weights, min_covar)
if 'p' in params:
self.principal_subspace_ = new_subspace
if 'n' in params:
self.noise_ = new_noise
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
npc = self.n_pc
ncomp = self.n_components
if self.covariance_type == 'RRR':
cov_params = (ndim * npc - npc*(npc - 1)/2) + 1
elif self.covariance_type == 'RRU':
cov_params = (ndim * npc - npc*(npc - 1)/2) + ndim
elif self.covariance_type == 'RUR':
cov_params = (ndim * npc - npc*(npc - 1)/2) + ncomp
elif self.covariance_type == 'RUU':
cov_params = (ndim * npc - npc*(npc - 1)/2) + ndim * ncomp
elif self.covariance_type == 'URR':
cov_params = ncomp * (ndim * npc - npc*(npc - 1)/2) + 1
elif self.covariance_type == 'URU':
cov_params = ncomp * (ndim * npc - npc*(npc - 1)/2) + ndim
elif self.covariance_type == 'UUR':
cov_params = ncomp * (ndim * npc - npc*(npc - 1)/2) + ncomp
elif self.covariance_type == 'UUU':
cov_params = ncomp * (ndim * npc - npc*(npc - 1)/2) + ndim * ncomp
mean_params = ndim * ncomp
return int(cov_params + mean_params + ncomp - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _validate_covars(covars):
"""Do basic checks on matrix covariance sizes and values"""
from scipy import linalg
if len(covars.shape) != 3:
raise ValueError("covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
def distribute_covar_matrix_to_match_covariance_type(
tied_sp, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type.startswith('R'):
sp = tied_sp
elif covariance_type.startswith('U'):
sp = np.tile(tied_sp, (n_components, 1, 1))
else:
raise ValueError("covariance_type must start with" +
"'R' or 'U'")
return sp
def _covar_mstep_RRR(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
S = np.empty((pgmm.n_components, n_features, n_features))
final_S = np.tile(0, (n_features, n_features))
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
final_S = final_S + S[c]
beta = pgmm.principal_subspace_.T.dot(
np.linalg.inv(pgmm.noise_*np.eye(n_features)
+ pgmm.principal_subspace_.dot(pgmm.principal_subspace_.T)))
theta = np.eye(pgmm.n_pc) - beta.dot(pgmm.principal_subspace_) + beta.dot(final_S).dot(beta.T)
W = final_S.dot(beta.T).dot(np.linalg.inv(theta))
noises = np.array([np.trace(final_S - W.dot(beta).dot(final_S))/n_features])
return W, noises
def _covar_mstep_RRU(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
S = np.empty((pgmm.n_components, n_features, n_features))
final_S = np.tile(0, (n_features, n_features))
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
final_S = final_S + S[c]
beta = pgmm.principal_subspace_.T.dot(
np.linalg.inv(np.diag(pgmm.noise_)
+ pgmm.principal_subspace_.dot(pgmm.principal_subspace_.T)))
theta = np.eye(pgmm.n_pc) - beta.dot(pgmm.principal_subspace_) + beta.dot(final_S).dot(beta.T)
W = final_S.dot(beta.T).dot(np.linalg.inv(theta))
noises = np.diag(final_S - W.dot(beta).dot(final_S))
return W, noises
def _covar_mstep_RUR(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
beta = np.empty((pgmm.n_components, pgmm.n_pc, n_features))
S = np.empty((pgmm.n_components, n_features, n_features))
theta = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
part1 = np.tile(0, (n_features, pgmm.n_pc))
part2 = np.tile(0, (pgmm.n_pc, pgmm.n_pc))
noises = np.empty(pgmm.n_components)
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
beta[c] = pgmm.principal_subspace_.T.dot(
np.linalg.inv(pgmm.noise_[c]*np.eye(n_features)
+ pgmm.principal_subspace_.dot(pgmm.principal_subspace_.T)))
theta[c] = np.eye(pgmm.n_pc) - beta[c].dot(pgmm.principal_subspace_) + beta[c].dot(S[c]).dot(beta[c].T)
part1 = part1 + (n_data*pgmm.weights_[c] + 10 * EPS)/pgmm.noise_[c]*S[c].dot(beta[c].T)
part2 = part2 + (n_data*pgmm.weights_[c] + 10 * EPS)/pgmm.noise_[c]*theta[c]
W = part1.dot(np.linalg.inv(part2))
for c in range(pgmm.n_components):
noises[c] = np.trace(S[c] - 2*W.dot(beta[c]).dot(S[c]) + W.dot(theta[c]).dot(W.T))/n_features
return W, noises
def _covar_mstep_RUU(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
beta = np.empty((pgmm.n_components, pgmm.n_pc, n_features))
S = np.empty((pgmm.n_components, n_features, n_features))
theta = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
part1 = np.tile(0, (n_features, pgmm.n_pc))
part2 = np.tile(0, (pgmm.n_pc, pgmm.n_pc))
noises = np.empty((pgmm.n_components, n_features))
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
beta[c] = pgmm.principal_subspace_.T.dot(
np.linalg.inv(pgmm.noise_[c]*np.eye(n_features)
+ pgmm.principal_subspace_.dot(pgmm.principal_subspace_.T)))
theta[c] = np.eye(pgmm.n_pc) - beta[c].dot(pgmm.principal_subspace_) + beta[c].dot(S[c]).dot(beta[c].T)
for r in range(n_features):
part1[r] = part1[r] + (n_data*pgmm.weights_[c] + 10 * EPS)/pgmm.noise_[c, r]*S[c].dot(beta[c].T)[r]
part2 = part2 + (n_data*pgmm.weights_[c] + 10 * EPS)/pgmm.noise_[c, r]*theta[c]
W = part1.dot(np.linalg.inv(part2))
for c in range(pgmm.n_components):
noises[c] = np.diag(S[c] - 2*W.dot(beta[c]).dot(S[c]) + W.dot(theta[c]).dot(W.T))
return W, noises
def _covar_mstep_URR(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
beta = np.empty((pgmm.n_components, pgmm.n_pc, n_features))
theta = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
S = np.empty((pgmm.n_components, n_features, n_features))
W = np.empty((pgmm.n_components, n_features, pgmm.n_pc))
noises = np.tile(0, 1)
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
beta[c] = pgmm.principal_subspace_[c].T.dot(
np.linalg.inv(pgmm.noise_*np.eye(n_features)
+ pgmm.principal_subspace_[c].dot(pgmm.principal_subspace_[c].T)))
theta[c] = np.eye(pgmm.n_pc) - beta[c].dot(pgmm.principal_subspace_[c]) + beta[c].dot(S[c]).dot(beta[c].T)
W[c] = S[c].dot(beta[c].T).dot(np.linalg.inv(theta[c]))
noises = noises + pgmm.weights_[c]*np.trace(S[c] - W[c].dot(beta[c]).dot(S[c]))
noises = noises/n_features
return W, noises
def _covar_mstep_URU(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
beta = np.empty((pgmm.n_components, pgmm.n_pc, n_features))
theta = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
S = np.empty((pgmm.n_components, n_features, n_features))
W = np.empty((pgmm.n_components, n_features, pgmm.n_pc))
noises = np.tile(0, n_features)
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
beta[c] = pgmm.principal_subspace_[c].T.dot(
np.linalg.inv(np.diag(pgmm.noise_)
+ pgmm.principal_subspace_[c].dot(pgmm.principal_subspace_[c].T)))
theta[c] = np.eye(pgmm.n_pc) - beta[c].dot(pgmm.principal_subspace_[c]) + beta[c].dot(S[c]).dot(beta[c].T)
W[c] = S[c].dot(beta[c].T).dot(np.linalg.inv(theta[c]))
noises = noises + pgmm.weights_[c]*np.diag(S[c] - W[c].dot(beta[c]).dot(S[c]))
return W, noises
def _covar_mstep_UUR(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
Minv = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
S = np.empty((pgmm.n_components, n_features, n_features))
W = np.empty((pgmm.n_components, n_features, pgmm.n_pc))
noises = np.empty(pgmm.n_components)
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
Minv[c] = np.linalg.inv(pgmm.noise_[c] * np.eye(pgmm.n_pc) + np.dot(pgmm.principal_subspace_[c].T, pgmm.principal_subspace_[c]))
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
W[c] = S[c].dot(pgmm.principal_subspace_[c])\
.dot(np.linalg.inv(pgmm.noise_[c] * np.eye(pgmm.n_pc)
+ Minv[c].dot(pgmm.principal_subspace_[c].T).dot(S[c]).dot(pgmm.principal_subspace_[c])))
noises[c] = np.trace(S[c] - np.dot(S[c], np.dot(pgmm.principal_subspace_[c], np.dot(Minv[c], W[c].T))))/n_features
return W, noises
def _covar_mstep_UUU(pgmm, X, responsibilities, weighted_X_sum,
norm, min_covar):
n_features = X.shape[1]
n_data = X.shape[0]
beta = np.empty((pgmm.n_components, pgmm.n_pc, n_features))
theta = np.empty((pgmm.n_components, pgmm.n_pc, pgmm.n_pc))
S = np.empty((pgmm.n_components, n_features, n_features))
W = np.empty((pgmm.n_components, n_features, pgmm.n_pc))
noises = np.empty((pgmm.n_components, n_features))
for c in range(pgmm.n_components):
post = responsibilities[:, c]
mu = pgmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
S[c] = np.dot(post * diff.T, diff) / (n_data*pgmm.weights_[c] + 10 * EPS)
beta[c] = pgmm.principal_subspace_[c].T.dot(
np.linalg.inv(np.diag(pgmm.noise_[c])
+ pgmm.principal_subspace_[c].dot(pgmm.principal_subspace_[c].T)))
theta[c] = np.eye(pgmm.n_pc) - beta[c].dot(pgmm.principal_subspace_[c]) + beta[c].dot(S[c]).dot(beta[c].T)
W[c] = S[c].dot(beta[c].T).dot(np.linalg.inv(theta[c]))
noises[c] = np.diag(S[c] - W[c].dot(beta[c]).dot(S[c]))
return W, noises
# This assumes only one missing variable, due to the dimensionality dropping when slicing.
def _one_missing_one_model(x, missing_idxs, mean, covar):
n, d = x.shape
obs_idxs = [i for i in range(d) if i != missing_idxs]
# Separate the input as observed and missing.
x_o = x[:, obs_idxs]
x_m = x[:, [missing_idxs]]
# Separate the means as observed and missing.
mean_o = mean[obs_idxs]
mean_m = mean[missing_idxs]
# Separate the covariance.
covar_oo = covar[obs_idxs, :][:, obs_idxs]
covar_mo = covar[[missing_idxs], :][:, obs_idxs]
covar_om = covar[obs_idxs, :][:, [missing_idxs]]
covar_mm = covar[[missing_idxs], :][:, [missing_idxs]]
covar_oo_inv = np.linalg.inv(covar_oo)
x_m_given_o = mean_m + covar_mo.dot(covar_oo_inv).dot((x_o - mean_o).T).T
reconstructed_x = np.array(x, copy=True)
reconstructed_x[:, [missing_idxs]] = x_m_given_o
return reconstructed_x
_covar_mstep_funcs = {'RRR': _covar_mstep_RRR,
'RRU': _covar_mstep_RRU,
'RUR': _covar_mstep_RUR,
'RUU': _covar_mstep_RUU,
'URR': _covar_mstep_URR,
'URU': _covar_mstep_URU,
'UUR': _covar_mstep_UUR,
'UUU': _covar_mstep_UUU,
}
| bsd-3-clause |
dsisds/caffe_with_localconnect | python/detect.py | 8 | 5265 | #!/usr/bin/env python
"""
detect.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/imagenet_deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../examples/imagenet/caffe_reference_imagenet_model"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
default=255,
help="Multiply input features by this scale before input to net"
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default=16,
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
channel_swap = [int(s) for s in args.channel_swap.split(',')]
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model,
gpu=args.gpu, mean_file=args.mean_file,
input_scale=args.input_scale, channel_swap=channel_swap,
context_pad=args.context_pad)
if args.gpu:
print('GPU mode')
# Load input.
t = time.time()
print('Loading input...')
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = (
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
)
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
# Enumerate the class probabilities.
class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| bsd-2-clause |
kensugino/jGEM | jgem/dataset/__init__.py | 1 | 4324 | """
Expression Dataset for analysis of matrix (RNASeq/microarray) data with annotations
"""
import pandas as PD
import numpy as N
from matplotlib import pylab as P
from collections import OrderedDict
from ast import literal_eval
# from ..plot.matrix import matshow_clustered
class ExpressionSet(object):
def __init__(self, eData, gData=None, sData=None):
"""
eData: expression data (gene x samples) header: MultiIndex (samplename, group)
gData: gene annotation (gene x gene annotations)
sData: sample annotation (sample x sample annotations)
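A construction sketch (the sample names and groups below are placeholders):
    cols = PD.MultiIndex.from_tuples([('s1', 'ctrl'), ('s2', 'treat')],
                                     names=['samplename', 'group'])
    eData = PD.DataFrame([[1.0, 2.0]], index=['geneA'], columns=cols)
    eset = ExpressionSet(eData)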
"""
self.eData = eData
self.gData = gData
self.sData = sData
def read(self, eFile, gFile=None, sFile=None):
pass
def write(self, eFile, gFile=None, sFile=None):
self.eData.to_csv(eFile, tupleize_cols=False, sep="\t")
if gFile is not None:
self.gData.to_csv(gFile, tupleize_cols=False, sep="\t")
if sFile is not None:
self.sData.to_csv(sFile, tupleize_cols=False, sep="\t")
def find(self, field, pat):
pass
def read_bioinfo3_data(fname):
""" read bioinfo3.table.dataset type of data """
fobj = open(fname)
groups = OrderedDict()
cnt = 0
for line in fobj:
cnt += 1
if line[:2]=='#%':
if line.startswith('#%groups:'):
gname, members = line[len('#%groups:'):].split('=')
gname = gname.strip()
members = members.strip().split(',')
groups[gname] = members
datafields = line.strip().split('=')[1].strip().split(',')
elif line.startswith('#%fields'):
fields = line.strip().split('=')[1].strip().split(',')
elif not line.strip():
continue # empty line
else:
break
df = PD.read_table(fname, skiprows=cnt-1)
f2g = {}
for g,m in groups.items():
for f in m:
f2g[f] = g
df.columns = PD.MultiIndex.from_tuples([(x, f2g.get(x,'')) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def read_multiindex_data(fname, tupleize=True, index_names = ['samplename','group']):
""" read dataset table with MultiIndex in the header """
if not tupleize:
df = PD.read_table(fname, header=range(len(index_names)), index_col=[0], tupleize_cols=False)
e = ExpressionSet(df)
return e
df = PD.read_table(fname, index_col=0)
df.columns = PD.MultiIndex.from_tuples(df.columns.map(literal_eval).tolist(), names=index_names)
e = ExpressionSet(df)
return e
def read_grouped_table(fname, groupfn=lambda x: '_'.join(x.split('_')[:-1])):
""" Read dataset whose group is encoded in the colname. Column 0 is index. """
df = PD.read_table(fname)
f2g = {x:groupfn(x) for x in df.columns}
df.columns = PD.MultiIndex.from_tuples([(x, f2g[x]) for x in df.columns], names=['samplename','group'])
e = ExpressionSet(df)
return e
def concatenate(dic):
""" dic: dict of DataFrames
merge all using index and outer join
"""
keys = list(dic)
d = dic[keys[0]].merge(dic[keys[1]], left_index=True, right_index=True, how='outer', suffixes=('.'+keys[0],'.'+keys[1]))
for k in keys[2:]:
d = d.merge(dic[k], left_index=True, right_index=True, how='outer', suffixes=('','.'+k))
return d
def calc_mergesortkey(dic, pos_neg_flds):
conc = concatenate(dic)
selected = ~N.isnan(conc[pos_neg_flds])
pos = conc[pos_neg_flds]>0
neg = conc[pos_neg_flds]<=0
num_pos = pos.sum(axis=1)
num_neg = neg.sum(axis=1)
pos_neg_mix = -1*(num_neg==0) + 1*(num_pos==0) # pos(-1), mix(0), neg(1)
#num_hit = num_pos - num_neg
num_hit = num_pos + num_neg
n = len(pos_neg_flds)
#position = (N.arange(1,n+1)*pos + N.arange(-1,-n-1,-1)*neg).sum(axis=1)
position = (N.arange(1,n+1)*pos + N.arange(-n,0)*neg).sum(axis=1)
strength = (conc[pos_neg_flds]*pos).sum(axis=1) + (conc[pos_neg_flds]*neg).sum(axis=1)
#msk = PD.Series(list(zip(pos_neg_mix, num_hit, position, strength)), index=conc.index)
#msk.sort()
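# Sort key per row: (mix flag: -1 all-positive / 0 mixed / +1 all-negative,
# number of hits, signed position pattern, summed signal strength).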
conc['mergesortkey'] = list(zip(pos_neg_mix, num_hit, position, strength))
conc.sort('mergesortkey', inplace=True)
return conc
| mit |
aditiiyer/CERR | CERR_core/ModelImplementationLibrary/SegmentationModels/ModelDependencies/CT_HeadAndNeck_SelfAttention/util/visualizer.py | 1 | 7700 | import numpy as np
import os
import ntpath
import time
from . import util
from . import html
import matplotlib
matplotlib.use('agg')
class Visualizer():
def __init__(self, opt):
# self.opt = opt
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.opt = opt
self.saved = False
if self.display_id > 0:
import visdom
self.vis = visdom.Visdom(port=opt.display_port)
if self.use_html:
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
# with open(self.log_name, "a") as log_file:
# now = time.strftime("%c")
# log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
self.saved = False
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, save_result):
if self.display_id > 0: # show images in the browser
ncols = self.opt.display_single_pane_ncols
if ncols > 0:
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}
table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h)
title = self.name
label_html = ''
label_html_row = ''
nrows = int(np.ceil(len(visuals.items()) / ncols))
images = []
idx = 0
for label, image_numpy in visuals.items():
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1]))*255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
# pane col = image row
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
else:
idx = 1
for label, image_numpy in visuals.items():
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
if self.use_html and (save_result or not self.saved): # save images to a html file
self.saved = True
for label, image_numpy in visuals.items():
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
#webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
#for n in range(epoch, 0, -1):
# webpage.add_header('epoch [%d]' % n)
# ims = []
# txts = []
# links = []
# for label, image_numpy in visuals.items():
# img_path = 'epoch%.3d_%s.png' % (n, label)
# ims.append(img_path)
# txts.append(label)
# links.append(img_path)
# webpage.add_images(ims, txts, links, width=self.win_size)
#webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
def save_current_errors(self, epoch, counter_ratio, opt, errors,sv_name):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
#print (self.plot_data['X'])
#print (self.plot_data['Y'])
#print (self.plot_data['legend'])
def get_cur_plot_error(self, epoch, counter_ratio, opt, errors,sv_name):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
return self.plot_data
#plt.plot(self.plot_data['X'], y[:,0], label='training loss')
#plt.plot(x, y[:,1], label='training_rpn_class_loss')
#plt.plot(x, y[:,2], label='training_rpn_bbox_loss')
#self.vis.line(
# X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
# Y=np.array(self.plot_data['Y']),
# opts={
# 'title': self.name + ' loss over time',
# 'legend': self.plot_data['legend'],
# 'xlabel': 'epoch',
# 'ylabel': 'loss'},
# win=self.display_id)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in errors.items():
message += '%s: %.3f ' % (k, v)
print(message)
# with open(self.log_name, "a") as log_file:
# log_file.write('%s\n' % message)
# save image to the disk
def save_images(self, webpage, visuals, image_path):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
| lgpl-2.1 |
giorgiop/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 6 | 39956 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged predictions eventually give
# the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with y given as a column vector of shape (n_samples, 1); should warn and still fit.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test that oob_improvement_ has the correct shape and check hard-coded regression values.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises when no OOB estimates exist (subsample=1.0).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
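# Illustrative sketch (not collected as a test and never called below): how a
# ``monitor`` callback such as the one above is handed to ``fit``. The callback
# receives the 0-based stage index, the estimator and the fit loop's local
# variables; returning True stops training early, as exercised in
# test_monitor_early_stopping. The names X_demo/y_demo are only examples.
def _example_monitor_usage():
    X_demo, y_demo = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    est = GradientBoostingRegressor(n_estimators=50, max_depth=1,
                                    random_state=1, subsample=0.5)
    est.fit(X_demo, y_demo, monitor=early_stopping_monitor)
    # only 10 stages were fitted even though n_estimators=50
    return est.estimators_.shape[0]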
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/__init__.py | 2 | 37081 | """
This is an object-oriented plotting library.
A procedural interface is provided by the companion pyplot module,
which may be imported directly, e.g::
from matplotlib.pyplot import *
To include numpy functions too, use::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming; pyplot is primarily for working
interactively. The
exceptions are the pyplot commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.pyplot.subplots`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
:mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib was initially written by John D. Hunter (1968-2012) and is now
developed and maintained by a host of others.
Occasionally the internal documentation (python docstrings) will refer
to MATLAB®, a registered trademark of The MathWorks, Inc.
"""
from __future__ import print_function
__version__ = '1.2.1'
__version__numpy__ = '1.4' # minimum required numpy version
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
try:
reload
except NameError:
# Python 3
from imp import reload
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
class MatplotlibDeprecationWarning(UserWarning):
"""
A class for issuing deprecation warnings for Matplotlib users.
In light of the fact that Python builtin DeprecationWarnings are ignored
by default as of Python 2.7 (see link below), this class was put in to
allow for the signaling of deprecation, but via UserWarnings which are not
ignored by default.
http://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
"""
pass
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
if sys.version_info[0] >= 3:
def ascii(s): return bytes(s, 'ascii')
def byte2str(b): return b.decode('ascii')
else:
ascii = str
def byte2str(b): return b
from matplotlib.rcsetup import (defaultParams,
validate_backend,
validate_toolbar)
major, minor1, minor2, s, tmp = sys.version_info
_python24 = (major == 2 and minor1 >= 4) or major >= 3
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
from distutils import version
expected_version = version.LooseVersion(__version__numpy__)
found_version = version.LooseVersion(numpy.__version__)
if not found_version >= expected_version:
raise ImportError(
'numpy %s or later is required; you have %s' % (
__version__numpy__, numpy.__version__))
del version
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
p is a string pointing to a putative writable dir -- return True if p
is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
try:
t.write(ascii('1'))
finally:
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'):
continue
level_str = arg[10:]
# If it doesn't match one of ours, then don't even
# bother noting it, we are just a 3rd-party library
# to somebody else's script.
if level_str in levels:
_commandLineVerbose = level_str
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
warnings.warn('matplotlib: unrecognized --verbose-* string "%s".'
' Legal values are %s' % (level, self.levels))
else:
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = open(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print(s, file=self.fileo)
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
return a callable function that wraps func and reports its
output through the verbose handler if the current verbosity level
is at least *level*
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = byte2str(line.split()[-1])
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = byte2str(s.stdout.read()[:-1])
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = byte2str(s.stdout.readlines()[0])
pattern = '3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if b'version' in line:
v = byte2str(line.split()[-1])
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_inkscape():
try:
s = subprocess.Popen(['inkscape','-V'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stdout:
if b'Inkscape' in line:
v = byte2str(line.split()[1])
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def checkdep_xmllint():
try:
s = subprocess.Popen(['xmllint','--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if b'version' in line:
v = byte2str(line.split()[-1])
break
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
def _create_tmp_config_dir():
"""
If the config directory can not be created, create a temporary
directory.
"""
import getpass
import tempfile
tempdir = os.path.join(
tempfile.gettempdir(), 'matplotlib-%s' % getpass.getuser())
os.environ['MPLCONFIGDIR'] = tempdir
return tempdir
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration directory.
Default is HOME/.matplotlib. You can override this with the
MPLCONFIGDIR environment variable. If the default is not
writable, and MPLCONFIGDIR is not set, then
tempfile.gettempdir() is used to provide a directory in
which a matplotlib subdirectory is created as the configuration
directory.
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not os.path.exists(configdir):
os.makedirs(configdir)
if not _is_writable_dir(configdir):
return _create_tmp_config_dir()
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
return _create_tmp_config_dir()
else:
if not _is_writable_dir(h):
return _create_tmp_config_dir()
from matplotlib.cbook import mkdirs
mkdirs(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path):
return path
# setuptools' namespace_packages may hijack this init file
# so need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path):
return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
exe_path = os.path.dirname(sys.executable)
path = os.path.join(exe_path, 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(exe_path)[0], 'mpl-data')
if os.path.isdir(path):
return path
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path):
return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead
"""
raise NotImplementedError('get_example_data is deprecated -- use matplotlib.cbook.get_sample_data instead')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
# Need to explicitly remove cocoa_agg files or py2exe complains
# NOTE I dont know why, but do as previous version
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return list(d.items())
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print("""\
WARNING: Old rc filename ".matplotlibrc" found in working dir
and renamed to new default rc file name "matplotlibrc"
(no leading "dot"). """, file=sys.stderr)
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print("""\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname), file=sys.stderr)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
'svg.embed_char_paths' : 'svg.fonttype',
'savefig.extension' : 'savefig.format'
}
_deprecated_ignore_map = {
'legend.pad' : 'legend.borderpad',
'legend.labelsep' : 'legend.labelspacing',
'legend.handlelen' : 'legend.handlelength',
'legend.handletextsep' : 'legend.handletextpad',
'legend.axespad' : 'legend.borderaxespad',
}
class RcParams(dict):
"""
A dictionary object including validation
validating functions are defined and associated with rc parameters in
:mod:`matplotlib.rcsetup`
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
msg_depr = "%s is deprecated and replaced with %s; please use the latter."
msg_depr_ignore = "%s is deprecated and ignored. Use %s"
def __setitem__(self, key, val):
try:
if key in _deprecated_map:
alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt))
key = alt
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
return
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.' % (key,))
def __getitem__(self, key):
if key in _deprecated_map:
alt = _deprecated_map[key]
warnings.warn(self.msg_depr % (key, alt))
key = alt
elif key in _deprecated_ignore_map:
alt = _deprecated_ignore_map[key]
warnings.warn(self.msg_depr_ignore % (key, alt))
key = alt
return dict.__getitem__(self, key)
def keys(self):
"""
Return sorted list of keys.
"""
k = list(dict.keys(self))
k.sort()
return k
def values(self):
"""
Return values in order of sorted keys.
"""
return [self[k] for k in self.iterkeys()]
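# Illustrative sketch of the validation behaviour of RcParams (comments only;
# the key names below are examples from the standard rc set):
#
#     rcParams['lines.linewidth'] = 2     # validated by rcsetup and stored
#     rcParams['no.such.key'] = 1         # raises KeyError
#     rcParams['text.fontsize'] = 10      # deprecated alias: warns and is
#                                         # mapped to 'font.size'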
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
return rc_params_from_file(fname, fail_on_error)
def rc_params_from_file(fname, fail_on_error=False):
"""Load and return params from fname."""
cnt = 0
rc_temp = {}
with open(fname) as fd:
for line in fd:
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception as msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception as msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
elif key in _deprecated_ignore_map:
warnings.warn('%s is deprecated. Update your matplotlibrc to use %s instead.'% (key, _deprecated_ignore_map[key]))
else:
print("""
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname), file=sys.stderr)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
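# For reference, a matplotlibrc file as parsed above is plain text made of
# ``key : value`` lines, with '#' starting a comment (the values below are
# illustrative only):
#
#     backend         : Agg
#     lines.linewidth : 1.5    # trailing comments are stripped
#     font.size       : 10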
# this is the instance used by the matplotlib classes
rcParams = rc_params()
if rcParams['examples.directory']:
# paths that are intended to be relative to matplotlib_fname()
# are allowed for the examples.directory parameter.
# However, we will need to fully qualify the path because
# Sphinx requires absolute paths.
if not os.path.isabs(rcParams['examples.directory']):
_basedir, _fname = os.path.split(matplotlib_fname())
# Sometimes matplotlib_fname() can return relative paths.
# Also, using realpath() guarantees that Sphinx will use
# the same path that matplotlib sees (in case of weird symlinks).
_basedir = os.path.realpath(_basedir)
_fullpath = os.path.join(_basedir, rcParams['examples.directory'])
rcParams['examples.directory'] = _fullpath
rcParamsOrig = rcParams.copy()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
if rcParams['axes.formatter.use_locale']:
import locale
locale.setlocale(locale.LC_ALL, '')
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.iteritems():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
try:
rcParams[key] = v
except KeyError:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
def rcdefaults():
"""
Restore the default rc params. These are not the params loaded by
the rc file, but mpl's internal params. See rc_file_defaults for
reloading the default params from the rc file
"""
rcParams.update(rcParamsDefault)
def rc_file(fname):
"""
Update rc params from file.
"""
rcParams.update(rc_params_from_file(fname))
class rc_context(object):
"""
Return a context manager for managing rc settings.
This allows one to do::
>>> with mpl.rc_context(fname='screen.rc'):
>>> plt.plot(x, a)
>>> with mpl.rc_context(fname='print.rc'):
>>> plt.plot(x, b)
>>> plt.plot(x, c)
The 'a' vs 'x' and 'c' vs 'x' plots would have settings from
'screen.rc', while the 'b' vs 'x' plot would have settings from
'print.rc'.
A dictionary can also be passed to the context manager::
>>> with mpl.rc_context(rc={'text.usetex': True}, fname='screen.rc'):
>>> plt.plot(x, a)
The 'rc' dictionary takes precedence over the settings loaded from
'fname'. Passing a dictionary only is also valid.
"""
def __init__(self, rc=None, fname=None):
self.rcdict = rc
self.fname = fname
def __enter__(self):
self._rcparams = rcParams.copy()
if self.fname:
rc_file(self.fname)
if self.rcdict:
rcParams.update(self.rcdict)
def __exit__(self, type, value, tb):
rcParams.update(self._rcparams)
def rc_file_defaults():
"""
Restore the default rc params from the original matplotlib rc that
was loaded
"""
rcParams.update(rcParamsOrig)
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True, force=False):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. *warn* specifies whether a
warning should be issued if a backend has already been set up.
*force* is an **experimental** flag that tells matplotlib to
attempt to initialize a new backend by reloading the backend
module.
.. note::
This function must be called *before* importing pyplot for
the first time; or, if you are not using pyplot, it must be called
before importing matplotlib.backends. If warn is True, a warning
is issued if you try and call this after pylab or pyplot have been
loaded. In certain black magic use cases, e.g.
:func:`pyplot.switch_backend`, we are doing the reloading necessary to
make the backend switch work (in some cases, e.g. pure image
backends) so one can set warn=False to suppress the warnings.
To find out which backend is currently set, see
:func:`matplotlib.get_backend`.
"""
# Check if we've already set up a backend
if 'matplotlib.backends' in sys.modules:
if warn:
warnings.warn(_use_error_msg)
# Unless we've been told to force it, just return
if not force:
return
need_reload = True
else:
need_reload = False
# Set-up the proper backend name
if arg.startswith('module://'):
name = arg
else:
# Lowercase only non-module backend names (modules are case-sensitive)
arg = arg.lower()
name = validate_backend(arg)
rcParams['backend'] = name
# If needed we reload here because a lot of setup code is triggered on
# module import. See backends/__init__.py for more detail.
if need_reload:
reload(sys.modules['matplotlib.backends'])
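# Typical call order for use() (a sketch; 'PDF' is just an example backend
# name, and the argument is case-insensitive as noted in the docstring):
#
#     import matplotlib
#     matplotlib.use('PDF')            # must come before importing pyplot
#     import matplotlib.pyplot as plt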
def get_backend():
"Returns the current backend."
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (MATLAB compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
default_test_modules = [
'matplotlib.tests.test_agg',
'matplotlib.tests.test_artist',
'matplotlib.tests.test_axes',
'matplotlib.tests.test_backend_svg',
'matplotlib.tests.test_backend_pgf',
'matplotlib.tests.test_basic',
'matplotlib.tests.test_cbook',
'matplotlib.tests.test_colorbar',
'matplotlib.tests.test_colors',
'matplotlib.tests.test_dates',
'matplotlib.tests.test_delaunay',
'matplotlib.tests.test_figure',
'matplotlib.tests.test_image',
'matplotlib.tests.test_legend',
'matplotlib.tests.test_mathtext',
'matplotlib.tests.test_mlab',
'matplotlib.tests.test_patches',
'matplotlib.tests.test_pickle',
'matplotlib.tests.test_rcparams',
'matplotlib.tests.test_scale',
'matplotlib.tests.test_simplification',
'matplotlib.tests.test_spines',
'matplotlib.tests.test_subplots',
'matplotlib.tests.test_text',
'matplotlib.tests.test_ticker',
'matplotlib.tests.test_tightlayout',
'matplotlib.tests.test_triangulation',
'matplotlib.tests.test_transforms',
'matplotlib.tests.test_arrow_patches',
'matplotlib.tests.test_backend_qt4',
]
def test(verbosity=1):
"""run the matplotlib test suite"""
old_backend = rcParams['backend']
try:
use('agg')
import nose
import nose.plugins.builtin
from .testing.noseclasses import KnownFailure
from nose.plugins.manager import PluginManager
# store the old values before overriding
plugins = []
plugins.append( KnownFailure() )
plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] )
manager = PluginManager(plugins=plugins)
config = nose.config.Config(verbosity=verbosity, plugins=manager)
success = nose.run( defaultTest=default_test_modules,
config=config,
)
finally:
if old_backend.lower() != 'agg':
use(old_backend)
return success
test.__test__ = False # nose: this function is not a test
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.iterkeys(), 'debug')
| mit |
loli/sklearn-ensembletrees | setup.py | 5 | 5812 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet.
builtins.__SKLEARN_SETUP__ = True
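# Hedged sketch (editorial addition): a package __init__ typically consumes such a
# flag roughly as below; the exact logic in sklearn/__init__.py may differ.
#
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#     if not __SKLEARN_SETUP__:
#         pass  # only now import the compiled submodules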
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
LONG_DESCRIPTION = open('README.rst').read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
###############################################################################
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
if len(set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
)).intersection(sys.argv)) > 0:
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
###############################################################################
class CleanCommand(Clean):
description = "Remove build directories, and compiled file in the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
###############################################################################
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass={'clean': CleanCommand},
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
alexeyum/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
sharkspeed/dororis | machinelearning/udacity-dlnd/lesson2/part2/lesson2_21.py | 1 | 1634 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.colors as colors
from sklearn.manifold import TSNE
from collections import Counter
from mini_project6 import SentimentNetwork, reviews, labels
mlp_full = SentimentNetwork(reviews[:1000], labels[:1000], min_count=0, polarity_cutoff=0, learning_rate=0.01)
mlp_full.train(reviews[:-1000], labels[:-1000])
def get_most_similar_words(focus='horrible'):
most_similar = Counter()
for word in mlp_full.word2index.keys():
most_similar[word] = np.dot(
mlp_full.weights_0_1[mlp_full.word2index[word]],
mlp_full.weights_0_1[mlp_full.word2index[focus]]
)
return most_similar.most_common()[:30]
words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
if(word in mlp_full.word2index.keys()):
words_to_visualize.append(word)
pos = 0
neg = 0
colors_list = list()
vectors_list = list()
for word in words_to_visualize:
if word in pos_neg_ratios.keys():
vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
if(pos_neg_ratios[word] > 0):
pos += 1
colors_list.append('#00ff00')
else:
neg += 1
colors_list.append('#000000')
tsne = TSNE(n_components=2, random_state=0)
words_top_ted_tsne = tsne.fit_transform(vectors_list)
p = figure(tools='pan, wheel_zoom, reset, save')
# print(get_most_similar_words('excellent'))
# unfinished: `pos_neg_ratios` and `figure` (likely bokeh.plotting.figure) are referenced above but never defined/imported in this script
| bsd-2-clause |
cdegroc/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 1 | 4330 | """
Several basic tests for hierarchical clustering procedures
Author : Vincent Michel, 2010
"""
import numpy as np
from scipy.cluster import hierarchy
from nose.tools import assert_true
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
np.random.seed(0)
mask = np.ones([10, 10], dtype=np.bool)
X = np.random.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
np.random.seed(0)
X = np.random.randn(50, 100)
children, n_nodes, n_leaves = ward_tree(X.T)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
np.random.seed(0)
mask = np.ones([10, 10], dtype=np.bool)
X = np.random.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
np.random.seed(0)
mask = np.ones([10, 10], dtype=np.bool)
X = np.random.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
np.random.seed(0)
mask = np.ones([10, 10], dtype=np.bool)
X = np.random.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * np.random.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
def test_connectivity_propagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10, warn_on_equidistant=False).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
shy1/language-model | cupy-hypervoir.py | 1 | 25765 | ## Reservoir computing on the hypersphere
import numpy as np
import cupy as cp
import chargrams as cg
import re
import pickle
import time
from random import shuffle
#from sklearn.metrics import log_loss
from gensim.models.keyedvectors import KeyedVectors
import pydybm.arraymath as amath
import pydybm.arraymath.dycupy as dycupy
from pydybm.base.sgd32 import ADAM
def gramsintext(text, n=2):
grams = cg.chargrams(text, n)
glist = []
for ngram, cnt in grams.items():
glist.append(ngram)
gramindex = {gram:idx for idx, gram in enumerate(glist)}
return glist, gramindex
# create input weight matrix u and output value one-hot identity matrix(?) v
def init(M, N):
# nu = np.empty((N, M), dtype=np.float32)
ui = cp.random.rand(N * layerscales["L1"], M, dtype=np.float32)
# u1 = cp.random.rand(N,M, dtype=np.float32)
u2 = cp.random.rand(N * layerscales["L2"], M, dtype=np.float32)
# u3 = cp.random.rand(N,M, dtype=np.float32)
u4 = cp.random.rand(N * layerscales["L3"], M, dtype=np.float32)
v = cp.identity(M, dtype=np.float32)
# normalizing columns in NxM sized input matrix U as in formulas 6, 7
# wv = KeyedVectors.load_word2vec_format('/home/user01/dev/wang2vec/embeddings-i3e4-ssg-neg15-s512w9.txt', binary=False)
wv = KeyedVectors.load_word2vec_format('/home/user01/dev/wang2vec/embeddings-i3e4-ssg-neg15-s1024w6.txt', binary=False)
temp = wv.index2word
glist = np.array(temp[1:len(temp)])
# print(len(arr))
# print(wv['e_'])
# for i in range(0, M):
# temp = glist[i]
# nu[:, i] = wv.word_vec(temp, use_norm=False)
#print(nu.shape, nu)
# u = cp.asarray(nu)
for m in range(M):
ui[:, m] = ui[:, m] - ui[:, m].mean()
ui[:, m] = ui[:, m] / cp.linalg.norm(ui[:, m])
# for m in range(M):
# u1[:, m] = u1[:, m] - u1[:, m].mean()
# u1[:, m] = u1[:, m] / cp.linalg.norm(u1[:, m])
for m in range(M):
u2[:, m] = u2[:, m] - u2[:, m].mean()
u2[:, m] = u2[:, m] / cp.linalg.norm(u2[:, m])
# for m in range(M):
# u3[:, m] = u3[:, m] - u3[:, m].mean()
# u3[:, m] = u3[:, m] / cp.linalg.norm(u3[:, m])
for m in range(M):
u4[:, m] = u4[:, m] - u4[:, m].mean()
u4[:, m] = u4[:, m] / cp.linalg.norm(u4[:, m])
#print(u.shape, u)
glist = [re.sub(r'_', ' ', j) for j in glist]
#print(glist)
return ui, u2, u4, v, glist
def grecall(T, N, w, u, a, ss):
x, i = cp.zeros(N, dtype=np.float32), ss
ssa = []
ssa.append(ss)
for t in range(T - 1):
x = (1.0 - a) * x + a * (u[:, i] + cp.roll(x, 1))
x = x / cp.linalg.norm(x)
y = cp.exp(cp.dot(w, x))
i = cp.argmax(y / cp.sum(y))
ssa.append(i)
return ssa
def generate(T, N, u, variables, a, s0, temp=0.5):
x, i = cp.zeros(N, dtype=np.float32), s0
ssa = []
ssa.append(s0)
for t in range(T - 1):
x = (1.0 - a) * x + a * (u[:, i] + cp.roll(x, 1))
x = x / cp.linalg.norm(x)
# probability distribution computed same as in online training
# except that output of dot(w, x) is divided by the temperature
output = cp.dot(variables["W1"], x) / temp
output = output - np.max(output)
probs = cp.exp(output)
probs = probs / cp.sum(probs)
i = cp.argmax(cp.random.multinomial(1, probs))
ssa.append(i)
sstext = ""
for ssi in range(0, len(ssa), 2):
#print(ssi, type(ssi))
sstext += glist[int(ssa[ssi])]
print(sstext)
def error(s, ss):
err = 0.
for t in range(len(s)):
err = err + (s[t] != ss[t])
#print(err)
totalerr = err*100.0 / len(s)
return totalerr
def online_grams(u, v, w, a, s):
T, (N, M) = len(s), u.shape
err = 100
tt = 0
totalp = time.perf_counter()
while err > 0 and tt < T:
x = cp.zeros(N, dtype=np.float32)
softerr = 0
for t in range(T - 1):
x = (1.0 - a) * x + a * (u[:, s[t]] + cp.roll(x, 1))
x = x / cp.linalg.norm(x)
p = cp.exp(cp.dot(w, x))
p = p / cp.sum(p)
smax_idx = cp.argmax(p)
smax_prob = p[smax_idx]
softerr += 1 - smax_prob
w = w + cp.outer(v[:, s[t+1]] - p, x)
avgerr = softerr / (T - 1)
ssa = grecall(T, N, w, u, a, s[0])
ssg = generate(100, N, u, a, s[0], s[1])
err = error(s, ssa)
tt = tt + 1
if tt % 3 == 0:
print(tt, "err:", err, "%", "softavg:", avgerr, "alpha:", a)
sstext = ""
for ssi in range(0, len(ssg), 2):
sstext += glist[int(ssg[ssi])]
print(sstext)
sstext = ""
endtotalp = time.perf_counter() - totalp
for ssi in range(0, len(ssa), 2):
sstext += glist[int(ssa[ssi])]
print(tt, "err=", err, "%\n", sstext, "\n", endtotalp)
sstext = ""
for ssi in range(0, len(ssg), 2):
sstext += glist[int(ssg[ssi])]
print(sstext)
return ssa, w
def offline_grams(u, v, c, a, s):
sstext = ""
T, (N, M), eta = len(s), u.shape, 1e-7
X, S, x = cp.zeros((N, T-1), dtype=np.float32), cp.zeros((M, T-1), dtype=np.float32), cp.zeros(N, dtype=np.float32)
for t in range(T - 1):
x = (1.0 - a) * x + a * (u[:, s[t]] + cp.roll(x, 1))
x = x / cp.linalg.norm(x)
X[:, t], S[:, t] = x, v[:, s[t+1]]
XX = cp.dot(X, X.T)
for n in range(N):
XX[n, n] = XX[n, n] + eta
w = cp.dot(cp.dot(S, X.T), cp.linalg.inv(XX))
    ssa = grecall(T, N, w, u, a, s[0])
sstext = ""
for ssi in range(0, len(ssa), 2):
sstext += glist[int(ssa[ssi])]
print("err=", error(s, ssa), "%\n", sstext, "\n")
return ssa, w
def train_kcpa(ui, u2, u4, v, variables, leaks, bs, s, cpstates):
T = len(s)
N = 1024
M = 1024
x1 = cp.zeros(N * layerscales["L1"], dtype=np.float32)
x3 = cp.zeros(N * layerscales["L2"], dtype=np.float32)
x5 = cp.zeros(N * layerscales["L3"], dtype=np.float32)
gradient = dict()
softerr1 = 0
err1 = 0
softerr3 = 0
err3 = 0
softerr5 = 0
err5 = 0
softerrm = 0
errm = 0
skipfirst = 3
tm1 = (T - 1 - skipfirst)
t = 0
for k in range(skipfirst):
current = s[t]
# skipt.append(t)
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
wx = cp.dot(variables["W1"], x1)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p1 = p / cp.sum(p)
pu2 = cp.dot(p1, u2.T)
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
wx = cp.dot(variables["W3"], x3)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p3 = p / cp.sum(p)
pu4 = cp.dot(p3, u4.T)
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
t += 1
for b1 in range(tm1):
current = s[t]
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
wx = cp.dot(variables["W1"], x1)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p1 = p / cp.sum(p)
pu2 = cp.dot(p1, u2.T)
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
wx = cp.dot(variables["W3"], x3)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p3 = p / cp.sum(p)
pu4 = cp.dot(p3, u4.T)
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
cpstates = cp.concatenate((cpstates, x5.reshape((1, N * layerscales["L3"]))))
# wx = cp.dot(variables["W5"], x5)
# wx = wx - cp.max(wx)
# p = cp.exp(wx)
# p5 = p / cp.sum(p)
# pred5 = cp.argmax(p5)
target = s[t+1]
# target_prob1 = p1[target]
# target_prob3 = p3[target]
# target_prob5 = p5[target]
# err5 = err5 + (pred5 != target)
# # softerr1 += 1 - target_prob1
# # softerr3 += 1 - target_prob3
# softerr5 += 1 - target_prob5
gradient["W1"] = cp.outer(v[:, target] - p1, x1)
gradient["W3"] = cp.outer(v[:, target] - p3, x3)
# gradient["W5"] = cp.outer(v[:, target] - p5, x5)
SGD.update_state(gradient)
delta = SGD.get_delta()
SGD.update_with_L1_regularization(variables, delta, L1)
t += 1
# softerrors = dict()
# prederrors = dict()
# # softerrors["lay1"] = softerr1 / (tm1)
# # softerrors["lay3"] = softerr3 / (tm1)
# softerrors["lay5"] = softerr5 / (tm1)
# prederrors["lay5"] = err5 * 100.0 / (tm1)
return variables, cpstates
def train(ui, u2, u4, v, variables, leaks, bs, testflag, s):
T = len(s)
N = 1024
M = 1024
x1 = cp.zeros(N * layerscales["L1"], dtype=np.float32)
x3 = cp.zeros(N * layerscales["L2"], dtype=np.float32)
x5 = cp.zeros(N * layerscales["L3"], dtype=np.float32)
gradient = dict()
softerr1 = 0
err1 = 0
softerr3 = 0
err3 = 0
softerr5 = 0
err5 = 0
softerrm = 0
errm = 0
skipfirst = 3
t = 5
tm1 = (T - 1 - t - skipfirst)
if bs > 1:
# floored quotient (integer without remainder)
fullbatches = tm1 // bs
lastlen = tm1 - (fullbatches * bs)
# print(skipfirst, T, tm1, fullbatches, lastlen)
M1, N1 = variables["W1"].shape
M3, N3 = variables["W3"].shape
M5, N5 = variables["W5"].shape
# print(M1, N1)
batchgrads1 = cp.empty((bs, M1, N1), dtype=np.float32)
batchgrads3 = cp.empty((bs, M3, N3), dtype=np.float32)
batchgrads5 = cp.empty((bs, M5, N5), dtype=np.float32)
if lastlen > 0:
lastgrads1 = cp.empty((lastlen, M1, N1), dtype=np.float32)
lastgrads3 = cp.empty((lastlen, M3, N3), dtype=np.float32)
lastgrads5 = cp.empty((lastlen, M5, N5), dtype=np.float32)
# skipt = []
# batcht = []
# lastt = []
# don't update readout weights for the first 3 timesteps of a sequence to
# minimize the effects of the initial transient hidden states
for k in range(skipfirst):
step1 = s[t-5]
step2 = s[t-3]
step3 = s[t]
# skipt.append(t)
# x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, step1] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
# wx = cp.dot(variables["W1"], x1)
# wx = wx - cp.max(wx)
# p = cp.exp(wx)
# p1 = p / cp.sum(p)
# pu2 = cp.dot(p1, u2.T)
# x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (u2[:, step2] + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
# wx = cp.dot(variables["W3"], x3)
# wx = wx - cp.max(wx)
# p = cp.exp(wx)
# p3 = p / cp.sum(p)
# pu4 = cp.dot(p3, u4.T)
# x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (u4[:, step3] + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
t += 1
if bs == 1:
for b1 in range(tm1):
step1 = s[t-5]
step2 = s[t-3]
step3 = s[t]
# x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, step1] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
wx = cp.dot(variables["W1"], x1)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p1 = p / cp.sum(p)
pred1 = cp.argmax(p1)
# pu2 = cp.dot(p1, u2.T)
# x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (u2[:, step2] + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
wx = cp.dot(variables["W3"], x3)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p3 = p / cp.sum(p)
pred3 = cp.argmax(p3)
# pu4 = cp.dot(p3, u4.T)
# x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (u4[:, step3] + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
# print(variables["W5"].shape, x5.shape)
wx = cp.dot(variables["W5"], x5)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p5 = p / cp.sum(p)
pred5 = cp.argmax(p5)
pstack = cp.hstack((p1, p3, p5))
# print(variables["Wm"].shape, pstack.shape)
wx = cp.dot(variables["Wm"], pstack)
# wx = cp.dot(pstack, variables["Wm"])
# # print(wx.shape)
wx = wx - cp.max(wx)
p = cp.exp(wx)
pm = p / cp.sum(p)
meanpred = cp.argmax(pm)
target = s[t+1]
target_prob1 = p1[target]
target_prob3 = p3[target]
target_prob5 = p5[target]
target_probm = pm[target]
# err5 = err5 + (pred5 != target)
errm = errm + (meanpred != target)
softerr1 += 1 - target_prob1
softerr3 += 1 - target_prob3
softerr5 += 1 - target_prob5
softerrm += 1 - target_probm
if testflag == 0:
# gradient["W1"] = cp.outer(v[:, target] - p1, x1)
# gradient["W3"] = cp.outer(v[:, target] - p3, x3)
gradient["W1"] = cp.outer(v[:, target] - p1, x1)
gradient["W3"] = cp.outer(v[:, target] - p3, x3)
gradient["W5"] = cp.outer(v[:, target] - p5, x5)
gradient["Wm"] = cp.outer(v[:, target] - pm, pstack)
SGD.update_state(gradient)
delta = SGD.get_delta()
SGD.update_with_L1_regularization(variables, delta, L1)
t += 1
else:
for b in range(fullbatches):
for i in range(bs):
current = s[t]
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
wx = cp.dot(variables["W1"], x1)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p1 = p / cp.sum(p)
pu2 = cp.dot(p1, u2.T)
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
wx = cp.dot(variables["W3"], x3)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p3 = p / cp.sum(p)
pu4 = cp.dot(p3, u4.T)
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
wx = cp.dot(variables["W5"], x5)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p5 = p / cp.sum(p)
pred5 = cp.argmax(p5)
target = s[t+1]
# target_prob1 = p1[target]
# target_prob3 = p3[target]
target_prob5 = p5[target]
err5 = err5 + (pred5 != target)
# softerr1 += 1 - target_prob1
# softerr3 += 1 - target_prob3
softerr5 += 1 - target_prob5
batchgrads1[i] = cp.outer(v[:, target] - p1, x1)
batchgrads3[i] = cp.outer(v[:, target] - p3, x3)
batchgrads5[i] = cp.outer(v[:, target] - p5, x5)
t += 1
gradient["W1"] = batchgrads1.mean(0)
gradient["W3"] = batchgrads3.mean(0)
gradient["W5"] = batchgrads5.mean(0)
SGD.update_state(gradient)
delta = SGD.get_delta()
SGD.update_with_L1_regularization(variables, delta, L1)
# SGD.apply_L2_regularization(gradient, variables, L2)
if lastlen > 0:
for j in range(lastlen):
current = s[t]
# lastt.append(t)
x1 = (1.0 - leaks[0]) * x1 + leaks[0] * (ui[:, current] + cp.roll(x1, 1))
x1 = x1 / cp.linalg.norm(x1)
wx = cp.dot(variables["W1"], x1)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p1 = p / cp.sum(p)
pu2 = cp.dot(p1, u2.T)
x3 = (1.0 - leaks[1]) * x3 + leaks[1] * (pu2 + cp.roll(x3, 1))
x3 = x3 / cp.linalg.norm(x3)
wx = cp.dot(variables["W3"], x3)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p3 = p / cp.sum(p)
pu4 = cp.dot(p3, u4.T)
x5 = (1.0 - leaks[2]) * x5 + leaks[2] * (pu4 + cp.roll(x5, 1))
x5 = x5 / cp.linalg.norm(x5)
wx = cp.dot(variables["W5"], x5)
wx = wx - cp.max(wx)
p = cp.exp(wx)
p5 = p / cp.sum(p)
pred5 = cp.argmax(p5)
target = s[t+1]
target_prob5 = p5[target]
err5 = err5 + (pred5 != target)
# softerr1 += 1 - target_prob1
# softerr3 += 1 - target_prob3
softerr5 += 1 - target_prob5
lastgrads1[j] = cp.outer(v[:, target] - p1, x1)
lastgrads3[j] = cp.outer(v[:, target] - p3, x3)
lastgrads5[j] = cp.outer(v[:, target] - p5, x5)
t += 1
gradient["W1"] = lastgrads1.mean(0)
gradient["W3"] = lastgrads3.mean(0)
gradient["W5"] = lastgrads5.mean(0)
SGD.update_state(gradient)
delta = SGD.get_delta()
SGD.update_with_L1_regularization(variables, delta, L1)
# print("skip:", skipt)
# print("batch:", batcht)
# print("last:", lastt)
softerrors = dict()
prederrors = dict()
softerrors["lay1"] = softerr1 / (tm1)
softerrors["lay3"] = softerr3 / (tm1)
softerrors["lay5"] = softerr5 / (tm1)
softerrors["laym"] = softerrm / (tm1)
# prederrors["lay5"] = err5 * 100.0 / (tm1)
prederrors["laym"] = errm * 100.0 / (tm1)
return prederrors, softerrors, variables
amath.setup(dycupy)
chunkfile = '/home/user01/dev/language-model/chunks256.p'
outweights = '/home/user01/dev/language-model/outweights256.p'
inweights = '/home/user01/dev/language-model/inweights256.p'
train1280 = '/home/user01/dev/language-model/train1280.p'
test128 = '/home/user01/dev/language-model/test128.p'
#pickle.dump(allchunks, open(outfile, "wb"))
chunklist = pickle.load(open(chunkfile, "rb"))
layerscales = dict()
variables = dict()
L2 = dict()
L1 = dict()
trainchunks = []
testchunks = []
cp.random.seed(481639)
n=2
stride = 1
leaks = [0.6905, 0.5655, 0.4405]
# leaks = [0.4405, 0.5655, 0.6905]
# leaks = [0.7475, 0.4533, 0.275]
# leaks = [0.275, 0.4533, 0.7475]
# leaks = [0.5655, 0.4533, 0.4405]
N = 1024
M = 1024
layerscales["L1"] = 3
layerscales["L2"] = 2
layerscales["L3"] = 3
batchsize = 1
trainsize = 16
testsize = 8
lrate = 0.002
SGD = ADAM(alpha=lrate)
variables["W1"] = cp.zeros((M, N * layerscales["L1"]), dtype=np.float32)
variables["W3"] = cp.zeros((M, N * layerscales["L2"]), dtype=np.float32)
variables["Wm"] = cp.zeros((1024, M*3), dtype=np.float32)
# variables["Wm"] = cp.array([0.0, 0.0, 0.0], dtype=np.float32)
SGD = SGD.set_shape(variables)
for key in variables:
L1[key] = 0
L2[key] = 0
ui, u2, u4, v, glist = init(M, N)
gramindex = {gram:idx for idx, gram in enumerate(glist)}
print("L1: {} L2: {} L3: {}".format(layerscales["L1"] * N, layerscales["L2"] * N, layerscales["L3"] * N))
print("Learning rate:", lrate, "Batch size:", batchsize)
for j in range(trainsize):
chunk = chunklist[j]
sgi = []
for idx in range(0, len(chunk) - (n - 1), stride):
try:
sgi.append(gramindex[chunk[idx:idx + n]])
except:
print(chunk[idx:idx + n])
intchunk = cp.asarray(sgi, dtype=np.int16)
trainchunks.append(intchunk)
# pickle.dump(trainchunks, open(train1280, "wb"))
for k in range(trainsize, trainsize + testsize):
chunk = chunklist[k]
sgi = []
for idx in range(0, len(chunk) - (n - 1), stride):
try:
sgi.append(gramindex[chunk[idx:idx + n]])
except:
print(chunk[idx:idx + n])
intchunk = cp.asarray(sgi, dtype=np.int16)
testchunks.append(intchunk)
# pickle.dump(testchunks, open(test128, "wb"))
print("train size:", len(trainchunks), "test size:", len(testchunks))
print(leaks[0], leaks[1], leaks[2])
totalstart = time.perf_counter()
# # get kernel PCA states
# cpstates = cp.empty((0, N * layerscales["L3"]), dtype=np.float32)
# npstates = np.empty((0, N * layerscales["L3"]), dtype=np.float32)
# startp = time.perf_counter()
# totalerr5 = 0
# totalstates = 0
#
# for chunk in trainchunks:
# variables, cpstates = train_kcpa(ui, u2, u4, v, variables, leaks, batchsize, chunk, cpstates)
# npstates = np.concatenate((npstates, cp.asnumpy(cpstates)))
# cpstates = cp.empty((0, N * layerscales["L3"]), dtype=np.float32)
# # totalerr5 += softerrs["lay5"]
# totalstates += len(chunk) - 4
#
# print("total states:", totalstates, "npstates:", npstates.shape)
# elapsedp = time.perf_counter() - startp
# totalelapsed = time.perf_counter() - totalstart
# tm, ts = divmod(totalelapsed, 60)
# # totalerr5 = totalerr5 / trainsize
# print("\n", elapsedp, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
# # print("Errors:", prederrs["lay5"])
# # print("Losses:", totalerr5)
# shuffle(trainchunks)
variables["W5"] = cp.zeros((M, N * layerscales["L3"]), dtype=np.float32)
SGD = SGD.set_shape(variables)
for key in variables:
L1[key] = 0
L2[key] = 0
testflag = 0
for i in range(80):
istart = time.perf_counter()
totalerr1 = 0
totalerr3 = 0
totalerr5 = 0
totalerrm = 0
count = 0
startp = time.perf_counter()
for chunk in trainchunks:
count += 1
prederrs, softerrs, variables = train(ui, u2, u4, v, variables, leaks, batchsize, testflag, chunk)
totalerr1 += softerrs["lay1"]
totalerr3 += softerrs["lay3"]
totalerr5 += softerrs["lay5"]
totalerrm += softerrs["laym"]
if count % 16 == 0:
elapsedp = time.perf_counter() - startp
totalelapsed = time.perf_counter() - totalstart
tm, ts = divmod(totalelapsed, 60)
totalerr1 = totalerr1 / 16
totalerr3 = totalerr3 / 16
totalerr5 = totalerr5 / 16
totalerrm = totalerrm / 16
print("\n", i, count, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
print("Errors:", prederrs["laym"])
# print("Losses:", softerrs["lay5"])
print("Losses:", totalerr1, totalerr3, totalerr5, totalerrm)
startp = time.perf_counter()
totalerr1 = 0
totalerr3 = 0
totalerr5 = 0
totalerrm = 0
# elapsedp = time.perf_counter() - startp
# ielapsed = time.perf_counter() - istart
# tm, ts = divmod(ielapsed, 60)
#
# print("\n", i, ielapsed, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
# print("Errors:", prederrs["lay5"])
# print("Losses:", totalerr5)
# generate(128, N, u, variables, alpha, 4)
shuffle(trainchunks)
totalerrm = 0
print("Testing...")
testflag = 1
startp = time.perf_counter()
for chunk in testchunks:
prederrs, softerrs, variables = train(ui, u2, u4, v, variables, leaks, batchsize, testflag, chunk)
totalerrm += softerrs["laym"]
print(totalerrm)
elapsedp = time.perf_counter() - startp
totalelapsed = time.perf_counter() - totalstart
tm, ts = divmod(totalelapsed, 60)
totalerrm = totalerrm / testsize
print("\n", i, elapsedp, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
print("Test Errors:", prederrs["laym"])
print("Test Losses:", totalerrm)
shuffle(testchunks)
testflag = 0
pickle.dump(variables, open(outweights, "wb"))
pickle.dump(ui, open(inweights, "wb"))
# if tt % 10 == 0:
## a = a * 0.98
#
# print(tt, "err:", err, "%", "softavg:", avgerr, "alpha:", a)
# sstext = ""
# for ssi in range(0, len(ssg), 2):
# #print(ssi, type(ssi))
# sstext += glist[int(ssg[ssi])]
# print(sstext)
#N = int(M * 2.71828)
#div = ' mt:nt '
#nt = N / T
#nmt = [str(mt), str(nt)]
#print(div.join(nmt))
#print(T, N, M, alpha)
#print(T, N, M, alpha)
#ss, w = offline_grams(u, v, glist, alpha, sgi)
# totalo = time.perf_counter()
# endtotalo = time.perf_counter() - totalo
# print(endtotalo)
#cu_glist = cp.asarray(glist, dtype=np.float32)
#cu_sgi = cp.asarray(sgi, dtype=np.int16)
# tnt = int(T * 2)
# if tnt > M:
# N = tnt
# else:
# N = M
# T = length of sequence
# M = number of distinct input states
#T, M = len(s), len(allchars)
# mt = ratio of possible input states to timesteps to memorize
# nt = ratio of reservoir units to timesteps to memorize
# N = number of hidden states/reservoir size
# alpha = integration rate indicating "leakiness", 1 = no leaking/orig model
# alpha is determined by ratio nt (hidden states) minus ratio mt (input states)
# u = input weight matrix
# v = hidden state orthogonal identity matrix
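# Illustrative sketch (editorial addition) of the per-timestep state update used in
# train()/grecall() above, for an N-dimensional state x, input index i, input
# matrix u and leak rate a:
#
#     x = (1.0 - a) * x + a * (u[:, i] + cp.roll(x, 1))   # leaky integration plus ring shift
#     x = x / cp.linalg.norm(x)                           # renormalize onto the unit hypersphere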
#with open('test02.txt', 'r') as ofile:
# s = ofile.read()
#with open('counts1024b.txt', 'r') as countfile:
# counts = countfile.readlines()
#for line in counts:
# glist.append(line[:2])
#gramindex = {gram:idx for idx, gram in enumerate(glist)}
#print(len(gramindex), type(gramindex))
#unclean = False
#number of characters in each n-gram sequence that forms a single unit/timestep
| mit |
facemelters/data-science | Atlas/facebook_update_script.py | 1 | 7431 | __author__ = 'Mike'
import csv
import requests
import pprint
import gspread
import sys
from time import sleep
import socket
import json
import pandas as pd
import numpy
from oauth2client.client import SignedJwtAssertionCredentials
#Login to Google Drive
json_key = json.load(open('Update Script-b4827ff38372.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
#Grab FB API Key
json_fb_key = json.load(open('fb_api_key.json'))
apikey = json_fb_key['credentials']['apikey'].encode('ascii','ignore')
#Create a function that reads a Spreadsheet from Gdrive and stores the relevant column as a variable.
def getFBdataGroup(apikey,postid):
tries = 5
while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def readSheetCol(sheet,tab,col):
sheet = gc.open(sheet)
input_sheet = sheet.worksheet(tab)
#store the postids in a list
values_list = input_sheet.col_values(col)
#make sure we start processing values on the second row of the sheet
return values_list
def getFBdataReach(apikey,postid):
tries = 5
while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'/insights/post_impressions_unique?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataViralReach(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'/insights/post_impressions_viral_unique?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataLinkClicks(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'/insights/post_consumptions_by_type?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataComments(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'/comments?summary=true&access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataLikes(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.3/'+postid+'/likes?summary=true&access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def writeSheetAnalysis(sheet, tab):
sheet = gc.open(sheet)
analysis_sheet = sheet.worksheet(tab)
current_row = int(raw_input("Start row minus 1: "))
values_list = readSheetCol("Atlas Facebook Tracker","Analysis",14)
col_date = readSheetCol("Atlas Facebook Tracker","Analysis",1)
col_url = readSheetCol("Atlas Facebook Tracker","Analysis",3)
d = {'PostID' : pd.Series(values_list[1:], index=[item for item in range(len(values_list)-1)]),
"Date" : pd.Series(col_date[1:], index=[item for item in range(len(col_date)-1)]),
"URL" : pd.Series(col_url[1:], index=[item for item in range(len(col_url)-1)])}
df = pd.DataFrame(d)
print df
attempts = 5
for i in range(10):
try:
for item in range(len(values_list)):
if current_row > len(col_date):
break
else:
if current_row > len(col_url):
value = values_list[current_row-1]
#Hit the Facebook API for various bits of data
fb_data_group = getFBdataGroup(apikey,value)
fb_data_comments = getFBdataComments(apikey,value)
fb_data_likes = getFBdataLikes(apikey,value)
fb_data_clicks = getFBdataLinkClicks(apikey,value)
#Write that data to Google Spreadsheets
analysis_sheet.update_cell(current_row,3,fb_data_group['link'])
try:
analysis_sheet.update_cell(current_row,5,fb_data_group['name'])
analysis_sheet.update_cell(current_row,6,fb_data_group['description'])
except KeyError:
analysis_sheet.update_cell(current_row,5,"none")
analysis_sheet.update_cell(current_row,6,"none")
try:
analysis_sheet.update_cell(current_row,9,fb_data_group['shares']['count'])
except KeyError:
analysis_sheet.update_cell(current_row,9,"0")
except TypeError:
continue
except IndexError:
continue
analysis_sheet.update_cell(current_row,10,fb_data_comments['summary']['total_count'])
analysis_sheet.update_cell(current_row,11,fb_data_likes['summary']['total_count'])
analysis_sheet.update_cell(current_row,12,fb_data_clicks['data'][0]['values'][0]['value']['link clicks'])
current_row += 1
print current_row
sleep(3)
else:
current_row += 1
except socket.error:
sleep(3)
print "socket error. Attempting Again"
continue
except gspread.httpsession.HTTPError:
sleep(3)
print "HTTPError. Attempting Again."
continue
except KeyError:
sleep(3)
if i < 3:
print "Key error. Attempting again."
continue
else:
current_row += 1
writeSheetAnalysis("Atlas Facebook Tracker","Analysis")
#maybe add People Talking About This (on a macro-level sheet)
#Catch exception for API rate limiting
#Catch exception for key expiration and use oAuth to get a new one
#Then create a function that does a Pythonic index(match) with the spreadsheet with the var from Function 1
| gpl-2.0 |
JohannesUIBK/oggm | oggm/sandbox/run_benchmark.py | 2 | 3482 | """Run with a subset of benchmark glaciers"""
from __future__ import division
# Log message format
import logging
logging.basicConfig(format='%(asctime)s: %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
# Module logger
log = logging.getLogger(__name__)
# Python imports
import os
import shutil
from functools import partial
# Libs
import pandas as pd
import geopandas as gpd
import numpy as np
import shapely.geometry as shpg
import matplotlib.pyplot as plt
# Locals
import oggm
import oggm.cfg as cfg
from oggm import workflow
from oggm.utils import get_demo_file
from oggm import tasks
from oggm.workflow import execute_entity_task, reset_multiprocessing
from oggm import graphics, utils
# Initialize OGGM
cfg.initialize()
# Local paths (where to write output and where to download input)
WORKING_DIR = os.path.expanduser('~/OGGM_wd_bench')
DATA_DIR = os.path.join(WORKING_DIR, 'datadir')
cfg.PATHS['working_dir'] = WORKING_DIR
cfg.PATHS['topo_dir'] = os.path.join(DATA_DIR, 'topo')
cfg.PATHS['cru_dir'] = os.path.join(DATA_DIR, 'cru')
cfg.PATHS['rgi_dir'] = os.path.join(DATA_DIR, 'rgi')
cfg.PATHS['tmp_dir'] = os.path.join(DATA_DIR, 'tmp')
# Currently OGGM wants some directories to exist
# (maybe I'll change this but it can also catch errors in the user config)
utils.mkdir(cfg.PATHS['working_dir'])
utils.mkdir(cfg.PATHS['topo_dir'])
utils.mkdir(cfg.PATHS['cru_dir'])
utils.mkdir(cfg.PATHS['rgi_dir'])
utils.mkdir(cfg.PATHS['tmp_dir'])
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = True
cfg.CONTINUE_ON_ERROR = False
# Read in the Benchmark RGI file
rgi_pkl_path = utils.aws_file_download('rgi_benchmark.pkl')
rgidf = pd.read_pickle(rgi_pkl_path)
# Remove glaciers causing issues
rgidf = rgidf.iloc[[s not in ('RGI50-11.00291', 'RGI50-03.02479') for s in rgidf['RGIId']]]
utils.get_rgi_dir()
log.info('Number of glaciers: {}'.format(len(rgidf)))
# Go - initialize working directories
# gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
gdirs = workflow.init_glacier_regions(rgidf)
# Prepro tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.compute_downstream_lines,
tasks.catchment_area,
tasks.initialize_flowlines,
tasks.catchment_width_geom,
tasks.catchment_width_correction
]
for task in task_list:
execute_entity_task(task, gdirs)
# Climate related tasks - this will download
execute_entity_task(tasks.process_cru_data, gdirs)
tasks.compute_ref_t_stars(gdirs)
tasks.distribute_t_stars(gdirs)
# Inversion
execute_entity_task(tasks.prepare_for_inversion, gdirs)
tasks.optimize_inversion_params(gdirs)
execute_entity_task(tasks.volume_inversion, gdirs)
# Write out glacier statistics
df = utils.glacier_characteristics(gdirs)
fpath = os.path.join(cfg.PATHS['working_dir'], 'glacier_char.csv')
df.to_csv(fpath)
# Plots (if you want)
PLOTS_DIR = ''
if PLOTS_DIR == '':
exit()
utils.mkdir(PLOTS_DIR)
for gd in gdirs:
bname = os.path.join(PLOTS_DIR, gd.name + '_' + gd.rgi_id + '_')
graphics.plot_googlemap(gd)
plt.savefig(bname + 'ggl.png')
plt.close()
graphics.plot_domain(gd)
plt.savefig(bname + 'dom.png')
plt.close()
graphics.plot_centerlines(gd)
plt.savefig(bname + 'cls.png')
plt.close()
graphics.plot_catchment_width(gd, corrected=True)
plt.savefig(bname + 'w.png')
plt.close()
graphics.plot_inversion(gd)
plt.savefig(bname + 'inv.png')
plt.close()
| gpl-3.0 |
kai5263499/networkx | examples/graph/knuth_miles.py | 36 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make new graph of cites, edge if less then 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
PG-TUe/tpot | tpot/gp_deap.py | 1 | 19463 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from deap import tools, gp
from inspect import isclass
from .operator_utils import set_sample_weight
from sklearn.utils import indexable
from sklearn.metrics.scorer import check_scoring
from sklearn.model_selection._validation import _fit_and_score
from sklearn.model_selection._split import check_cv
from sklearn.base import clone, is_classifier
from collections import defaultdict
import warnings
from stopit import threading_timeoutable, TimeoutException
def pick_two_individuals_eligible_for_crossover(population):
"""Pick two individuals from the population which can do crossover, that is, they share a primitive.
Parameters
----------
population: array of individuals
Returns
----------
tuple: (individual, individual)
Two individuals which are not the same, but share at least one primitive.
Alternatively, if no such pair exists in the population, (None, None) is returned instead.
"""
primitives_by_ind = [set([node.name for node in ind if isinstance(node, gp.Primitive)])
for ind in population]
pop_as_str = [str(ind) for ind in population]
eligible_pairs = [(i, i+1+j) for i, ind1_prims in enumerate(primitives_by_ind)
for j, ind2_prims in enumerate(primitives_by_ind[i+1:])
if not ind1_prims.isdisjoint(ind2_prims) and
pop_as_str[i] != pop_as_str[i+1+j]]
# Pairs are eligible in both orders, this ensures that both orders are considered
eligible_pairs += [(j, i) for (i, j) in eligible_pairs]
if not eligible_pairs:
# If there are no eligible pairs, the caller should decide what to do
return None, None
pair = np.random.randint(0, len(eligible_pairs))
idx1, idx2 = eligible_pairs[pair]
return population[idx1], population[idx2]
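def _example_crossover_eligibility():
    # Illustrative sketch, not part of the original TPOT module: the eligibility
    # test above boils down to "the two individuals share at least one primitive
    # name and are not textually identical". The primitive names below are
    # hypothetical, chosen only to show which index pairs would qualify.
    prims = [{'DecisionTreeClassifier', 'PCA'},
             {'LogisticRegression'},
             {'PCA', 'SelectKBest'}]
    reprs = ['ind0', 'ind1', 'ind2']
    return [(i, j) for i in range(len(prims)) for j in range(len(prims))
            if i != j and not prims[i].isdisjoint(prims[j]) and reprs[i] != reprs[j]]
    # -> [(0, 2), (2, 0)]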
def mutate_random_individual(population, toolbox):
"""Picks a random individual from the population, and performs mutation on a copy of it.
Parameters
----------
population: array of individuals
Returns
----------
individual: individual
        An individual which is a mutated copy of one of the individuals in population;
the returned individual does not have fitness.values
"""
idx = np.random.randint(0,len(population))
ind = population[idx]
ind, = toolbox.mutate(ind)
del ind.fitness.values
return ind
def varOr(population, toolbox, lambda_, cxpb, mutpb):
"""Part of an evolutionary algorithm applying only the variation part
(crossover, mutation **or** reproduction). The modified individuals have
their fitness invalidated. The individuals are cloned so returned
population is independent of the input population.
:param population: A list of individuals to vary.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param lambda\_: The number of children to produce
:param cxpb: The probability of mating two individuals.
:param mutpb: The probability of mutating an individual.
    :returns: The list of varied offspring (of length *lambda_*).
    The variation goes as follows. On each of the *lambda_* iterations, it
selects one of the three operations; crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from
the parental population :math:`P_\mathrm{p}`, those individuals are cloned
using the :meth:`toolbox.clone` method and then mated using the
:meth:`toolbox.mate` method. Only the first child is appended to the
offspring population :math:`P_\mathrm{o}`, the second child is discarded.
In the case of a mutation, one individual is selected at random from
    :math:`P_\mathrm{p}`, it is cloned and then mutated using the
:meth:`toolbox.mutate` method. The resulting mutant is appended to
:math:`P_\mathrm{o}`. In the case of a reproduction, one individual is
selected at random from :math:`P_\mathrm{p}`, cloned and appended to
:math:`P_\mathrm{o}`.
    This variation is named *Or* because an offspring will never result from
both operations crossover and mutation. The sum of both probabilities
shall be in :math:`[0, 1]`, the reproduction probability is
1 - *cxpb* - *mutpb*.
"""
offspring = []
for _ in range(lambda_):
op_choice = np.random.random()
if op_choice < cxpb: # Apply crossover
ind1, ind2 = pick_two_individuals_eligible_for_crossover(population)
if ind1 is not None:
ind1, _ = toolbox.mate(ind1, ind2)
del ind1.fitness.values
else:
# If there is no pair eligible for crossover, we still want to
# create diversity in the population, and do so by mutation instead.
ind1 = mutate_random_individual(population, toolbox)
offspring.append(ind1)
elif op_choice < cxpb + mutpb: # Apply mutation
ind = mutate_random_individual(population, toolbox)
offspring.append(ind)
else: # Apply reproduction
idx = np.random.randint(0, len(population))
offspring.append(toolbox.clone(population[idx]))
return offspring
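def _varor_branch_tally(lambda_=1000, cxpb=0.5, mutpb=0.3, seed=0):
    # Illustrative sketch, not part of the original TPOT module: with cxpb=0.5 and
    # mutpb=0.3, roughly 50% of offspring come from crossover, 30% from mutation and
    # the remaining 20% from reproduction. This helper only tallies which branch
    # varOr would take for a number of random draws; it does not touch individuals.
    rng = np.random.RandomState(seed)
    counts = {'crossover': 0, 'mutation': 0, 'reproduction': 0}
    for _ in range(lambda_):
        op_choice = rng.random_sample()
        if op_choice < cxpb:
            counts['crossover'] += 1
        elif op_choice < cxpb + mutpb:
            counts['mutation'] += 1
        else:
            counts['reproduction'] += 1
    return counts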
def initialize_stats_dict(individual):
'''
Initializes the stats dict for individual
The statistics initialized are:
'generation': generation in which the individual was evaluated. Initialized as: 0
'mutation_count': number of mutation operations applied to the individual and its predecessor cumulatively. Initialized as: 0
'crossover_count': number of crossover operations applied to the individual and its predecessor cumulatively. Initialized as: 0
'predecessor': string representation of the individual. Initialized as: ('ROOT',)
Parameters
----------
individual: deap individual
Returns
-------
object
'''
individual.statistics['generation'] = 0
individual.statistics['mutation_count'] = 0
individual.statistics['crossover_count'] = 0
individual.statistics['predecessor'] = 'ROOT',
def eaMuPlusLambda(population, toolbox, mu, lambda_, cxpb, mutpb, ngen, pbar,
stats=None, halloffame=None, verbose=0, per_generation_function=None):
"""This is the :math:`(\mu + \lambda)` evolutionary algorithm.
:param population: A list of individuals.
:param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
operators.
:param mu: The number of individuals to select for the next generation.
:param lambda\_: The number of children to produce at each generation.
:param cxpb: The probability that an offspring is produced by crossover.
:param mutpb: The probability that an offspring is produced by mutation.
:param ngen: The number of generation.
:param pbar: processing bar
:param stats: A :class:`~deap.tools.Statistics` object that is updated
inplace, optional.
:param halloffame: A :class:`~deap.tools.HallOfFame` object that will
contain the best individuals, optional.
:param verbose: Whether or not to log the statistics.
    :param per_generation_function: if supplied, this function is called before each
                    generation; used by TPOT to save the best pipeline before each new generation
:returns: The final population
    :returns: A :class:`~deap.tools.Logbook` with the statistics of the
evolution.
The algorithm takes in a population and evolves it in place using the
:func:`varOr` function. It returns the optimized population and a
:class:`~deap.tools.Logbook` with the statistics of the evolution. The
    logbook will contain the generation number, the number of evaluations for
each generation and the statistics if a :class:`~deap.tools.Statistics` is
given as argument. The *cxpb* and *mutpb* arguments are passed to the
    :func:`varOr` function. The pseudocode goes as follows ::
evaluate(population)
for g in range(ngen):
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
evaluate(offspring)
population = select(population + offspring, mu)
First, the individuals having an invalid fitness are evaluated. Second,
the evolutionary loop begins by producing *lambda_* offspring from the
population, the offspring are generated by the :func:`varOr` function. The
offspring are then evaluated and the next generation population is
selected from both the offspring **and** the population. Finally, when
*ngen* generations are done, the algorithm returns a tuple with the final
population and a :class:`~deap.tools.Logbook` of the evolution.
This function expects :meth:`toolbox.mate`, :meth:`toolbox.mutate`,
:meth:`toolbox.select` and :meth:`toolbox.evaluate` aliases to be
registered in the toolbox. This algorithm uses the :func:`varOr`
variation.
"""
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Initialize statistics dict for the individuals in the population, to keep track of mutation/crossover operations and predecessor relations
for ind in population:
initialize_stats_dict(ind)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.evaluate(invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
# Begin the generational process
for gen in range(1, ngen + 1):
        # before evolving the next generation, save the current periodic pipeline
if per_generation_function is not None:
per_generation_function()
# Vary the population
offspring = varOr(population, toolbox, lambda_, cxpb, mutpb)
# Update generation statistic for all individuals which have invalid 'generation' stats
        # This holds for individuals that have been altered in the varOr function
for ind in population:
if ind.statistics['generation'] == 'INVALID':
ind.statistics['generation'] = gen
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
# update pbar for valid individuals (with fitness values)
if not pbar.disable:
pbar.update(len(offspring)-len(invalid_ind))
fitnesses = toolbox.evaluate(invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Select the next generation population
population[:] = toolbox.select(population + offspring, mu)
# pbar process
if not pbar.disable:
# Print only the best individual fitness
if verbose == 2:
high_score = abs(max([halloffame.keys[x].wvalues[1] for x in range(len(halloffame.keys))]))
pbar.write('Generation {0} - Current best internal CV score: {1}'.format(gen, high_score))
# Print the entire Pareto front
elif verbose == 3:
pbar.write('Generation {} - Current Pareto front scores:'.format(gen))
for pipeline, pipeline_scores in zip(halloffame.items, reversed(halloffame.keys)):
pbar.write('{}\t{}\t{}'.format(
int(abs(pipeline_scores.wvalues[0])),
abs(pipeline_scores.wvalues[1]),
pipeline
)
)
pbar.write('')
# Update the statistics with the new population
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
return population, logbook
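def _mu_plus_lambda_selection_sketch():
    # Illustrative sketch, not part of the original TPOT module: the heart of a
    # (mu + lambda) step is that parents and offspring compete together and only
    # the best mu survive. Fitnesses are plain floats here, whereas TPOT's real
    # toolbox.select works on multi-objective, Pareto-based fitness values.
    parent_fitness = [0.90, 0.85, 0.80]           # mu = 3 parents
    offspring_fitness = [0.95, 0.70, 0.88, 0.60]  # lambda = 4 offspring
    mu = len(parent_fitness)
    return sorted(parent_fitness + offspring_fitness, reverse=True)[:mu]
    # -> [0.95, 0.90, 0.88]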
def cxOnePoint(ind1, ind2):
"""Randomly select in each individual and exchange each subtree with the
point as root between each individual.
:param ind1: First tree participating in the crossover.
:param ind2: Second tree participating in the crossover.
:returns: A tuple of two trees.
"""
# List all available primitive types in each individual
types1 = defaultdict(list)
types2 = defaultdict(list)
for idx, node in enumerate(ind1[1:], 1):
types1[node.ret].append(idx)
common_types = []
for idx, node in enumerate(ind2[1:], 1):
if node.ret in types1 and node.ret not in types2:
common_types.append(node.ret)
types2[node.ret].append(idx)
if len(common_types) > 0:
type_ = np.random.choice(common_types)
index1 = np.random.choice(types1[type_])
index2 = np.random.choice(types2[type_])
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
# point mutation function
def mutNodeReplacement(individual, pset):
"""Replaces a randomly chosen primitive from *individual* by a randomly
chosen primitive no matter if it has the same number of arguments from the :attr:`pset`
attribute of the individual.
Parameters
----------
individual: DEAP individual
A list of pipeline operators and model parameters that can be
compiled by DEAP into a callable function
Returns
-------
individual: DEAP individual
        Returns the individual with one point mutation applied to it
"""
index = np.random.randint(0, len(individual))
node = individual[index]
slice_ = individual.searchSubtree(index)
if node.arity == 0: # Terminal
term = np.random.choice(pset.terminals[node.ret])
if isclass(term):
term = term()
individual[index] = term
else: # Primitive
# find next primitive if any
rindex = None
if index + 1 < len(individual):
for i, tmpnode in enumerate(individual[index + 1:], index + 1):
if isinstance(tmpnode, gp.Primitive) and tmpnode.ret in tmpnode.args:
rindex = i
break
        # pset.primitives[node.ret] returns the list of primitives with the same
        # return type as node. For example, if op.root is True then node.ret is the
        # Output_DF object (see _setup_pset), and primitives is the list of classifiers or regressors.
primitives = pset.primitives[node.ret]
if len(primitives) != 0:
new_node = np.random.choice(primitives)
new_subtree = [None] * len(new_node.args)
if rindex:
rnode = individual[rindex]
rslice = individual.searchSubtree(rindex)
# find position for passing return values to next operator
position = np.random.choice([i for i, a in enumerate(new_node.args) if a == rnode.ret])
else:
position = None
for i, arg_type in enumerate(new_node.args):
if i != position:
term = np.random.choice(pset.terminals[arg_type])
if isclass(term):
term = term()
new_subtree[i] = term
# paste the subtree to new node
if rindex:
new_subtree[position:position + 1] = individual[rslice]
# combine with primitives
new_subtree.insert(0, new_node)
individual[slice_] = new_subtree
return individual,
@threading_timeoutable(default="Timeout")
def _wrapped_cross_val_score(sklearn_pipeline, features, target,
cv, scoring_function, sample_weight=None, groups=None):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
sklearn_pipeline : pipeline object implementing 'fit'
The object to use to fit the data.
features : array-like of shape at least 2D
The data to fit.
target : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv: int or cross-validation generator
If CV is a number, then it is the number of folds to evaluate each
pipeline over in k-fold cross-validation during the TPOT optimization
process. If it is an object then it is an object to be used as a
cross-validation generator.
scoring_function : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
sample_weight : array-like, optional
        List of sample weights to balance (or un-balance) the dataset target as needed
groups: array-like {n_samples, }, optional
Group labels for the samples used while splitting the dataset into train/test set
"""
sample_weight_dict = set_sample_weight(sklearn_pipeline.steps, sample_weight)
features, target, groups = indexable(features, target, groups)
cv = check_cv(cv, target, classifier=is_classifier(sklearn_pipeline))
cv_iter = list(cv.split(features, target, groups))
scorer = check_scoring(sklearn_pipeline, scoring=scoring_function)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
scores = [_fit_and_score(estimator=clone(sklearn_pipeline),
X=features,
y=target,
scorer=scorer,
train=train,
test=test,
verbose=0,
parameters=None,
fit_params=sample_weight_dict)
for train, test in cv_iter]
CV_score = np.array(scores)[:, 0]
return np.nanmean(CV_score)
except TimeoutException:
return "Timeout"
    except Exception:
        return -float('inf')
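def _example_wrapped_cv_score():
    # Minimal usage sketch, not part of the original module, and assuming the
    # (older) scikit-learn private APIs imported above are still available.
    # The iris dataset, the pipeline and the 20 second timeout are arbitrary
    # illustrative choices.
    from sklearn.datasets import load_iris
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    pipeline = make_pipeline(StandardScaler(), LogisticRegression())
    return _wrapped_cross_val_score(pipeline, X, y, cv=5,
                                    scoring_function='accuracy', timeout=20)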
| lgpl-3.0 |
spallavolu/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
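def _example_small_benchmark():
    # Illustrative sketch, not part of the original benchmark script: time only
    # the micro-averaged F1 score on dense and CSR targets for a small input.
    results = benchmark(metrics=[METRICS['f1']],
                        formats=[FORMATS['dense'], FORMATS['csr']],
                        samples=500, classes=4, density=.2, n_times=3)
    # results is indexed as (metric, format, samples, classes, density).
    return results[0, :, -1, -1, -1]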
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
anirudhSK/chromium | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 26 | 11131 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
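def ExampleTempNameMatches():
  # Illustrative sketch, not part of the original script: shows which sample
  # paths the clean-up filter in CleanTempDir would treat as temporary entries.
  file_name_re = re.compile(
      r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
  samples = [r'C:\temp\83C4.tmp',
             '/tmp/.org.chromium.Chromium.EQrEzl',
             '/tmp/keep_me.txt']
  # The first two entries match; '/tmp/keep_me.txt' does not.
  return [s for s in samples if file_name_re.search(s) is not None]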
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
    raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
kernc/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 46 | 18585 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# Licence: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false)
np.seterr(all='warn')
ACTIVATION_TYPES = ["logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
def test_alpha():
    # Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.classes_ = [0, 1]
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.label_binarizer_.y_type_ = 'binary'
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
assert_almost_equal(mlp.decision_function(X), 1.043, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
algorithm='l-bfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
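def _numerical_gradient_sketch():
    # Illustrative sketch, not part of the original test suite (the leading
    # underscore keeps test collectors away): the same central-difference scheme
    # used in test_gradient, applied to a quadratic loss whose analytic gradient,
    # 2 * theta, can be checked by eye.
    def loss(t):
        return np.sum(t ** 2)
    theta = np.array([1.0, -2.0, 0.5])
    epsilon = 1e-5
    numgrad = np.zeros_like(theta)
    for i in range(theta.size):
        dtheta = np.zeros_like(theta)
        dtheta[i] = epsilon
        numgrad[i] = (loss(theta + dtheta) - loss(theta - dtheta)) / (2 * epsilon)
    return numgrad  # approximately [2.0, -4.0, 1.0]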
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solution.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(algorithm='l-bfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(algorithm='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
    # Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
    # Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(algorithm='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
    # `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(algorithm='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(algorithm='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
    # Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(
algorithm='sgd').partial_fit,
X, y,
classes=[2])
# l-bfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(algorithm='l-bfgs'), 'partial_fit'))
def test_params_errors():
    # Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(algorithm='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
    # Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multi():
    # Test that predict_proba works as expected for multi-class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
    # Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(random_state=1, hidden_layer_sizes=15)
mlp.fit(X, y)
pred1 = mlp.decision_function(X)
mlp.fit(X_sparse, y)
pred2 = mlp.decision_function(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the algorithm to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, algorithm='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, algorithm='sgd',
learning_rate='adaptive', verbose=10)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
| bsd-3-clause |
espenhgn/nest-simulator | pynest/examples/correlospinmatrix_detector_two_neuron.py | 12 | 2587 | # -*- coding: utf-8 -*-
#
# correlospinmatrix_detector_two_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Correlospinmatrix detector example
----------------------------------------
This script simulates two connected binary neurons, similar
to the setup in [1]_. It measures and plots the auto- and cross-covariance functions
of the individual neurons and between them, respectively.
References
~~~~~~~~~~~~
.. [1] Ginzburg and Sompolinsky (1994). Theory of correlations in stochastic neural networks. 50(4) p. 3175. Fig. 1.
"""
import matplotlib.pyplot as plt
import nest
import numpy as np
m_x = 0.5
tau_m = 10.
h = 0.1
T = 1000000.
tau_max = 100.
csd = nest.Create("correlospinmatrix_detector")
csd.set(N_channels=2, tau_max=tau_max, Tstart=tau_max, delta_tau=h)
nest.SetDefaults('ginzburg_neuron', {'theta': 0.0, 'tau_m': tau_m,
'c_1': 0.0, 'c_2': 2. * m_x, 'c_3': 1.0})
n1 = nest.Create("ginzburg_neuron")
nest.SetDefaults("mcculloch_pitts_neuron", {'theta': 0.5, 'tau_m': tau_m})
n2 = nest.Create("mcculloch_pitts_neuron")
nest.Connect(n1, n2, syn_spec={"weight": 1.0})
nest.Connect(n1, csd, syn_spec={"receptor_type": 0})
nest.Connect(n2, csd, syn_spec={"receptor_type": 1})
nest.Simulate(T)
c = csd.get("count_covariance")
m = np.zeros(2, dtype=float)
for i in range(2):
m[i] = c[i][i][int(tau_max / h)] * (h / T)
print('mean activities =', m)
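# Convert the raw coincidence counts into covariance functions:
# covariance = count * (h / T) - m_i * m_j, i.e. the second-moment estimate minus
# the product of the mean activities printed above.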
cmat = np.zeros((2, 2, int(2 * tau_max / h) + 1), dtype=float)
for i in range(2):
for j in range(2):
cmat[i, j] = c[i][j] * (h / T) - m[i] * m[j]
ts = np.arange(-tau_max, tau_max + h, h)
plt.title("auto- and cross covariance functions")
plt.plot(ts, cmat[0, 1], 'r', label=r"$c_{12}$")
plt.plot(ts, cmat[1, 0], 'b', label=r"$c_{21}$")
plt.plot(ts, cmat[0, 0], 'g', label=r"$c_{11}$")
plt.plot(ts, cmat[1, 1], 'y', label=r"$c_{22}$")
plt.xlabel(r"time $t \; \mathrm{ms}$")
plt.ylabel(r"$c$")
plt.legend()
plt.show()
| gpl-2.0 |
uhjish/BIDMach | scripts/runICA.py | 8 | 8769 | '''
A testing suite for ICA. This will run some Python code to build the data, then calls the ICA
testing script that contains BIDMach commands, then comes back to this Python code to plot the data.
This code should be in the BIDMach/scripts folder.
(c) February 2015 by Daniel Seita
'''
import matplotlib.pyplot as plt
import numpy as np
import pylab
import sys
from scipy import signal
from sklearn.decomposition import FastICA,PCA
from subprocess import call
'''
Returns a matrix where each row corresponds to one signal. Each row is scaled to unit variance
(the mean is not explicitly subtracted here), and additive Gaussian noise is applied. In order to
ensure that we actually see enough variation in a small time window, the "first" and "second"
variables (and possibly others) are used to increase/decrease the "density" of the data. For
instance a high "first" value pushes the sine waves close together.
> group is an integer that represents the group selection, useful for running many tests
> time is from numpy and controls the density of the data
> num_samples is the number of total samples to use for each row
'''
def get_source(group, time, num_samples):
S = None
first = max(2, int(num_samples/4000))
second = max(3, int(num_samples/3000))
third = max(2, first/10)
if group == 1:
s1 = np.sin(first * time)
s2 = np.sign(np.sin(second * time))
s3 = signal.sawtooth(first * np.pi * time)
S = np.c_[s1, s2, s3]
elif group == 2:
s1 = np.sin(first * time)
s2 = np.sign(np.sin(second * time))
s3 = signal.sawtooth(first * np.pi * time)
s4 = signal.sweep_poly(third * time, [1,2])
S = np.c_[s1, s2, s3, s4]
elif group == 3:
        s1 = np.cos(second * time)                 # Signal 1: cosine signal
s2 = np.sign(np.sin(second * time)) # Signal 2: square signal
s3 = signal.sawtooth(first * np.pi * time) # Signal 3: saw tooth signal
s4 = signal.sweep_poly(third * time, [1,2]) # Signal 4: sweeping polynomial signal
s5 = np.sin(first * time) # Signal 5: sinusoidal signal
S = np.c_[s1, s2, s3, s4, s5]
elif group == 4:
s1 = np.sin(first * time)
s2 = signal.sawtooth(float(first/2.55) * np.pi * time)
s3 = np.sign(np.sin(second * time))
s4 = signal.sawtooth(first * np.pi * time)
s5 = signal.sweep_poly(third * time, [1,2])
S = np.c_[s1, s2, s3, s4, s5]
S += 0.2 * np.random.normal(size=S.shape)
S /= S.std(axis=0)
return S.T
'''
Generates mixed data. Note that if whitened = True, this picks a pre-whitened matrix to analyze...
Takes in the group number and returns a mixing matrix of the appropriate size. If the data needs to
be pre-whitened, then we should pick an orthogonal mixing matrix. There are three orthogonal
matrices and three non-orthogonal matrices.
'''
def get_mixing_matrix(group, pre_whitened):
A = None
if group == 1:
if pre_whitened:
A = np.array([[ 0, -.8, -.6],
[.8, -.36, .48],
[.6, .48, -.64]])
else:
A = np.array([[ 1, 1, 1],
[0.5, 2, 1],
[1.5, 1, 2]])
elif group == 2:
if pre_whitened:
A = np.array([[-0.040037, 0.24263, -0.015820, 0.96916],
[ -0.54019, 0.29635, 0.78318, -0.083724],
[ 0.84003, 0.23492, 0.48878, -0.016133],
[ 0.030827, -0.89337, 0.38403, 0.23120]])
else:
A = np.array([[ 1, 2, -1, 2.5],
[-.1, -.1, 3, -.9],
[8, 1, 7, 1],
[1.5, -2, 3, -1]])
elif group == 3 or group == 4:
if pre_whitened:
A = np.array([[ 0.31571, 0.45390, -0.59557, 0.12972, 0.56837],
[-0.32657, 0.47508, 0.43818, -0.56815, 0.39129],
[ 0.82671, 0.11176, 0.54879, 0.05170, 0.01480],
[-0.12123, -0.56812, 0.25204, 0.28505, 0.71969],
[-0.30915, 0.48299, 0.29782, 0.75955, -0.07568]])
else:
A = np.array([[ 1, 2, -1, 2.5, 1],
[-.1, -.1, 3, -.9, 2],
[8, 1, 7, 1, 3],
[1.5, -2, 3, -1, 4],
[-.1, 4, -.1, 3, -.2]])
return A
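def example_check_orthogonality():
    # Illustrative sketch (not part of the original script): for pre-whitened data
    # the mixing matrices above are chosen to be orthogonal, so A.dot(A.T) should
    # be (numerically) the identity matrix.
    A = get_mixing_matrix(group=1, pre_whitened=True)
    return np.allclose(A.dot(A.T), np.eye(A.shape[0]))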
'''
Takes in the predicted source from BIDMach and the original source and attempts to change the order
and sign of the predicted rows to match the original ones. This is purely for debugging. newS
is the list of lists that forms the numpy array, and rows_B_taken ensures a 1-1 correspondence.
> B is the predicted source from BIDMach
> S is the actual source before mixing
'''
def rearrange_data(B, S):
newS = []
rows_B_taken = []
for i in range(S.shape[0]):
new_row = S[i,:]
change_sign = False
best_norm = 99999999
best_row_index = -1
for j in range(B.shape[0]):
if j in rows_B_taken:
continue
old_row = B[j,:]
norm1 = np.linalg.norm(old_row + new_row)
if norm1 < best_norm:
best_norm = norm1
best_row_index = j
change_sign = True
            norm2 = np.linalg.norm(old_row - new_row)
            if norm2 < best_norm:
                best_norm = norm2
                best_row_index = j
                # No sign flip is needed when the un-negated row is the better match.
                change_sign = False
rows_B_taken.append(best_row_index)
if change_sign:
newS.append((-B[best_row_index,:]).tolist())
else:
newS.append(B[best_row_index,:].tolist())
return np.array(newS)
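def example_sign_permutation():
    # Illustrative sketch (not part of the original script): ICA recovers sources
    # only up to permutation and sign, which is exactly the ambiguity that
    # rearrange_data undoes. Here row 0 of B equals -S[1] and row 1 equals S[0],
    # so the returned array should match S again.
    S = np.array([[1., 2., 3.],
                  [4., 5., 6.]])
    B = np.array([[-4., -5., -6.],
                  [1., 2., 3.]])
    return rearrange_data(B, S)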
########
# MAIN #
########
# Some administrative stuff to make it clear how to handle this code.
if len(sys.argv) != 5:
print "\nUsage: python runICA.py <num_samples> <data_group> <pre_zero_mean> <pre_whitened>"
print "<num_samples> should be an integer; recommended to be at least 10000"
print "<data_group> should be an integer; currently only {1,2,3,4} are supported"
print "<pre_zero_mean> should be \'Y\' or \'y\' if you want the (mixed) data to have zero-mean"
print "<pre_whitened> should be \'Y\' or \'y\' if you want the (mixed) data to be pre-whitened"
print "You also need to call this code in the directory where you can call \'./bidmach scripts/ica_test.ssc\'\n"
sys.exit()
n_samples = int(sys.argv[1])
data_group = int(sys.argv[2])
pre_zero_mean = True if sys.argv[3].lower() == "y" else False
pre_whitened = True if sys.argv[4].lower() == "y" else False
if data_group < 1 or data_group > 4:
raise Exception("Data group = " + str(data_group) + " is out of range.")
plot_extra_info = False # If true, plot the mixed input data (X) in addition to the real/predicted sources
# With parameters in pace, generate source, mixing, and output matrices, and save them to files.
np.random.seed(0)
time = np.linspace(0, 8, n_samples) # These need to depend on num of samples
S = get_source(data_group, time, n_samples)
A = get_mixing_matrix(data_group, pre_whitened)
X = np.dot(A,S)
print "\nMean for the mixed data:"
for i in range(X.shape[0]):
print "Row {}: {}".format(i+1, np.mean(X[i,:]))
print "\nThe covariance matrix for the mixed data is\n{}.".format(np.cov(X))
np.savetxt("ica_source.txt", S, delimiter=" ")
np.savetxt("ica_mixing.txt", A, delimiter=" ")
np.savetxt("ica_output.txt", X, delimiter=" ")
print "\nNow calling ICA in BIDMach...\n"
# Call BIDMach. Note that this will exit automatically with sys.exit, without user intervention.
call(["./bidmach", "scripts/ica_test.ssc"])
print "\nFinished with BIDMach. Now let us plot the data."
# Done with BIDMach. First, for the sake of readability, get distributions in same order.
B = pylab.loadtxt('ica_pred_source.txt')
newB = rearrange_data(B, S)
# Extract data and plot results. Add more colors if needed but 5 is plenty.
plt.figure()
if plot_extra_info:
models = [X.T, S.T, newB.T]
names = ['Input to ICA','True Sources Before Mixing','BIDMach\'s FastICA']
else:
models = [S.T, newB.T]
names = ['True Sources Before Mixing','BIDMach\'s FastICA']
colors = ['darkcyan', 'red', 'blue', 'orange', 'yellow']
plot_xlim = min(n_samples-1, 10000)
for ii, (model, name) in enumerate(zip(models, names), 1):
if plot_extra_info:
plt.subplot(3, 1, ii)
else:
plt.subplot(2, 1, ii)
plt.title(name)
plt.xlim([0,plot_xlim])
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
vigilv/scikit-learn | sklearn/linear_model/ridge.py | 60 | 44642 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
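# Comment-only note on the two branches above: for n_features > n_samples the
# CG system is the dual (kernel) form, otherwise the primal normal equations;
# both give the same ridge solution. A dense sketch of that identity, assuming
# a 2-d X, 1-d y and a scalar penalty a:
#
#   n, d = X.shape
#   w_primal = np.linalg.solve(X.T.dot(X) + a * np.eye(d), X.T.dot(y))
#   w_dual = X.T.dot(np.linalg.solve(X.dot(X.T) + a * np.eye(n), y))
#   # np.allclose(w_primal, w_dual) holds up to numerical error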
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
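# Note (illustration only): ``A.flat[::n_features + 1] += alpha`` adds alpha to
# the diagonal of the square array A in place, i.e. it forms X^T X + alpha*Id
# without allocating an identity matrix, e.g.:
#
#   A = np.zeros((3, 3))
#   A.flat[::3 + 1] += 2.0   # A is now 2.0 * np.eye(3)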
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
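# Comment-only sketch of the identity used above: with the thin SVD
# X = U * diag(s) * Vt, the ridge solution for a single target and penalty is
#
#   coef = np.dot(Vt.T, (s / (s ** 2 + alpha)) * np.dot(U.T, y))
#
# which matches np.linalg.solve(X.T.dot(X) + alpha * np.eye(X.shape[1]),
# X.T.dot(y)); the broadcasting in _solve_svd evaluates this for every
# alpha/target at once.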
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
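# Hedged usage sketch for ridge_regression, kept as a comment so importing this
# module stays side-effect free; the shapes and solver choice are assumptions:
#
#   rng = np.random.RandomState(0)
#   X, y = rng.randn(20, 5), rng.randn(20)
#   coef = ridge_regression(X, y, alpha=1.0, solver='cholesky')
#   coef, n_iter = ridge_regression(X, y, alpha=1.0, solver='lsqr',
#                                   return_n_iter=True)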
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
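    # Comment-only sanity note: for any matrix Q and vector v_prime,
    # _decomp_diag(v_prime, Q) equals np.diag(Q.dot(np.diag(v_prime)).dot(Q.T))
    # without materialising the full matrix, and _diag_dot(D, B) equals
    # np.diag(D).dot(B) for 1-d or 2-d B.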
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer want an object that will make the predictions but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
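    # Hedged usage sketch (comment only; data shapes and values are assumptions):
    #
    #   import numpy as np
    #   from sklearn.linear_model import RidgeCV
    #   X, y = np.random.randn(50, 4), np.random.randn(50)
    #   reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    #   reg.alpha_   # penalty selected by generalized cross-validation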
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
canercandan/polyphasic | everyman.py | 1 | 2286 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
TOTAL_AWAKE_TIME = time(19,30)
TOTAL_ASLEEP_TIME = time(4,30)
def check_times(core, nap, awakes):
total_awake_time = datetime.combine(date.today(), time(0,0))
for i in range(4): total_awake_time += awakes[i]
total_awake_time = total_awake_time.time()
assert total_awake_time == TOTAL_AWAKE_TIME
total_asleep_time = (datetime.combine(date.today(), time(0,0)) + nap*3 + core).time()
assert total_asleep_time == TOTAL_ASLEEP_TIME
assert (datetime.combine(date.today(), total_awake_time) + timedelta(hours=total_asleep_time.hour, minutes=total_asleep_time.minute)).time() == time(0,0)
def compute_times(initial=(0,0), core_rank=0, core=(3,30), nap=(0,20),
awakes=[(5,30), (4,30), (4,30), (5,0)]):
core = timedelta(hours=core[0], minutes=core[1])
nap = timedelta(hours=nap[0], minutes=nap[1])
for i in range(4): awakes[i] = timedelta(hours=awakes[i][0], minutes=awakes[i][1])
check_times(core, nap, awakes)
times = []
s = e = datetime.combine(date.today(), time(*initial))
for i in range(4):
d = (core if core_rank == i else nap)
s = e; e = s + d; times.append((s.time(), e.time(), d))
s = e; e = s + awakes[i]; times.append((s.time(), e.time(), awakes[i]))
return times
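# Illustrative note (comment only; the start time is an arbitrary choice):
# compute_times((22, 0), core_rank=0) returns alternating sleep/wake blocks as
# (start, end, duration) tuples -- here a 3h30 core sleep from 22:00 to 01:30,
# a 5h30 wake block to 07:00, then three 20-minute naps separated by the
# remaining wake blocks.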
fig, axes = plt.subplots(nrows=2, ncols=2)
explode = (.1, 0, .1, 0, .1, 0, .1, 0,)
colors = ['gold', 'lightskyblue',]
for ax, initial, rank, aa in [(axes[0,0], (22,0), 0, [(5,30), (4,30), (4,30), (5,0)]),
(axes[0,1], (22,0), 0, [(4,30), (5,30), (4,30), (5,0)]),
(axes[1,0], (22,0), 0, [(5,30), (4,30), (4,30), (5,0)]),
]:
times = compute_times(initial=initial, core_rank=rank, awakes=aa)
sizes = [t[2].seconds for t in times]
labels = ['%s\n%s' % (s.strftime('%H:%M'),e.strftime('%H:%M')) for s,e,d in times]
ax.pie(list(reversed(sizes)), labels=list(reversed(labels)),
explode=list(reversed(explode)), colors=list(reversed(colors)),
shadow=True, startangle=(-(initial[0]*60+initial[1])/(24*60))*360+90,
#autopct='%1.1f%%'
)
ax.axis('equal')
plt.show()
| gpl-3.0 |
AllenDowney/HeriReligion | archive/heri2.py | 1 | 3742 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import matplotlib.pyplot as pyplot
import myplot
import csv
#import Cdf
#import correlation
import heri
import math
import random
#import thinkstats
UPPER = 2014
FORMATS = ['pdf', 'png']
FILENAME = 'heri13.csv'
def ReadData(filename=FILENAME):
"""Reads a CSV file of data from HERI's CIRP survey.
Args:
filename: string filename
Returns:
list of (score, number) pairs
"""
fp = open(filename)
reader = csv.reader(fp)
res = []
for t in reader:
try:
year = int(t[0])
res.append(t)
except ValueError:
pass
return res
def GetColumn(data, index):
"""Extracts the given column from the dataset.
data: sequence of rows
index: which column
Returns: map from int year to float datum
"""
res = {}
for row in data:
try:
year = int(row[0])
res[year] = float(row[index]) / 10.0
except ValueError:
pass
return res
def RenderColumn(col):
"""Returns a sequence of years and a sequence of data.
col: map from int year to float datum
Returns: tuples of ts and ys
"""
return zip(*sorted(col.items()))
def DiffColumns(col1, col2):
"""Computes the difference between two columns.
col1: map from int year to float datum
col2: map from int year to float datum
Returns: map from int year to float difference
"""
years1 = set(col1)
years2 = set(col2)
    res = [(year, col1[year] - col2[year]) for year in sorted(years1 & years2)]
return zip(*res)
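# Small comment-only example of the helpers above (values are made up):
# with col1 = {1970: 12.0, 1971: 13.0} and col2 = {1970: 10.0, 1971: 11.5},
# DiffColumns(col1, col2) yields the year sequence (1970, 1971) and the
# difference sequence (2.0, 1.5).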
def MakePlot(filename='heri.csv'):
"""Generates a plot with the data, a fitted model, and error bars."""
pyplot.clf()
data = ReadData(filename)
attended = GetColumn(data, 4)
del attended[1966]
ts, ys = RenderColumn(attended)
ys = [100-y for y in ys]
pyplot.plot(ts, ys, 'go-', linewidth=3, markersize=0, alpha=0.7,
label='No attendance')
nones = GetColumn(data, 1)
ts, ys = RenderColumn(nones)
pyplot.plot(ts, ys, 'bs-', linewidth=3, markersize=0, alpha=0.7,
label='No religion')
myplot.Save(root='heri2.3',
formats=FORMATS,
title='',
xlabel='',
ylabel='Percent',
axis=[1966, UPPER, 0, 30])
def MakeGenderPlot(filename='heri13.csv'):
"""Generates a plot with the data, a fitted model, and error bars."""
pyplot.clf()
data = ReadData(filename)
men = GetColumn(data, 6)
ts, ys = RenderColumn(men)
pyplot.plot(ts, ys, 'b-', linewidth=3, alpha=0.7, label='men')
women = GetColumn(data, 11)
ts, ys = RenderColumn(women)
pyplot.plot(ts, ys, 'g-', linewidth=3, alpha=0.7, label='women')
myplot.Save(root='heri2.1',
formats=FORMATS,
title='',
xlabel='',
ylabel='Preferred religion None (%)',
axis=[1967, UPPER, 0, 28])
del men[1969]
del women[1969]
ts, ds = DiffColumns(men, women)
heri.MakePlot(ts, ds,
model='ys ~ ts')
pyplot.plot(ts, ds, color='purple', linewidth=3, alpha=0.7,
label='Gender gap')
myplot.Save(root='heri2.2',
formats=FORMATS,
title='',
xlabel='',
ylabel='Percentage points',
axis=[1967, UPPER, 0, 6])
def main(script):
MakePlot()
MakeGenderPlot()
if __name__ == '__main__':
import sys
main(*sys.argv)
| mit |
ScienceStacks/SciSheets | mysite/scisheets/plugins/test_importCSV.py | 2 | 1607 | """
Tests importCSV
"""
from importCSV import importCSV
from scisheets.core import api as api
from scisheets.core import helpers_test as ht
import os
import pandas as pd
import unittest
#############################
# Tests
#############################
# pylint: disable=W0212,C0111,R0904
class TestAPI(unittest.TestCase):
def setUp(self):
ht.setupTableInitialization(self)
self.api = api.APIFormulas(self.table)
def testImportCSV(self):
filename = "test_importCSV.csv"
filepath = os.path.join(ht.TEST_DIR, filename)
names = ["x", "y", "z"]
data = [ names, [1, 10.0, "aa"], [2, 20.0, "bb"]]
data_len = len(data) - 1
data_idx = range(1, len(data))
fd = open(filepath, "w")
for line_as_list in data:
str_list = [str(x) for x in line_as_list]
line = "%s\n" % (','.join(str_list))
fd.write(line)
fd.close()
try:
importCSV(self.api, "badpath.csv")
except Exception as e:
b = isinstance(e, IOError) or isinstance(e, ValueError)
self.assertTrue(b)
with self.assertRaises(KeyError):
importCSV(self.api, filepath, ['w'])
column_list = list(names)
imported_names = importCSV(self.api, filepath, names=column_list)
self.assertTrue(imported_names == column_list)
for idx in range(len(column_list)):
name = names[idx]
self.assertTrue(self.api._table.isColumnPresent(name))
column = self.api._table.columnFromName(name)
values = column.getCells()[:data_len]
expected_list = [data[n][idx] for n in range(1, len(data))]
self.assertTrue(values == expected_list)
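# Allow running this test module directly; this guard is an addition (an
# assumption about how the suite is normally invoked) and is a no-op under a
# test runner.
if __name__ == '__main__':
  unittest.main()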
| apache-2.0 |
acmaheri/sms-tools | lectures/4-STFT/plots-code/sine-spectrum.py | 24 | 1563 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, ifft
N = 256
M = 63
f0 = 1000
fs = 10000
A0 = .8
hN = N/2
hM = (M+1)/2
fftbuffer = np.zeros(N)
X1 = np.zeros(N, dtype='complex')
X2 = np.zeros(N, dtype='complex')
x = A0 * np.cos(2*np.pi*f0/fs*np.arange(-hM+1,hM))
plt.figure(1, figsize=(9.5, 7))
w = np.hanning(M)
plt.subplot(2,3,1)
plt.title('w (hanning window)')
plt.plot(np.arange(-hM+1, hM), w, 'b', lw=1.5)
plt.axis([-hM+1, hM, 0, 1])
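# The next two assignments wrap the window around the FFT buffer (zero-phase
# windowing): the second half of w goes at the start of fftbuffer and the first
# half at the end, centring the window at sample 0 so the spectrum phase stays
# close to zero. The same trick is reused for the windowed sinewave xw below.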
fftbuffer[:hM] = w[hM-1:]
fftbuffer[N-hM+1:] = w[:hM-1]
X = fft(fftbuffer)
X1[:hN] = X[hN:]
X1[N-hN:] = X[:hN]
mX = 20*np.log10(abs(X1))
plt.subplot(2,3,2)
plt.title('mW')
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])
pX = np.angle(X1)
plt.subplot(2,3,3)
plt.title('pW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])
plt.subplot(2,3,4)
plt.title('xw (windowed sinewave)')
xw = x*w
plt.plot(np.arange(-hM+1, hM), xw, 'b', lw=1.5)
plt.axis([-hM+1, hM, -1, 1])
fftbuffer = np.zeros(N)
fftbuffer[0:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
X = fft(fftbuffer)
X2[:hN] = X[hN:]
X2[N-hN:] = X[:hN]
mX2 = 20*np.log10(abs(X2))
plt.subplot(2,3,5)
plt.title('mXW')
plt.plot(np.arange(-hN, hN), mX2, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])
pX = np.angle(X2)
plt.subplot(2,3,6)
plt.title('pXW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])
plt.tight_layout()
plt.savefig('sine-spectrum.png')
plt.show()
| agpl-3.0 |
reinaldomaslim/Project_Bixi | bixi_nav/nodes/find_boxes.py | 2 | 3917 | #!/usr/bin/env python
""" Reinaldo
5-5-17
"""
import rospy
import actionlib
import numpy as np
import math
import cv2
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, Point, Quaternion, Twist, PoseStamped
from sensor_msgs.msg import RegionOfInterest, CameraInfo, LaserScan
from nav_msgs.msg import Odometry
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from visualization_msgs.msg import Marker
from sklearn.cluster import MeanShift, estimate_bandwidth
class IdentifyBox(object):
x0, y0, yaw0= 0, 0, 0
box_length=0.2
edges=[]
clustered_edges=[]
box=[]
n_edge=30
cam_angle=math.pi/2
def __init__(self, nodename):
rospy.init_node(nodename, anonymous=False)
self.initMarker()
rospy.Subscriber("/odometry/filtered", Odometry, self.odom_callback, queue_size = 50)
rospy.Subscriber("/edge", PoseStamped, self.edgeCallback, queue_size = 50)
self.box_pose_pub=rospy.Publisher("/box", PoseStamped, queue_size=10)
rate=rospy.Rate(10)
msg=PoseStamped()
msg.header.frame_id="odom"
while not rospy.is_shutdown():
            if self.box:
for x in self.box:
msg.pose.position.x = x[0]
msg.pose.position.y = x[1]
q_angle = quaternion_from_euler(0, 0, x[2])
msg.pose.orientation = Quaternion(*q_angle)
self.box_pose_pub.publish(msg)
rate.sleep()
def edgeCallback(self, msg):
#for a detected edge, add it into the list. If list is full, replace the first element.
        if len(self.edges) == self.n_edge:
#remove the first element
del self.edges[0]
_, _, yaw_angle = euler_from_quaternion((msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w))
        self.edges.append([msg.pose.position.x, msg.pose.position.y, yaw_angle])
#perform clustering to edges
X=np.asarray(self.edges)
bandwidth = estimate_bandwidth(X, quantile=0.4)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
self.clustered_edges = ms.cluster_centers_
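        # ms.cluster_centers_ is an (n_clusters, 3) array of averaged
        # [x, y, yaw] detections, so repeated observations of the same box edge
        # collapse into one candidate that symbolCallback can match by heading.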
def symbolCallback(self, msg):
#msg.x is type 0-> left, 1->right, 2->hole
#msg.z is the symbol's center angle wrt camera's field of view.
        # headings in rad; camera's field of view = 78 deg, centred on self.cam_angle
        sym_heading = self.yaw0 + self.cam_angle - msg.z
#match symbol with existing edges
for edge in self.clustered_edges:
            theta = math.atan2(edge[1] - self.y0, edge[0] - self.x0)  # heading from robot to this edge
#if difference between theta and symbol heading within 5 degrees
if abs(sym_heading-theta)<5*math.pi/180:
#found the right edge, compute center of box
center_x=edge[0]-self.box_length*math.cos(edge[2])/2
center_y=edge[1]-self.box_length*math.sin(edge[2])/2
if msg.x==0:
#left
direction=edge[2]-math.pi/2
elif msg.x==1:
#right
direction=edge[2]+math.pi/2
elif msg.x==2:
#center
direction=edge[2]
self.box.append([center_x, center_y, direction])
break
def odom_callback(self, msg):
self.x0 = msg.pose.pose.position.x
self.y0 = msg.pose.pose.position.y
_, _, self.yaw0 = euler_from_quaternion((msg.pose.pose.orientation.x, msg.pose.pose.orientation.y, msg.pose.pose.orientation.z, msg.pose.pose.orientation.w))
self.odom_received = True
if __name__ == '__main__':
try:
IdentifyBox(nodename="identify_box")
except rospy.ROSInterruptException:
rospy.loginfo("Boxes detection finished.")
| gpl-3.0 |
jsfan/swim | scripts/epsilon-plot.py | 1 | 3640 | #!/usr/bin/python
# SWiM - a semi-Lagrangian, semi-implicit shallow water model in
# Cartesian coordiates
# Copyright (C) 2008-2012 Christian Lerrahn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# epsilon-plot.py
import getopt
import sys
import matplotlib
matplotlib.use('Agg')
import pylab as p
from Scientific.IO.NetCDF import *
from numpy import *
def usage():
print 'Usage: epsilon-plot.py [<options>] <NetCDF file>'
print "\t--minval\tMinimum value for colour scale"
print "\t--maxval\tMaximum value for colour scale"
print "\t-d,--dynamic\tOff-centring is dynamic => plot maks for every time step"
print "\t-t, --transpose\ttranspose value matrix before plotting"
print "\t-p, --prefix\tprefix for output files"
sys.exit(2)
try:
(opts,args) = getopt.getopt(sys.argv[1:],'tp:d',('minval=','maxval=','transpose','prefix=','dynamic'))
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
if len(args) < 1:
usage()
# get file name from cmd line
filename = args[0]
# load data from netCDF file
netcdf = NetCDFFile(filename,'r')
# read coordinate data
lat = netcdf.variables['latitude'].getValue()
# read dimensions
x = netcdf.dimensions['x']
y = netcdf.dimensions['y']
# read variable to be plotted
timevar = netcdf.variables['t'].getValue()
minval = 0
maxval = 0
tp = 0
prefix = ''
dyn = 0
for opt, arg in opts:
if opt == '--minval':
minval = int(arg)
elif opt == '--maxval':
maxval = int(arg)
elif opt in ('-t','--transpose'):
tp = 1
    elif opt in ('-d', '--dynamic'):
dyn = 1
elif opt in ('--prefix','-p'):
prefix = arg + '-'
if tp:
print 'Value matrix will be transposed.'
print 'Boundaries for values are %0.2f and %0.2f' % (minval,maxval)
step = int(ceil((maxval-minval)/1024.0))
for i in range(1,4):
# read variable to be plotted
pvar = netcdf.variables['epsilon'+repr(i)].getValue()
timevar = netcdf.variables['t'].getValue()
maxt = len(pvar)
# colourbar range not set on cmdline
if (minval == maxval):
minval = int(floor(1.01*array(pvar).min()))
maxval = int(ceil(1.01*array(pvar).max()))
print 'Plotting epsilon'+repr(i)+'...'
if dyn == 1:
for t in range(maxt):
print 'Plotting time step '+repr(t)
p.imshow(pvar[t,:,:].transpose(),origin='lower',interpolation='nearest',vmin=minval,vmax=maxval)
p.colorbar(orientation="horizontal")
filename = "epsilon%04d.png" % (t)
ptime = "%0.3f" % (timevar[t]/3600)
p.title('epsilon'+repr(i)+' at t=' + ptime + 'h')
p.savefig(filename)
#p.show()
p.clf()
else:
print 'Plotting first epsilon'+repr(i)+' as global.'
p.imshow(pvar[0,:,:].transpose(),origin='lower',interpolation='nearest')
p.colorbar(orientation="horizontal")
filename = "epsilon%01d.png" % (i)
p.title('epsilon'+repr(i))
p.savefig(filename)
#p.show()
p.clf()
| gpl-3.0 |
saimn/glue | glue/_mpl_backend.py | 4 | 1130 | class MatplotlibBackendSetter(object):
"""
Import hook to make sure the proper Qt backend is set when importing
Matplotlib.
"""
enabled = True
def find_module(self, mod_name, pth=None):
if self.enabled and 'matplotlib' in mod_name:
self.enabled = False
set_mpl_backend()
def find_spec(self, name, import_path, target_module=None):
pass
def set_mpl_backend():
try:
from qtpy import PYQT5
except:
# If Qt isn't available, we don't have to worry about
# setting the backend
return
from matplotlib import rcParams, rcdefaults
# standardize mpl setup
rcdefaults()
if PYQT5:
rcParams['backend'] = 'Qt5Agg'
else:
rcParams['backend'] = 'Qt4Agg'
# The following is a workaround for the fact that Matplotlib checks the
# rcParams at import time, not at run-time. I have opened an issue with
# Matplotlib here: https://github.com/matplotlib/matplotlib/issues/5513
from matplotlib import get_backend
from matplotlib import backends
backends.backend = get_backend()
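# Hedged usage sketch (comment only): the hook is expected to be registered on
# sys.meta_path before matplotlib is imported, roughly
#
#   import sys
#   sys.meta_path.insert(0, MatplotlibBackendSetter())
#
# so the first ``import matplotlib`` triggers set_mpl_backend(); the exact
# registration site in glue is an assumption here.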
| bsd-3-clause |
alexvmarch/atomic | exatomic/core/atom.py | 3 | 13347 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Atomic Position Data
############################
This module provides a collection of dataframes supporting nuclear positions,
forces, velocities, symbols, etc. (all data associated with atoms as points).
"""
from numbers import Integral
import numpy as np
import pandas as pd
from exa import DataFrame, SparseDataFrame, Series
from exa.util.units import Length
from exatomic.base import sym2z, sym2mass
from exatomic.algorithms.distance import modv
from exatomic.core.error import PeriodicUniverseError
from exatomic.algorithms.geometry import make_small_molecule
class Atom(DataFrame):
"""
The atom dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| x | float | position in x (req.) |
+-------------------+----------+-------------------------------------------+
| y | float | position in y (req.) |
+-------------------+----------+-------------------------------------------+
| z | float | position in z (req.) |
+-------------------+----------+-------------------------------------------+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| symbol | category | element symbol (req.) |
+-------------------+----------+-------------------------------------------+
| fx | float | force in x |
+-------------------+----------+-------------------------------------------+
| fy | float | force in y |
+-------------------+----------+-------------------------------------------+
| fz | float | force in z |
+-------------------+----------+-------------------------------------------+
| vx | float | velocity in x |
+-------------------+----------+-------------------------------------------+
| vy | float | velocity in y |
+-------------------+----------+-------------------------------------------+
| vz | float | velocity in z |
+-------------------+----------+-------------------------------------------+
"""
_index = 'atom'
_cardinal = ('frame', np.int64)
_categories = {'symbol': str, 'set': np.int64, 'molecule': np.int64,
'label': np.int64}
_columns = ['x', 'y', 'z', 'symbol']
#@property
#def _constructor(self):
# return Atom
@property
def nframes(self):
"""Return the total number of frames in the atom table."""
return np.int64(self.frame.cat.as_ordered().max() + 1)
@property
def last_frame(self):
"""Return the last frame of the atom table."""
return self[self.frame == self.nframes - 1]
@property
def unique_atoms(self):
"""Return unique atom symbols of the last frame."""
return self.last_frame.symbol.unique()
def center(self, idx, frame=None):
"""Return a copy of a single frame of the atom table
centered around a specific atom index."""
if frame is None: frame = self.last_frame.copy()
else: frame = self[self.frame == frame].copy()
center = frame.ix[idx]
for r in ['x', 'y', 'z']:
if center[r] > 0: frame[r] = frame[r] - center[r]
else: frame[r] = frame[r] + np.abs(center[r])
return frame
def to_xyz(self, tag='symbol', header=False, comments='', columns=None,
frame=None, units='Angstrom'):
"""
Return atomic data in XYZ format, by default without the first 2 lines.
If multiple frames are specified, return an XYZ trajectory format. If
frame is not specified, by default returns the last frame in the table.
Args:
tag (str): column name to use in place of 'symbol'
header (bool): if True, return the first 2 lines of XYZ format
comment (str, list): comment(s) to put in the comment line
frame (int, iter): frame or frames to return
units (str): units (default angstroms)
Returns:
ret (str): XYZ formatted atomic data
"""
# TODO :: this is conceptually a duplicate of XYZ.from_universe
columns = (tag, 'x', 'y', 'z') if columns is None else columns
frame = self.nframes - 1 if frame is None else frame
if isinstance(frame, Integral): frame = [frame]
if not isinstance(comments, list): comments = [comments]
if len(comments) == 1: comments = comments * len(frame)
df = self[self['frame'].isin(frame)].copy()
if tag not in df.columns:
if tag == 'Z':
stoz = sym2z()
df[tag] = df['symbol'].map(stoz)
df['x'] *= Length['au', units]
df['y'] *= Length['au', units]
df['z'] *= Length['au', units]
grps = df.groupby('frame')
ret = ''
formatter = {tag: '{:<5}'.format}
stargs = {'columns': columns, 'header': False,
'index': False, 'formatters': formatter}
t = 0
for _, grp in grps:
if not len(grp): continue
tru = (header or comments[t] or len(frame) > 1)
hdr = '\n'.join([str(len(grp)), comments[t], '']) if tru else ''
ret = ''.join([ret, hdr, grp.to_string(**stargs), '\n'])
t += 1
return ret
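    # Hedged usage sketch (comment only; ``uni`` is an assumed universe object):
    #
    #   xyz_text = uni.atom.to_xyz(header=True, comments='frame 0', frame=0)
    #   with open('frame0.xyz', 'w') as f:
    #       f.write(xyz_text)
    #
    # which writes the selected frame with coordinates in Angstrom by default.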
def get_element_masses(self):
"""Compute and return element masses from symbols."""
return self['symbol'].astype('O').map(sym2mass)
def get_atom_labels(self):
"""
Compute and return enumerated atoms.
Returns:
labels (:class:`~exa.core.numerical.Series`): Enumerated atom labels (of type int)
"""
nats = self.cardinal_groupby().size().values
labels = Series([i for nat in nats for i in range(nat)], dtype='category')
labels.index = self.index
return labels
@classmethod
def from_small_molecule_data(cls, center=None, ligand=None, distance=None, geometry=None,
offset=None, plane=None, axis=None, domains=None, unit='Angstrom'):
'''
        A minimal molecule builder for simple one-center, homogeneous ligand
        molecules of various general chemistry molecular geometries. If domains
        is not specified and the geometry is ambiguous (like 'bent'), the
        simplest geometry (fewest electronic domains) is chosen.
            Args:
center (str): atomic symbol of central atom
ligand (str): atomic symbol of ligand atoms
distance (float): distance between central atom and any ligand
geometry (str): molecular geometry
domains (int): number of electronic domains
offset (np.array): 3-array of position of central atom
            plane (str): cartesian plane of molecule (e.g. for 'square_planar')
            axis (str): cartesian axis of molecule (e.g. for 'linear')
            unit (str): unit of length used for distance (default 'Angstrom')
            Returns:
exatomic.atom.Atom: Atom table of small molecule
'''
return cls(make_small_molecule(center=center, ligand=ligand, distance=distance,
geometry=geometry, offset=offset, plane=plane,
axis=axis, domains=domains, unit=unit))
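    # Illustrative sketch (argument values are hypothetical):
    #     water = Atom.from_small_molecule_data(center='O', ligand='H',
    #                                           distance=0.96, geometry='bent')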
class UnitAtom(SparseDataFrame):
"""
    Sparse atom coordinates in the unit cell for periodic systems. These
    coordinates are used to update the corresponding
    :class:`~exatomic.atom.Atom` object.
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return UnitAtom
@classmethod
def from_universe(cls, universe):
if universe.periodic:
if "rx" not in universe.frame.columns:
universe.frame.compute_cell_magnitudes()
a, b, c = universe.frame[["rx", "ry", "rz"]].max().values
x = modv(universe.atom['x'].values, a)
y = modv(universe.atom['y'].values, b)
z = modv(universe.atom['z'].values, c)
df = pd.DataFrame.from_dict({'x': x, 'y': y, 'z': z})
df.index = universe.atom.index
df = df[universe.atom[['x', 'y', 'z']] != df].to_sparse()
return cls(df)
raise PeriodicUniverseError()
class ProjectedAtom(SparseDataFrame):
"""
Projected atom coordinates (e.g. on 3x3x3 supercell). These coordinates are
typically associated with their corresponding indices in another dataframe.
Note:
This table is computed when periodic two body properties are computed;
it doesn't have meaning outside of that context.
See Also:
:func:`~exatomic.two.compute_periodic_two`.
"""
_index = 'two'
_columns = ['x', 'y', 'z']
#@property
#def _constructor(self):
# return ProjectedAtom
class VisualAtom(SparseDataFrame):
"""
"""
_index = 'atom'
_columns = ['x', 'y', 'z']
@classmethod
def from_universe(cls, universe):
"""
"""
if universe.frame.is_periodic():
atom = universe.atom[['x', 'y', 'z']].copy()
atom.update(universe.unit_atom)
bonded = universe.atom_two.ix[universe.atom_two['bond'] == True, 'atom1'].astype(np.int64)
prjd = universe.projected_atom.ix[bonded.index].to_dense()
prjd['atom'] = bonded
prjd.drop_duplicates('atom', inplace=True)
prjd.set_index('atom', inplace=True)
atom.update(prjd)
atom = atom[atom != universe.atom[['x', 'y', 'z']]].to_sparse()
return cls(atom)
raise PeriodicUniverseError()
#@property
#def _constructor(self):
# return VisualAtom
class Frequency(DataFrame):
"""
The Frequency dataframe.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| frame | category | non-unique integer (req.) |
+-------------------+----------+-------------------------------------------+
| frequency | float | frequency of oscillation (cm-1) (req.) |
+-------------------+----------+-------------------------------------------+
| freqdx | int | index of frequency of oscillation (req.) |
+-------------------+----------+-------------------------------------------+
| dx | float | atomic displacement in x direction (req.) |
+-------------------+----------+-------------------------------------------+
| dy | float | atomic displacement in y direction (req.) |
+-------------------+----------+-------------------------------------------+
| dz | float | atomic displacement in z direction (req.) |
+-------------------+----------+-------------------------------------------+
| symbol | str | atomic symbol (req.) |
+-------------------+----------+-------------------------------------------+
| label | int | atomic identifier |
+-------------------+----------+-------------------------------------------+
"""
#@property
#def _constructor(self):
# return Frequency
def displacement(self, freqdx):
return self[self['freqdx'] == freqdx][['dx', 'dy', 'dz', 'symbol']]
def add_vibrational_mode(uni, freqdx):
    """Overwrite the atom and frame tables of ``uni`` with an animation of
    vibrational mode ``freqdx`` (atoms oscillating along their displacements)."""
    displacements = uni.frequency.displacement(freqdx)
if not all(displacements['symbol'] == uni.atom['symbol']):
print('Mismatch in ordering of atoms and frequencies.')
return
displaced = []
frames = []
# Should these only be absolute values?
factor = np.abs(np.sin(np.linspace(-4*np.pi, 4*np.pi, 200)))
for fac in factor:
moved = uni.atom.copy()
moved['x'] += displacements['dx'].values * fac
moved['y'] += displacements['dy'].values * fac
moved['z'] += displacements['dz'].values * fac
displaced.append(moved)
frames.append(uni.frame)
movie = pd.concat(displaced).reset_index()
movie['frame'] = np.repeat(range(len(factor)), len(uni.atom))
uni.frame = pd.concat(frames).reset_index()
uni.atom = movie
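# Illustrative sketch (``uni`` is a hypothetical universe with a populated
# Frequency table); this overwrites uni.atom and uni.frame with the animation:
#     add_vibrational_mode(uni, freqdx=0)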
| apache-2.0 |
carlvlewis/bokeh | bokeh/util/serialization.py | 31 | 7419 | """ Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
is_numpy = None
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
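# Illustrative sketch: the returned value is always a string, either a small
# integer (with BOKEH_SIMPLE_IDS=yes) or a UUID:
#     new_id = make_id()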
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
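# Illustrative sketch:
#     urljoin('http://localhost:5006/', 'bokeh/', 'static')
#     # -> 'http://localhost:5006/bokeh/static'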
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
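# Illustrative sketch (``plot`` is a hypothetical Bokeh object):
#     json_objs = dump([plot], docid=make_id())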
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
applying ``check_func`` to each fragment. If True, then collect
``func(fragment)`` in the final output
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
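# Illustrative sketch: replace every reference in a (hypothetical) nested
# fragment with its id, leaving the rest of the structure unchanged:
#     ids_only = json_apply(doc_fragment, is_ref, lambda ref: ref['id'])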
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
## not quite correct, truncates to ms..
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
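# Illustrative sketch of the nan/inf handling above:
#     transform_numerical_array(np.array([1.0, np.nan, np.inf]))
#     # -> [1.0, 'NaN', 'Infinity']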
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
Args:
datum (list) : a list of values or lists
is_numpy: True if numpy is present (see imports)
use_numpy: toggle numpy as a dependency for testing purposes
"""
is_numpy = is_numpy and use_numpy
if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
            # propagate the (possibly disabled) numpy flag to nested lists
            datum_copy.append(traverse_data(item, is_numpy))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
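# Illustrative sketch: nested lists are handled recursively and floats are
# made JSON-safe (assuming numpy is importable here):
#     traverse_data([[1.0, float('nan')], [float('inf'), 2.0]])
#     # -> [[1.0, 'NaN'], ['Infinity', 2.0]]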
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
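# Illustrative sketch (hypothetical column data):
#     transform_column_source_data({'x': np.array([0.0, np.inf]), 'y': [1, 2, 3]})
#     # -> {'x': [0.0, 'Infinity'], 'y': [1, 2, 3]}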
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/linear_model/least_angle.py | 6 | 57264 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..externals.six import string_types
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
        for small values of alpha (neither will they when using method 'lar').
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
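    Examples
    --------
    A minimal sketch (the data below is arbitrary; only the returned shapes
    are checked):

    >>> import numpy as np
    >>> from sklearn.linear_model import lars_path
    >>> X = np.array([[1., 0.], [0., 1.], [1., 1.]])
    >>> y = np.array([1., 2., 3.])
    >>> alphas, active, coefs = lars_path(X, y, method='lasso')
    >>> coefs.shape == (X.shape[1], alphas.shape[0])
    True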
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif isinstance(Gram, string_types) and Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error that is greater than
            # the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and
            # the equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
        Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data.
y : array-like, shape (n_samples,)
            Target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
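# Illustrative usage sketch (added; not part of the original class). The
# criterion minimized above is n_samples * log(MSE) + K * df, with K = 2 for
# AIC and K = log(n_samples) for BIC, e.g.:
#
#   import numpy as np
#   from sklearn.linear_model import LassoLarsIC
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 5)
#   y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(100)
#   model = LassoLarsIC(criterion='bic').fit(X, y)
#   # model.alpha_ is the alpha whose criterion_ value is smallest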
| bsd-3-clause |
Sparsh-Sharma/SteaPy | steapy/integral.py | 1 | 1066 | import numpy
from scipy import integrate
def integral(x, y, panel, dxdk, dydk):
"""
Evaluates the contribution from a panel at a given point.
Parameters
----------
x: float
x-coordinate of the target point.
y: float
y-coordinate of the target point.
panel: Panel object
Panel whose contribution is evaluated.
dxdk: float
Value of the derivative of x in a certain direction.
dydk: float
Value of the derivative of y in a certain direction.
Returns
-------
Contribution from the panel at a given point (x, y).
"""
def integrand(s):
return ( ((x - (panel.xa - numpy.sin(panel.beta)*s))*dxdk
+(y - (panel.ya + numpy.cos(panel.beta)*s))*dydk)
/ ((x - (panel.xa - numpy.sin(panel.beta)*s))**2
+(y - (panel.ya + numpy.cos(panel.beta)*s))**2) )
return integrate.quad(integrand, 0.0, panel.length)[0]
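# Usage sketch (added; illustrative only). `panel` only needs the attributes
# read inside `integrand` (xa, ya, beta, length), so a stand-in object works:
#
#   from collections import namedtuple
#   Panel = namedtuple('Panel', ['xa', 'ya', 'beta', 'length'])
#   p = Panel(xa=0.0, ya=0.0, beta=numpy.pi / 2.0, length=1.0)
#   contribution = integral(1.0, 0.5, p, dxdk=1.0, dydk=0.0)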
| mit |
ChanChiChoi/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating hyperplane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for the
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
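# Added note: this line solves w[0]*x + w[1]*y + intercept = 0 for y, i.e.
# y = -(w[0]/w[1]) * x - clf.intercept_[0]/w[1]; the weighted model below is
# rearranged the same way.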
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 32 | 2869 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
"""Test delayed input validation in fit (useful for grid search)."""
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
"""Assert that no zeros are materialized in the output."""
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
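# Illustrative sketch (added; not part of the original tests). FeatureHasher
# needs no fitted vocabulary; feature names are hashed straight to columns:
#
#   h = FeatureHasher(n_features=8, input_type="string")
#   X = h.transform([["cat", "dog"], ["dog"]])
#   # X is a 2 x 8 sparse matrix whose non-zero entries are +/-1 (summed on
#   # hash collisions)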
| bsd-3-clause |
Habasari/sms-tools | software/models_interface/harmonicModel_function.py | 21 | 2884 | # function to call the main analysis/synthesis functions in software/models/harmonicModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import sineModel as SM
import harmonicModel as HM
def main(inputFile='../../sounds/vignesh.wav', window='blackman', M=1201, N=2048, t=-90,
minSineDur=0.1, nH=100, minf0=130, maxf0=300, f0et=7, harmDevSlope=0.01):
"""
Analysis and synthesis using the harmonic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics could have higher allowed deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# detect harmonics of input sound
hfreq, hmag, hphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)
# synthesize the harmonics
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
# write the sound resulting from harmonic analysis
UF.wavwrite(y, fs, outputFile)
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the harmonic frequencies
plt.subplot(3,1,2)
if (hfreq.shape[1] > 0):
numFrames = hfreq.shape[0]
frmTime = H*np.arange(numFrames)/float(fs)
hfreq[hfreq<=0] = np.nan
plt.plot(frmTime, hfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of harmonic tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
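# Example of a non-default call (added for illustration); any keyword from the
# signature of main() can be overridden, e.g.:
#   main(inputFile='../../sounds/vignesh.wav', window='hamming', minSineDur=0.05)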
| agpl-3.0 |
maciejkula/scipy | scipy/special/basic.py | 1 | 40201 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta,
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch,
binom)
from . import specfun
from . import orthogonal
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk', 'erf_zeros',
'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Return the periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where n is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8,8));
>>> for idx, n in enumerate([2,3,4,9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute nt (<=1200) zeros of the Bessel functions Jn and Jn'
and arange them in order of their magnitudes.
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n,m,t,zo = specfun.jdzo(nt)
return zo[1:nt+1],n[:nt],m[:nt],t[:nt]
def jnyn_zeros(n,nt):
"""Compute nt zeros of the Bessel functions Jn(x), Jn'(x), Yn(x), and
Yn'(x), respectively. Returns 4 arrays of length nt.
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n),nt)
def jn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn(x).
"""
return jnyn_zeros(n,nt)[0]
def jnp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Jn'(x).
"""
return jnyn_zeros(n,nt)[1]
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2]
def ynp_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn'(x).
"""
return jnyn_zeros(n,nt)[3]
def y0_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y0(z), z0, and the value
of Y0'(z0) = -Y1(z0) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1(z), z1, and the value
of Y1'(z1) = Y0(z1) at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def y1p_zeros(nt,complex=0):
"""Returns nt (complex or real) zeros of Y1'(z), z1', and the value
of Y1(z1') at each zero.
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt,kf,kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v,z,n=1):
"""Return the nth derivative of Jv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v,z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
# return (jvp(v-1,z,n-1) - jvp(v+1,z,n-1))/2.0
def yvp(v,z,n=1):
"""Return the nth derivative of Yv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v,z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
# return (yvp(v-1,z,n-1) - yvp(v+1,z,n-1))/2.0
def kvp(v,z,n=1):
"""Return the nth derivative of Kv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v,z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v,z,n=1):
"""Return the nth derivative of Iv(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v,z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v,z,n=1):
"""Return the nth derivative of H1v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v,z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
# return (h1vp(v-1,z,n-1) - h1vp(v+1,z,n-1))/2.0
def h2vp(v,z,n=1):
"""Return the nth derivative of H2v(z) with respect to z.
"""
if not isinstance(n,int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v,z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
# return (h2vp(v-1,z,n-1) - h2vp(v+1,z,n-1))/2.0
def sph_jn(n,z):
"""Compute the spherical Bessel function jn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)], jnp[:(n+1)]
def sph_yn(n,z):
"""Compute the spherical Bessel function yn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n,z):
"""Compute the spherical Bessel functions, jn(z) and yn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,jn,jnp,yn,ynp = specfun.csphjy(n1,z)
else:
nm,yn,ynp = specfun.sphy(n1,z)
nm,jn,jnp = specfun.sphj(n1,z)
return jn[:(n+1)],jnp[:(n+1)],yn[:(n+1)],ynp[:(n+1)]
def sph_in(n,z):
"""Compute the spherical Bessel function in(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,In,Inp = specfun.sphi(n1,z)
return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n,z):
"""Compute the spherical Bessel function kn(z) and its derivative for
all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,kn,knp = specfun.sphk(n1,z)
return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n,z):
"""Compute the spherical Bessel functions, in(z) and kn(z) and their
derivatives for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z,0):
nm,In,Inp,kn,knp = specfun.csphik(n1,z)
else:
nm,In,Inp = specfun.sphi(n1,z)
nm,kn,knp = specfun.sphk(n1,z)
return In[:(n+1)],Inp[:(n+1)],kn[:(n+1)],knp[:(n+1)]
def riccati_jn(n,x):
"""Compute the Ricatti-Bessel function of the first kind and its
derivative for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm,jn,jnp = specfun.rctj(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def riccati_yn(n,x):
"""Compute the Ricatti-Bessel function of the second kind and its
derivative for all orders up to and including n.
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm,jn,jnp = specfun.rcty(n1,x)
return jn[:(n+1)],jnp[:(n+1)]
def erfinv(y):
"""
Inverse function for erf
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""
Inverse function for erfc
"""
return -ndtri(0.5*y)/sqrt(2)
def erf_zeros(nt):
"""Compute nt complex zeros of the error function erf(z).
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of the cosine Fresnel integral C(z).
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1,nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of the sine Fresnel integral S(z).
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of the sine and cosine Fresnel integrals
S(z) and C(z).
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2,nt), specfun.fcszo(1,nt)
def hyp0f1(v, z):
r"""Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v,z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
"""
v = atleast_1d(v)
z = atleast_1d(z)
v, z = np.broadcast_arrays(v, z)
arg = 2 * sqrt(abs(z))
old_err = np.seterr(all='ignore') # for z=0, a<1 and num=inf, next lines
num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
den = abs(z)**((v - 1.0) / 2)
num *= gamma(v)
np.seterr(**old_err)
num[z == 0] = 1
den[z == 0] = 1
return num / den
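# Sanity check (added, illustrative): 0F1(; 1/2; z**2/4) equals cosh(z), e.g.
#   z = 0.7
#   np.allclose(hyp0f1(0.5, z**2 / 4.0), np.cosh(z))  # -> True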
def assoc_laguerre(x, n, k=0.0):
"""Returns the n-th order generalized (associated) Laguerre polynomial.
The polynomial :math:`L_n^{(\alpha)}(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**alpha`` with ``alpha > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi
def polygamma(n, x):
"""Polygamma function which is the nth derivative of the digamma (psi)
function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1,x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m,q):
"""Compute expansion coefficients for even Mathieu functions and
modified Mathieu functions.
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm = 17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m,q)
fc = specfun.fcoef(kd,m,q,a)
return fc[:km]
def mathieu_odd_coef(m,q):
"""Compute expansion coefficients for even Mathieu functions and
modified Mathieu functions.
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
else:
qm = 17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
km = int(qm+0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m,q)
fc = specfun.fcoef(kd,m,q,b)
return fc[:km]
def lpmn(m,n,z):
"""Associated Legendre function of the first kind, Pmn(z)
Computes the associated Legendre function of the first kind
of order m and degree n,::
Pmn(z) = P_n^m(z)
and its derivative, ``Pmn'(z)``. Returns two arrays of size
``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all
orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf,nf = mgrid[0:mp+1,0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf,0.0,(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf,0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p,pd = specfun.lpmn(mp,n,z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p,pd
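# Illustrative check (added): lpmn(0, 2, 0.5)[0] holds the ordinary Legendre
# polynomials P0(0.5) = 1, P1(0.5) = 0.5 and P2(0.5) = (3*0.25 - 1)/2 = -0.125.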
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z)
Computes the (associated) Legendre function of the first kind
of order m and degree n,::
Pmn(z) = P_n^m(z)
and its derivative, ``Pmn'(z)``. Returns two arrays of size
``(m+1, n+1)`` containing ``Pmn(z)`` and ``Pmn'(z)`` for all
orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf,nf = mgrid[0:mp+1,0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf,0.0, (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf,0.0,gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p,pd = specfun.clpmn(mp,n,real(z),imag(z),type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p,pd
def lqmn(m,n,z):
"""Associated Legendre functions of the second kind, Qmn(z) and its
derivative, ``Qmn'(z)`` of order m and degree n. Returns two
arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and ``Qmn'(z)`` for
all orders from ``0..m`` and degrees from ``0..n``.
z can be complex.
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1,m)
nn = max(1,n)
if iscomplex(z):
q,qd = specfun.clqmn(mm,nn,z)
else:
q,qd = specfun.lqmn(mm,nn,z)
return q[:(m+1),:(n+1)],qd[:(m+1),:(n+1)]
def bernoulli(n):
"""Return an array of the Bernoulli numbers B0..Bn
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
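# Illustrative values (added): bernoulli(4) is approximately
# [1., -0.5, 0.16666667, 0., -0.03333333], i.e. B0..B4 = 1, -1/2, 1/6, 0, -1/30.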
def euler(n):
"""Return an array of the Euler numbers E0..En (inclusive)
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n,z):
"""Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn,pd = specfun.clpn(n1,z)
else:
pn,pd = specfun.lpn(n1,z)
return pn[:(n+1)],pd[:(n+1)]
## lpni
def lqn(n,z):
"""Compute sequence of Legendre functions of the second kind,
Qn(z) and derivatives for all degrees from 0 to n (inclusive).
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn,qd = specfun.clqn(n1,z)
else:
qn,qd = specfun.lqnb(n1,z)
return qn[:(n+1)],qd[:(n+1)]
def ai_zeros(nt):
"""Compute the zeros of Airy Functions Ai(x) and Ai'(x), a and a'
respectively, and the associated values of Ai(a') and Ai'(a).
Returns
-------
a[l-1] -- the lth zero of Ai(x)
ap[l-1] -- the lth zero of Ai'(x)
ai[l-1] -- Ai(ap[l-1])
aip[l-1] -- Ai'(a[l-1])
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def bi_zeros(nt):
"""Compute the zeros of Airy Functions Bi(x) and Bi'(x), b and b'
respectively, and the associated values of Bi(b') and Bi'(b).
Returns
-------
b[l-1] -- the lth zero of Bi(x)
bp[l-1] -- the lth zero of Bi'(x)
bi[l-1] -- Bi(bp[l-1])
bip[l-1] -- Bi'(b[l-1])
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt,kf)
def lmbda(v,x):
"""Compute sequence of lambda functions with arbitrary order v
and their derivatives. Lv0(x)..Lv(x) are computed with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1,x)
else:
vm, vl, dl = specfun.lamn(v1,x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v,x):
"""Compute sequence of parabolic cylinder functions Dv(x) and
their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbdv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbvv_seq(v,x):
"""Compute sequence of parabolic cylinder functions Dv(x) and
their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv,dp,pdf,pdd = specfun.pbvv(v1,x)
return dv[:n1+1],dp[:n1+1]
def pbdn_seq(n,z):
"""Compute sequence of parabolic cylinder functions Dn(z) and
their derivatives for D0(z)..Dn(z).
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb,cpd = specfun.cpbdn(n1,z)
return cpb[:n1+1],cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber' x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei' x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker' x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei' x
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,8)
def kelvin_zeros(nt):
"""Compute nt zeros of all the Kelvin functions returned in a
length 8 tuple of arrays of length nt.
The tuple contains the arrays of zeros of
(ber, bei, ker, kei, ber', bei', ker', kei')
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt,1), \
specfun.klvnzo(nt,2), \
specfun.klvnzo(nt,3), \
specfun.klvnzo(nt,4), \
specfun.klvnzo(nt,5), \
specfun.klvnzo(nt,6), \
specfun.klvnzo(nt,7), \
specfun.klvnzo(nt,8)
def pro_cv_seq(m,n,c):
"""Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,1)[1][:maxL]
def obl_cv_seq(m,n,c):
"""Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m,n,c,-1)[1][:maxL]
def ellipk(m):
"""
Complete elliptic integral of the first kind
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
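# Sanity check (added): ellipk(0.0) == pi/2 ~ 1.5707963; the integral diverges
# as m -> 1, which is why `ellipkm1` is recommended near that endpoint above.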
def agm(a,b):
"""Arithmetic, Geometric Mean
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a,b)
agm(a,b)=agm(b,a)
agm(a,a) = a
min(a,b) < agm(a,b) < max(a,b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
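# Worked illustration (added): for agm(1, 2) the recurrence gives a1 = 1.5,
# b1 = sqrt(2) ~ 1.41421, then a2 ~ 1.45711, b2 ~ 1.45648, converging to
# about 1.4567910.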
def comb(N, k, exact=False, repetition=False):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
>>> sc.comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
N = int(N)
k = int(k)
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
k,N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""
Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def factorial(n,exact=False):
"""
The factorial function, n! = special.gamma(n+1).
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=False case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0
val = 1
for k in xrange(1,n+1):
val *= k
return val
else:
n = asarray(n)
vals = gamma(n+1)
return where(n >= 0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi) n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n,0,-2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=True):
"""
Multifactorial of order k, i.e. ``n(!!...!)`` with k exclamation marks.
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of the multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
| bsd-3-clause |
bmazin/SDR | DataReadout/ChannelizerControls/channelizerCustom.py | 1 | 51713 | import sys, os, random, math, array, fractions,time,datetime,pickle
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import socket
import matplotlib, corr, time, struct, numpy
from bitstring import BitArray
import matplotlib.pyplot as mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from tables import *
from lib import iqsweep
#Things to update:
#DONE...make filename_NEW.txt only hold information for channel that is changed
#DONE...delete resonator (change FIR?)
#DONE...Do not add custom threshold when zooming or panning plot
#DONE...roughly calculate baseline from snapshot data and show on plot
#WORKING...show originally calculated median/threshold as faded line
#DONE...toggle button to save longsnapshot data
#DONE..read pulses does not write to a "sean" directory; plot histograms of peak heights
# Added logic to dump threshold information to pkl file after '(4)load thresholds' button pushed.
#
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Channelizer 2')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.dacStatus = 'off'
self.dramStatus = 'off'
self.tapStatus = 'off'
self.socketStatus = 'off'
self.numFreqs=0
self.ch_all = []
self.attens = numpy.array([1. for i in range(256)])
self.freqRes = 7812.5
self.sampleRate = 512e6
self.zeroChannels = [0]*256
#writing threshold to register
self.thresholds, self.medians = numpy.array([0.]*256), numpy.array([0.]*256)
self.customThresholds = numpy.array([360.]*256)
self.customResonators=numpy.array([[0.0,-1]]*256) #customResonator[ch]=[freq,atten]
self.lastNoiseFFT = None
self.lastNoiseFFTFreqs = None
def openClient(self):
self.roach = corr.katcp_wrapper.FpgaClient(self.textbox_roachIP.text(),7147)
time.sleep(2)
self.status_text.setText('connection established')
print 'Connected to ',self.textbox_roachIP.text()
self.button_openClient.setDisabled(True)
def loadFIRcoeffs(self):
N_freqs = len(map(float, unicode(self.textedit_DACfreqs.toPlainText()).split()))
taps = 26
for ch in range(N_freqs):
# If the resonator's attenuation is >=99 then its FIR should be zeroed
if self.zeroChannels[ch]:
fir = numpy.array([0.]*taps)*(2**11-1)
print 'deleted ch ',ch
else:
if numpy.ndim(self.fir) == 1:
fir = numpy.array(self.fir)*(2**11-1)
else:
fir = numpy.array(self.fir[ch])*(2**11-1)
print ch
#fir = numpy.array([1.]+[0]*(taps-1))*(2**11-1)
# 26 tap, 25 us matched fir
#fir = numpy.array([0.0875788844768 , 0.0840583257978 , 0.0810527406206 , 0.0779008825067 , 0.075106964962 , 0.0721712998256 , 0.0689723729398 , 0.066450095496 , 0.0638302570705 , 0.0613005685486 , 0.0589247737004 , 0.0565981917436 , 0.0544878914297 , 0.0524710948658 , 0.0503447054014 , 0.0483170854189 , 0.0463121066637 , 0.044504238059 , 0.0428469827102 , 0.0410615366471 , 0.0395570640218 , 0.0380071830756 , 0.0364836787854 , 0.034960959124 , 0.033456372241 , 0.0321854467182])*(2**11-1)
#26 tap, 20 us matched fir
#fir = numpy.array([ 0.102806030245 , 0.097570344415 , 0.0928789946181 , 0.0885800360545 , 0.0841898850361 , 0.079995145104 , 0.0761649967857 , 0.0724892663141 , 0.0689470889358 , 0.0657584886557 , 0.0627766233242 , 0.0595952531565 , 0.0566356208278 , 0.053835736579 , 0.0510331408751 , 0.048623806127 , 0.0461240096904 , 0.0438134132285 , 0.0418265743203 , 0.0397546477453 , 0.0377809254888 , 0.0358044897245 , 0.0338686929847 , 0.0321034547839 , 0.0306255734188 , 0.0291036235859 ])*(2**11-1)
#26 tap, 30 us matched fir
#fir = numpy.array([ 0.0781747107378 , 0.0757060398243 , 0.0732917718492 , 0.0708317694778 , 0.0686092845217 , 0.0665286923521 , 0.0643467681477 , 0.0621985982971 , 0.0600681642401 , 0.058054873199 , 0.0562486467178 , 0.0542955553149 , 0.0527148880657 , 0.05096365681 , 0.0491121116212 , 0.0474936094733 , 0.0458638771941 , 0.0443219286645 , 0.0429290438102 , 0.0415003391096 , 0.0401174498302 , 0.0386957715665 , 0.0374064708747 , 0.0362454802408 , 0.0350170176804 , 0.033873302383 ])*(2**11-1)
#fir = fir[::-1]
# 26 tap, fir, 250 kHz,
#fir = numpy.array([-0 , 0.000166959420533 , 0.00173811663844 , 0.00420937801998 , 0.00333739357391 , -0.0056305703275 , -0.0212738104942 , -0.0318529375832 , -0.0193635986879 , 0.0285916612022 , 0.106763943766 , 0.18981814328 , 0.243495321192 , 0.243495321192 , 0.18981814328 , 0.106763943766 , 0.0285916612022 , -0.0193635986879 , -0.0318529375832 , -0.0212738104942 , -0.0056305703275 , 0.00333739357391 , 0.00420937801998 , 0.00173811663844 , 0.000166959420533 , -0])*(2**11-1)
# 26 tap, fir, 125 kHz.
#fir = numpy.array([0 , -0.000431898216436 , -0.00157886921107 , -0.00255492263971 , -0.00171727439076 , 0.00289724121972 , 0.0129123447233 , 0.0289345497995 , 0.0500906370566 , 0.0739622085341 , 0.0969821586979 , 0.115211955161 , 0.125291869266 , 0.125291869266 , 0.115211955161 , 0.0969821586979 , 0.0739622085341 , 0.0500906370566 , 0.0289345497995 , 0.0129123447233 , 0.00289724121972 , -0.00171727439076 , -0.00255492263971 , -0.00157886921107 , -0.000431898216436 , -0])*(2**11-1)
# Generic 40 tap matched filter for 25 us lifetime pulse
#fir = numpy.array([0.153725595011 , 0.141052390733 , 0.129753816201 , 0.119528429291 , 0.110045314901 , 0.101336838027 , 0.0933265803805 , 0.0862038188673 , 0.0794067694409 , 0.0729543134914 , 0.0674101836798 , 0.0618283869464 , 0.0567253144676 , 0.0519730940444 , 0.047978953698 , 0.043791412767 , 0.0404560656757 , 0.0372466775252 , 0.0345000956808 , 0.0319243455811 , 0.0293425115323 , 0.0268372778298 , 0.0245216835234 , 0.0226817116475 , 0.0208024488535 , 0.0189575043357 , 0.0174290665862 , 0.0158791788119 , 0.0144611054123 , 0.0132599563305 , 0.0121083419203 , 0.0109003580368 , 0.0100328742978 , 0.00939328253743 , 0.00842247241585 , 0.00789304712484 , 0.00725494259117 , 0.00664528407122 , 0.00606688645845 , 0.00552041438208])*(2**11-1)
#fir = fir[::-1]
for n in range(taps/2):
coeff0 = int(fir[2*n])
coeff1 = int(fir[2*n+1])
coeff0 = numpy.binary_repr(int(fir[2*n]), 12)
coeff1 = numpy.binary_repr(int(fir[2*n+1]), 12)
coeffs = int(coeff1+coeff0, 2)
coeffs_bin = struct.pack('>l', coeffs)
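# Added note: the two 12-bit two's-complement coefficients above are
# concatenated into a single word and written to the paired
# FIR_b<2n>b<2n+1> register below.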
register_name = 'FIR_b' + str(2*n) + 'b' + str(2*n+1)
self.roach.write(register_name, coeffs_bin)
self.roach.write_int('FIR_load_coeff', (ch<<1) + (1<<0))
self.roach.write_int('FIR_load_coeff', (ch<<1) + (0<<0))
# Inactive channels will also be zeroed.
fir = numpy.array([0.]*taps)
for ch in range(N_freqs, 256):
print ch,'deleted'
for n in range(taps/2):
#coeffs = struct.pack('>h', fir[2*n]) + struct.pack('>h', fir[2*n+1])
coeffs = struct.pack('>h', fir[2*n+1]) + struct.pack('>h', fir[2*n])
register_name = 'FIR_b' + str(2*n) + 'b' + str(2*n+1)
self.roach.write(register_name, coeffs)
self.roach.write_int('FIR_load_coeff', (ch<<1) + (1<<0))
self.roach.write_int('FIR_load_coeff', (ch<<1) + (0<<0))
print 'done loading fir.'
self.status_text.setText('FIRs loaded')
def find_nearest(self, array, value):
idx=(numpy.abs(array-value)).argmin()
return idx
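# Example (added for illustration): find_nearest(numpy.array([1., 3., 7.]), 4.)
# returns 1, the index of the entry closest to the requested value.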
def loadCustomThresholds(self):
freqFile =str(self.textbox_freqFile.text())
if freqFile[-8:] == '_NEW.txt':
freqFile=freqFile[:-8]+'_THRESHOLD.txt'
else:
freqFile=freqFile[:-4]+'_THRESHOLD.txt'
try:
x=numpy.loadtxt(freqFile)
self.customThresholds = numpy.array([360.]*256)
if type(x[0]) == numpy.ndarray:
for arr in x:
self.customThresholds[int(arr[0])]=arr[1]
else:
self.customThresholds[int(x[0])]=x[1]
print 'Custom Thresholds loaded from',freqFile
except IOError:
#No custom thresholds to load
pass
def rmCustomThreshold(self):
ch = int(self.textbox_channel.text())
if self.customThresholds[ch] != 360.0:
self.customThresholds[ch]=360.0
print "Custom Threshold from channel",ch,"removed."
#self.loadSingleThreshold(ch)
scale_to_angle = 360./2**16*4/numpy.pi
self.roach.write_int('capture_threshold', int(self.thresholds[ch]/scale_to_angle))
self.roach.write_int('capture_load_thresh', (ch<<1)+(1<<0))
self.roach.write_int('capture_load_thresh', (ch<<1)+(0<<0))
print "Old threshold updated to roach"
freqFile =str(self.textbox_freqFile.text())
if freqFile[-8:] == '_NEW.txt':
freqFile=freqFile[:-8]+'_THRESHOLD.txt'
else:
freqFile=freqFile[:-4]+'_THRESHOLD.txt'
try:
x=numpy.loadtxt(freqFile)
f=open(freqFile,'w')
if type(x[0]) == numpy.ndarray:
for arr in x:
#print 'arr',arr
if arr[0]!=ch:
f.write(str(int(arr[0]))+'\t'+str(float(arr[1]))+'\n')
else:
if x[0]!=ch:
f.write(str(int(x[0]))+'\t'+str(float(x[1]))+'\n')
print "Removed Custom Threshold on channel ",ch," from ",freqFile
self.status_text.setText("Removed Custom Threshold on channel "+str(ch)+" from "+str(freqFile))
f.close()
except IOError:
print 'Unable to remove custom threshold from channel',ch
else:
print "No custom threshold set for channel",ch
def setCustomThreshold(self,event):
scale_to_angle = 360./2**16*4/numpy.pi
ch = int(self.textbox_channel.text())
newThreshold = event.ydata
#print "Threshold selected:",newThreshold
if event.ydata != None and self.mpl_toolbar.mode == '':
self.loadSingleThreshold(ch) #resets median
newThreshold = newThreshold - self.medians[ch] #for threshold adjusting firmware only!
self.customThresholds[ch] = newThreshold
newThreshold = int(newThreshold/scale_to_angle)
#print "writing threshold to register:",newThreshold
#print "median for channel ",ch,": ",self.medians[ch]
#self.customThresholds[ch] = newThreshold
#print "new threshold: ",newThreshold
#print "old threshold: ",self.thresholds[ch]/scale_to_angle
self.roach.write_int('capture_threshold', newThreshold)
self.roach.write_int('capture_load_thresh', (ch<<1)+(1<<0))
self.roach.write_int('capture_load_thresh', (ch<<1)+(0<<0))
print "channel: ", ch, "median: ", self.medians[ch], "new threshold: ", scale_to_angle*newThreshold
#print self.customThresholds[ch]
#self.loadSingleThreshold(ch)
freqFile =str(self.textbox_freqFile.text())
if freqFile[-8:] == '_NEW.txt':
freqFile=freqFile[:-8]+'_THRESHOLD.txt'
else:
freqFile=freqFile[:-4]+'_THRESHOLD.txt'
try:
f=open(freqFile,'a')
f.write(str(int(ch))+'\t'+str(float(self.customThresholds[ch]))+'\n')
f.close()
print 'Saved custom threshold to',freqFile
except IOError:
print 'ERROR: There was a problem saving thresholds to',freqFile
def loadThresholds(self):
""" Takes two time streams and concatenates together for a longer sample.
median is used instead of mean.
"""
x = raw_input('Is the lamp off? ')
Nsigma = float(self.textbox_Nsigma.text())
N_freqs = len(map(float, unicode(self.textedit_DACfreqs.toPlainText()).split()))
self.thresholds, self.medians = numpy.array([0.]*N_freqs), numpy.array([0.]*N_freqs)
#for ch in range(N_freqs):
#print 'attempting to load channel: ',ch
# self.loadSingleThreshold(ch)
steps = int(self.textbox_timeLengths.text())
L = 2**10
scale_to_angle = 360./2**16*4/numpy.pi
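# Added note (illustrative): with a signed 16-bit phase word,
# scale_to_angle = 360/2**16 * 4/pi ~ 0.006994 deg/count, so the clamp value
# -25736 used below corresponds to roughly -180 degrees.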
threshInfo = {}
threshInfo['roachNum'] = roachNum
threshInfo['scaleToAngle'] = scale_to_angle
threshInfo['N_freqs'] = N_freqs
now = datetime.datetime.now()
threshInfo['now'] = now
threshInfo['nowAscii'] = datetime.datetime.strftime(now,"%c")
threshInfo['phase'] = {}
threshInfo['phaseHg'] = {}
threshInfo['phaseBins'] = {}
for ch in range(N_freqs):
bin_data_phase = ''
for n in range(steps):
self.roach.write_int('ch_we', ch)
self.roach.write_int('startSnap', 0)
self.roach.write_int('snapPhase_ctrl', 1)
self.roach.write_int('snapPhase_ctrl', 0)
self.roach.write_int('startSnap', 1)
time.sleep(0.001)
bin_data_phase = bin_data_phase + self.roach.read('snapPhase_bram', 4*L)
phase = []
for m in range(steps*L):
phase.append(struct.unpack('>h', bin_data_phase[m*4+2:m*4+4])[0])
phase.append(struct.unpack('>h', bin_data_phase[m*4+0:m*4+2])[0])
phase = numpy.array(phase)
threshInfo['phase'][ch] = phase.copy()
#phase_avg = numpy.median(self.phase)
#sigma = self.phase.std()
n,bins= numpy.histogram(phase,bins=100)
threshInfo['phaseHg'][ch] = n.copy()
threshInfo['phaseBins'][ch] = bins.copy()
n = numpy.array(n,dtype='float32')/numpy.sum(n)
tot = numpy.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = numpy.sum(n[:i])
bins1 = .5*(bins[1:]+bins[:-1])
med = bins[self.find_nearest(tot,0.5)]
thresh = bins[self.find_nearest(tot,0.05)]
#threshold = int(med-Nsigma*abs(med-thresh))
threshold = int(-Nsigma*abs(med-thresh)) #for threshold adjusting firmware!
#threshold = int((phase_avg - Nsigma*sigma))
# -25736 = -180 degrees
if threshold < -25736:
threshold = -25736
self.thresholds[ch] = scale_to_angle*threshold
self.medians[ch] = scale_to_angle*med
if self.customThresholds[ch] != 360.0:
threshold = self.customThresholds[ch]/scale_to_angle
if threshold < -25736:
threshold = -25736
print 'Channel '+str(ch)+' has a custom threshold'
self.roach.write_int('capture_threshold', threshold)
self.roach.write_int('capture_load_thresh', (ch<<1)+(1<<0))
self.roach.write_int('capture_load_thresh', (ch<<1)+(0<<0))
print "channel: ", ch, "median: ", scale_to_angle*med, "threshold: ", scale_to_angle*threshold
#print "channel: ", ch, "avg: ", scale_to_angle*phase_avg, "sigma: ", scale_to_angle*sigma, "threshold: ", scale_to_angle*threshold
threshInfo['medians'] = self.medians.copy()
threshInfo['thresholds'] = self.thresholds.copy()
nowStr = datetime.datetime.strftime(now,"%Y%m%d-%H%M%S")
pklFileName = "thresh_%d_%s.pkl"%(roachNum,nowStr)
print 'Dump threshold information to ',os.path.join(os.environ['MKID_DATA_DIR'],pklFileName)
pickle.dump(threshInfo,open(os.path.join(os.environ['MKID_DATA_DIR'],pklFileName),'wb'))
self.status_text.setText('Thresholds loaded and written to %s'%pklFileName)
print 'Done loading thresholds'
def loadSingleThreshold(self,ch):
#print 'ch: ',ch
L = 2**10
scale_to_angle = 360./2**16*4/numpy.pi
Nsigma = float(self.textbox_Nsigma.text())
bin_data_phase = ''
steps = int(self.textbox_timeLengths.text())
for n in range(steps):
self.roach.write_int('ch_we', ch)
self.roach.write_int('startSnap', 0)
self.roach.write_int('snapPhase_ctrl', 1)
self.roach.write_int('snapPhase_ctrl', 0)
self.roach.write_int('startSnap', 1)
time.sleep(0.001)
bin_data_phase = bin_data_phase + self.roach.read('snapPhase_bram', 4*L)
phase = []
for m in range(steps*L):
phase.append(struct.unpack('>h', bin_data_phase[m*4+2:m*4+4])[0])
phase.append(struct.unpack('>h', bin_data_phase[m*4+0:m*4+2])[0])
phase = numpy.array(phase)
#phase_avg = numpy.median(phase)
#sigma = phase.std()
n,bins= numpy.histogram(phase,bins=100)
n = numpy.array(n,dtype='float32')/numpy.sum(n)
tot = numpy.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = numpy.sum(n[:i])
bins1 = .5*(bins[1:]+bins[:-1])
med = bins[self.find_nearest(tot,0.5)]
thresh = bins[self.find_nearest(tot,0.05)]
#threshold = int(med-Nsigma*abs(med-thresh))
threshold = int(-Nsigma*abs(med-thresh)) #for threshold adjusting firmware!
#threshold = int((phase_avg - Nsigma*sigma))
# -25736 = -180 degrees
if threshold < -25736:
threshold = -25736
self.thresholds[ch] = scale_to_angle*threshold
self.medians[ch] = scale_to_angle*med
if self.customThresholds[ch] != 360.0:
threshold = self.customThresholds[ch]/scale_to_angle
if threshold < -25736:
threshold = -25736
print 'Channel '+str(ch)+' has a custom threshold'
self.roach.write_int('capture_threshold', threshold)
self.roach.write_int('capture_load_thresh', (ch<<1)+(1<<0))
self.roach.write_int('capture_load_thresh', (ch<<1)+(0<<0))
print "channel: ", ch, "median: ", scale_to_angle*med, "threshold: ", scale_to_angle*threshold
#print "channel: ", ch, "avg: ", scale_to_angle*phase_avg, "sigma: ", scale_to_angle*sigma, "threshold: ", scale_to_angle*threshold
def snapshot(self):
self.displayResonatorProperties()
ch_we = int(self.textbox_channel.text())
self.roach.write_int('ch_we', ch_we)
#print self.roach.read_int('ch_we')
steps = int(self.textbox_snapSteps.text())
L = 2**10
bin_data_phase = ''
for n in range(steps):
self.roach.write_int('startSnap', 0)
self.roach.write_int('snapPhase_ctrl', 1)
self.roach.write_int('snapPhase_ctrl', 0)
self.roach.write_int('startSnap', 1)
time.sleep(0.001)
bin_data_phase = bin_data_phase + self.roach.read('snapPhase_bram', 4*L)
phase = []
for m in range(steps*L):
phase.append(struct.unpack('>h', bin_data_phase[m*4+2:m*4+4])[0])
phase.append(struct.unpack('>h', bin_data_phase[m*4+0:m*4+2])[0])
phase = numpy.array(phase)*360./2**16*4/numpy.pi
self.axes1.clear()
#self.axes1.plot(phase, '.-', [self.thresholds[ch_we]]*2*L*steps, 'r.', [self.medians[ch_we]]*2*L*steps, 'g.')
if steps <= 1000:
self.axes1.plot(phase,'.-')
med=numpy.median(phase)
print 'ch:',ch_we,'median:',med,
thresh=self.thresholds[ch_we]
if self.customThresholds[ch_we] != 360.0:
thresh=self.customThresholds[ch_we]
print "Custom Threshold: ", thresh,
self.axes1.plot([thresh+med]*2*L*steps,'r.',[med]*2*L*steps,'g.',alpha=1)
med=self.medians[ch_we]
self.axes1.plot([thresh+med]*2*L*steps,'y.',[med]*2*L*steps,'y.',alpha=0.2)
print "Threshold: ",self.thresholds[ch_we]
if numpy.ndim(self.fir) == 2:
self.axes0.clear()
self.axes0.plot(self.fir[ch_we])
#print "Channel: ",ch_we," median: " ,self.medians[ch_we],
#if self.customThresholds[ch_we] != 360.0:
# self.axes1.plot(phase, '.-', [self.customThresholds[ch_we]+self.medians[ch_we]]*2*L*steps, 'r.', [self.medians[ch_we]]*2*L*steps, 'g.',alpha=0.3)
# print "Custom Threshold: ",self.customThresholds[ch_we]," Threshold: ",self.thresholds[ch_we]
#else:
# self.axes1.plot(phase, '.-', [self.thresholds[ch_we]+self.medians[ch_we]]*2*L*steps, 'r.', [self.medians[ch_we]]*2*L*steps, 'g.',alpha=0)
# print "Threshold: ",self.thresholds[ch_we]
#print " "
self.canvas.draw()
print "snapshot taken"
def longsnapshot(self):
self.longsnapshotInfo = None
self.displayResonatorProperties()
ch_we = int(self.textbox_channel.text())
startTime = int(numpy.floor(time.time()))
self.roach.write_int('ch_we', ch_we)
#print self.roach.read_int('ch_we')
steps = int(self.textbox_longsnapSteps.text())
L = 2**10
numQDRSamples=2**19
numBytesPerSample=4
nLongsnapSamples = numQDRSamples*2*steps # 2 16-bit samples per 32-bit QDR word
bin_data_phase = ''
qdr_data_str = ''
for n in range(steps):
print 'starting sec snap'
self.roach.write_int('snapPhase_ctrl', 0)
self.roach.write_int('snapqdr_ctrl',0)
self.roach.write_int('startSnap', 0)
self.roach.write_int('snapqdr_ctrl',1)
self.roach.write_int('snapPhase_ctrl', 1)
self.roach.write_int('startSnap', 1)
time.sleep(2)
bin_data_phase = bin_data_phase + self.roach.read('snapPhase_bram', 4*L)
qdr_data_str = qdr_data_str + self.roach.read('qdr0_memory',numQDRSamples*numBytesPerSample)
print '1 sec read'
self.roach.write_int('snapPhase_ctrl', 0)
self.roach.write_int('snapqdr_ctrl',0)
self.roach.write_int('startSnap', 0)
phase = []
print "before m loop: steps=",steps," L=",L
for m in range(steps*L):
phase.append(struct.unpack('>h', bin_data_phase[m*4+2:m*4+4])[0])
phase.append(struct.unpack('>h', bin_data_phase[m*4+0:m*4+2])[0])
print "after m loop: len(phase) = ",len(phase)
if steps > 1:
dir = os.environ['MKID_DATA_DIR']
fname =os.path.join(dir,'ch_snap_r%dp%d_%dsecs.dat'%(roachNum,ch_we,steps))
file = open(fname,'w')
file.write(qdr_data_str)
print 'raw values saved to',fname
# qdr_values = struct.unpack('>%dh'%(nLongsnapSamples),qdr_data_str)
#
# fname =os.path.join(dir,'ch_snap_r%dp%d_%dsecs.bin'%(roachNum,ch_we,steps))
# numpy.savetxt(fname,qdr_values,fmt='%u')
# print 'unpacked values saved to',fname
# qdr_phase_values = numpy.array(qdr_values)*360./2**16*4/numpy.pi
# fname =os.path.join(dir,'ch_snap_r%dp%d_%dsecs.txt'%(roachNum,ch_we,steps))
# numpy.savetxt(fname,qdr_phase_values,fmt='%f')
# print 'phase values saved to',fname
if steps < 2:
print 'unpacking'
qdr_values = struct.unpack('>%dh'%(nLongsnapSamples),qdr_data_str)
print 'unpacked'
if steps < 2:
qdr_phase_values = numpy.array(qdr_values,dtype=numpy.float32)*360./2**16*4/numpy.pi
phase = numpy.array(phase)*360./2**16*4/numpy.pi
self.axes1.clear()
#self.axes1.plot(phase, '.-', [self.thresholds[ch_we]]*2*L*steps, 'r.', [self.medians[ch_we]]*2*L*steps, 'g.')
med=numpy.median(phase)
print 'ch:',ch_we,'median:',med,
thresh=self.thresholds[ch_we]
if self.customThresholds[ch_we] != 360.0:
thresh=self.customThresholds[ch_we]
print "Custom Threshold: ", thresh,
print "Threshold: ",self.thresholds[ch_we]
self.axes1.plot(qdr_phase_values,'b-')
self.axes1.plot([thresh+med]*2*numQDRSamples*steps,'r-',[med]*2*numQDRSamples*steps,'g-',alpha=1)
med=self.medians[ch_we]
self.axes1.plot([thresh+med]*2*numQDRSamples*steps,'y-',[med]*2*numQDRSamples*steps,'y-',alpha=0.2)
nFFTAverages = 100
nSamplesPerFFT = nLongsnapSamples/nFFTAverages
noiseFFT = numpy.zeros(nSamplesPerFFT)
noiseFFTFreqs = numpy.fft.fftfreq(nSamplesPerFFT)
for iAvg in xrange(nFFTAverages):
noise = numpy.fft.fft(qdr_phase_values[iAvg*nSamplesPerFFT:(iAvg+1)*nSamplesPerFFT])
noise = numpy.abs(noise)**2
noiseFFT += noise
noiseFFT /= nFFTAverages
self.axes0.clear()
nFFTfreqs = len(noiseFFTFreqs)/2
noiseFFTFreqs *= 1e6 #convert MHz to Hz
self.axes0.loglog(noiseFFTFreqs[1:nFFTfreqs],noiseFFT[1:nFFTfreqs])
            if self.lastNoiseFFT is not None:
self.axes0.loglog(self.lastNoiseFFTFreqs[1:nFFTfreqs],self.lastNoiseFFT[1:nFFTfreqs],'c',alpha=0.6)
self.lastNoiseFFTFreqs = noiseFFTFreqs
self.lastNoiseFFT = noiseFFT
self.axes0.set_xlabel('Freq (Hz)')
self.axes0.set_ylabel('FFT of snapshot, nAverages=%d'%nFFTAverages)
self.canvas.draw()
self.longsnapshotInfo = {"channel":ch_we,"startTime":startTime,"phase":qdr_phase_values}
self.writeSnapshotData()
print "longsnapshot taken"
def readPulses(self):
scale_to_degrees = 360./2**12*4/numpy.pi
channel_count = [0]*256
p1 = [[] for n in range(256)]
timestamp = [[] for n in range(256)]
baseline = [[] for n in range(256)]
peaks = [[] for n in range(256)]
seconds = int(self.textbox_seconds.text())
nStepsPerSec = 10.
steps = int(seconds*nStepsPerSec)
self.roach.write_int('startBuffer', 1)
time.sleep(1)
for n in range(steps):
addr0 = self.roach.read_int('pulses_addr')
time.sleep(1./nStepsPerSec)
addr1 = self.roach.read_int('pulses_addr')
bin_data_0 = self.roach.read('pulses_bram0', 4*2**14)
bin_data_1 = self.roach.read('pulses_bram1', 4*2**14)
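            # each recorded pulse is one 32-bit word in each BRAM:
            #   bram0: bits 0-19 timestamp, bits 20-31 baseline
            #   bram1: bits 0-11 pulse phase, bits 12-23 peak, bits 24-31 channel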
if addr1 >= addr0:
total_counts = addr1-addr0
for n in range(addr0, addr1):
raw_data_1 = struct.unpack('>L', bin_data_1[n*4:n*4+4])[0]
raw_data_0 = struct.unpack('>L', bin_data_0[n*4:n*4+4])[0]
ch = raw_data_1/2**24
                    channel_count[ch] = channel_count[ch] + 1
                    # record the pulse phase too, matching the wrap-around branch below
                    p1[ch].append((raw_data_1%2**12 - 2**11)*scale_to_degrees)
                    timestamp[ch].append(raw_data_0%2**20)
baseline[ch].append((raw_data_0>>20)%2**12)
peaks[ch].append((raw_data_1>>12)%2**12)
else:
total_counts = addr1+2**14-addr0
for n in range(addr0, 2**14):
raw_data_1 = struct.unpack('>L', bin_data_1[n*4:n*4+4])[0]
raw_data_0 = struct.unpack('>L', bin_data_0[n*4:n*4+4])[0]
ch = raw_data_1/2**24
channel_count[ch] = channel_count[ch] + 1
p1[ch].append((raw_data_1%2**12 - 2**11)*scale_to_degrees)
timestamp[ch].append(raw_data_0%2**20)
baseline[ch].append((raw_data_0>>20)%2**12)
peaks[ch].append((raw_data_1>>12)%2**12)
for n in range(0, addr1):
raw_data_1 = struct.unpack('>L', bin_data_1[n*4:n*4+4])[0]
raw_data_0 = struct.unpack('>L', bin_data_0[n*4:n*4+4])[0]
ch = raw_data_1/2**24
channel_count[ch] = channel_count[ch] + 1
p1[ch].append((raw_data_1%2**12 - 2**11)*scale_to_degrees)
timestamp[ch].append(raw_data_0%2**20)
baseline[ch].append((raw_data_0>>20)%2**12)
peaks[ch].append((raw_data_1>>12)%2**12)
print total_counts
self.roach.write_int('startBuffer', 0)
print 'sorted indices by counts', numpy.argsort(channel_count)[::-1]
print 'sorted by counts', numpy.sort(channel_count)[::-1]
print 'total counts by channel: ',channel_count
channel_count = numpy.array(channel_count)
ch = int(self.textbox_channel.text())
#numpy.savetxt('/home/sean/data/restest/test.dat', p1[ch])
        # With lamp off, run "readPulses." If count rate is above 50, it's anomalous
        # and its FIR should be set to 0.
#for n in range(256):
# if channel_count[n] > 100:
# self.zeroChannels[n] = 1
#x = numpy.arange(-270, 0, 3)
#y = numpy.histogram(data, 90)
base = numpy.array(baseline[ch],dtype='float')
base = base/2.0**9-4.0
base = base*180.0/numpy.pi
times = numpy.array(timestamp[ch],dtype='float')/1e6
peaksCh = numpy.array(peaks[ch],dtype = 'float')
peaksCh = peaksCh/2.0**9-4.0
peaksCh = peaksCh*180.0/numpy.pi
peaksSubBase = peaksCh-base
print 'count rate:',len(base)
print 'mean baseline:',numpy.mean(base)
print 'median baseline:',numpy.median(base)
print 'stddev baseline:',numpy.std(base)
self.axes0.clear()
#self.axes0.plot(timestamp[ch], '.')
self.axes0.plot(times,base, 'g.')
self.axes0.plot(times,peaksCh, 'b.')
self.axes0.plot(times,peaksSubBase, 'r.')
labels = self.axes0.get_xticklabels()
for label in labels:
label.set_rotation(-90)
self.axes0.set_title("ch=%d b: peakg:base r: peak-base"%ch)
self.axes0.set_xlabel("time within second (sec)")
self.axes1.clear()
baseMean = numpy.mean(base)
baseStd = numpy.std(base)
peakMean = numpy.mean(peaksCh)
peakStd = numpy.std(peaksCh)
psbMean = numpy.mean(peaksSubBase)
psbStd = numpy.std(peaksSubBase)
hTitle = "p:%.1f(%.1f) b:%.1f(%.1f) p-b:%.1f(%.1f)"%(peakMean,peakStd,baseMean,baseStd,psbMean,psbStd)
print "hTitle=",hTitle
r = (-150,10)
nBin=40
hgBase,bins = numpy.histogram(base,nBin,range=r, density=False)
hgPeak,bins = numpy.histogram(peaksCh,nBin,range=r, density=False)
hgPeakSubBase,bins = numpy.histogram(peaksSubBase,nBin,range=r, density=False)
width = 0.7*(bins[1]-bins[0])
center = (bins[:-1]+bins[1:])/2
self.axes1.bar(center, hgBase, width, alpha=0.5, linewidth=0, color='g')
self.axes1.bar(center, hgPeak, width, alpha=0.5, linewidth=0, color='b')
self.axes1.bar(center, hgPeakSubBase, width, alpha=0.5, linewidth=0, color='r')
self.axes1.set_xlabel("peak phase (degrees)")
self.axes1.set_title(hTitle)
self.canvas.draw()
def channelInc(self):
ch_we = int(self.textbox_channel.text())
ch_we = ch_we + 1
if ch_we >=self.numFreqs:
ch_we=0
self.textbox_channel.setText(str(ch_we))
def toggleDAC(self):
if self.dacStatus == 'off':
print "Starting LUT...",
self.roach.write_int('startDAC', 1)
time.sleep(1)
while self.roach.read_int('DRAM_LUT_rd_valid') != 0:
self.roach.write_int('startDAC', 0)
time.sleep(0.25)
self.roach.write_int('startDAC', 1)
time.sleep(1)
print ".",
print "done."
#self.button_startDAC.setText('Stop DAC')
self.dacStatus = 'on'
self.status_text.setText('DAC turned on. ')
else:
self.roach.write_int('startDAC', 0)
#self.button_startDAC.setText('Start DAC')
self.dacStatus = 'off'
self.status_text.setText('DAC turned off. ')
def loadIQcenters(self):
for ch in range(256):
I_c = int(self.iq_centers[ch].real/2**3)
Q_c = int(self.iq_centers[ch].imag/2**3)
center = (I_c<<16) + (Q_c<<0)
self.roach.write_int('conv_phase_centers', center)
self.roach.write_int('conv_phase_load_centers', (ch<<1)+(1<<0))
self.roach.write_int('conv_phase_load_centers', 0)
def select_bins(self, readout_freqs):
fft_len = 2**9
bins = ''
i = 0
residuals = []
for f in readout_freqs:
fft_bin = int(round(f*fft_len/self.sampleRate))
fft_freq = fft_bin*self.sampleRate/fft_len
freq_residual = round((f - fft_freq)/self.freqRes)*self.freqRes
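            # freq_residual is the offset of the requested tone from the center
            # of its coarse FFT bin, quantized to self.freqRes, and is returned
            # to the caller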
residuals.append(freq_residual)
bins = bins + struct.pack('>l', fft_bin)
self.roach.write_int('bins', fft_bin)
self.roach.write_int('load_bins', (i<<1) + (1<<0))
self.roach.write_int('load_bins', (i<<1) + (0<<0))
i = i + 1
self.status_text.setText('done writing LUTs. ')
return residuals
def loadLUTs(self):
self.scale_factor = 1.
self.iq_centers = numpy.array([0.+0j]*256)
        # Loads the DAC and DDS LUTs, as well as the IQ loop centers, from file.
if self.dacStatus == 'off':
self.roach.write_int('startDAC', 0)
else:
self.toggleDAC()
saveDir = str(self.textbox_lutDir.text())
#saveDir = str(os.environ['PWD'] + '/'+ self.textbox_lutDir.text())
f = open(saveDir+'luts.dat', 'r')
binaryData = f.read()
self.roach.write('dram_memory', binaryData)
x = numpy.loadtxt(saveDir+'centers.dat')
print "channelizerCustom: saveDir=",saveDir," x=",x
N_freqs = len(x[:,0])
for n in range(N_freqs):
self.iq_centers[n] = complex(x[n,0], x[n,1])
# Select and write bins for first stage of channelizer.
freqs = map(float, unicode(self.textedit_DACfreqs.toPlainText()).split())
f_base = float(self.textbox_loFreq.text())
for n in range(len(freqs)):
if freqs[n] < f_base:
freqs[n] = freqs[n] + 512e6
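        # tones below the LO are shifted up by 512 MHz so that
        # (freqs[n] - f_base) is non-negative when the DDS frequencies
        # are computed below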
freqs_dds = [0 for j in range(256)]
for n in range(len(freqs)):
freqs_dds[n] = round((freqs[n]-f_base)/self.freqRes)*self.freqRes
freq_residuals = self.select_bins(freqs_dds)
print 'LUTs and IQ centers loaded from file.'
self.loadIQcenters()
self.toggleDAC()
def importFreqs(self):
freqFile =str(self.textbox_freqFile.text())
self.loadCustomAtten()
try:
print "channelizerCustom.y: freqFile=",freqFile
x = numpy.loadtxt(freqFile)
x_string = ''
for i in range(len(self.customResonators)):
if self.customResonators[i][1]!=-1:
x[i+1,0]=self.customResonators[i][0]
x[i+1,3]=self.customResonators[i][1]
self.previous_scale_factor = x[0,0]
N_freqs = len(x[1:,0])
self.numFreqs=N_freqs
for l in x[1:,0]:
temp_x_string = str(l*1e9) + '\n'
x_string = x_string + str(l*1e9) + '\n'
self.iq_centers = numpy.array([0.+0j]*256)
print N_freqs,numpy.shape(self.iq_centers),numpy.shape(x)
for n in range(N_freqs):
#for n in range(256):
self.iq_centers[n] = complex(x[n+1,1], x[n+1,2])
self.attens = x[1:,3]
self.textedit_DACfreqs.setText(x_string)
self.findDeletedResonators()
print 'Freq/Atten loaded from',freqFile
self.status_text.setText('Freq/Atten loaded')
#self.loadCustomThresholds()
except IOError:
print 'No such file or directory:',freqFile
self.status_text.setText('IOError: trouble with %s'%freqFile)
def findDeletedResonators(self):
for i in range(len(self.customResonators)):
if self.customResonators[i][1] >=99:
self.zeroChannels[i] = 1
else:
self.zeroChannels[i] = 0 #needed so you can undelete resonators
def loadCustomAtten(self):
freqFile =str(self.textbox_freqFile.text())
newFreqFile = freqFile[:-4] + '_NEW.txt'
try:
y=numpy.loadtxt(newFreqFile)
self.customResonators=numpy.array([[0.0,-1]]*256)
if type(y[0]) == numpy.ndarray:
for arr in y:
self.customResonators[int(arr[0])]=arr[1:3]
else:
self.customResonators[int(y[0])]=y[1:3]
print 'Loaded custom resonator freq/atten from',newFreqFile
except IOError:
pass
def displayResonatorProperties(self):
ch=int(self.textbox_channel.text())
freqFile =str(self.textbox_freqFile.text())
x = numpy.loadtxt(freqFile)
#self.loadCustomAtten()
for i in range(len(self.customResonators)):
if self.customResonators[i][1]!=-1:
x[i+1,0]=self.customResonators[i][0]
x[i+1,3]=self.customResonators[i][1]
#print 'atten: '+str(x[ch,3])
self.label_attenuation.setText('attenuation: ' + str(int(x[ch+1,3])))
self.label_frequency.setText('freq (GHz): ' + str(float(x[ch+1,0])))
self.label_median.setText('median: '+str(self.medians[ch]))
if self.customThresholds[ch] != 360.0:
self.label_threshold.setText('threshold: '+str(self.customThresholds[ch]))
else:
self.label_threshold.setText('threshold: '+str(self.thresholds[ch]))
def importFIRcoeffs(self):
coeffsFile =str(self.textbox_coeffsFile.text())
self.fir = numpy.loadtxt(coeffsFile)
print self.fir
def file_dialog(self):
print 'add dialog box here'
#self.newdatadir = QFileDialog.getExistingDirectory(self, str("Choose SaveDirectory"), "",QFileDialog.ShowDirsOnly)
#if len(self.newdatadir) > 0:
# self.datadir = self.newdatadir
# print self.datadir
#self.ui.data_directory_lineEdit.setText(self.datadir) #put new path name in line edit
# self.button_saveDir.setText(str(self.datadir))
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
self.dpi = 100
self.fig = Figure((9.0, 5.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.axes0 = self.fig.add_subplot(121)
self.axes1 = self.fig.add_subplot(122)
#cid=self.canvas.mpl_connect('button_press_event', self.setCustomThreshold)
# Create the navigation toolbar, tied to the canvas
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Roach board's IP address
self.textbox_roachIP = QLineEdit('10.0.0.1%d'%roachNum)
self.textbox_roachIP.setMaximumWidth(200)
label_roachIP = QLabel('Roach IP Address:')
# Start connection to roach.
self.button_openClient = QPushButton("(1)Open Client")
self.button_openClient.setMaximumWidth(100)
self.connect(self.button_openClient, SIGNAL('clicked()'), self.openClient)
# DAC Frequencies.
self.textedit_DACfreqs = QTextEdit()
self.textedit_DACfreqs.setMaximumWidth(170)
self.textedit_DACfreqs.setMaximumHeight(100)
label_DACfreqs = QLabel('DAC Freqs:')
# File with frequencies/attens
self.textbox_freqFile = QLineEdit(os.path.join(datadir,'ps_freq%d.txt'%roachNum))
self.textbox_freqFile.setMaximumWidth(200)
# Import freqs from file.
self.button_importFreqs = QPushButton("(2)Load freqs")
self.button_importFreqs.setMaximumWidth(200)
self.connect(self.button_importFreqs, SIGNAL('clicked()'), self.importFreqs)
# File with FIR coefficients
#self.textbox_coeffsFile = QLineEdit('/home/sean/data/common/fir/matched_30us.txt')
#self.textbox_coeffsFile = QLineEdit('/home/sean/data/common/fir/lpf_250kHz.txt')
firFileName = os.environ['MKID_CUSTOM_FIR']
firDir = os.environ['MKID_CUSTOM_FIR_DIR']
if '%d' in firFileName:#the fir file is specific to the roach, stick the number in
firFileName = firFileName%roachNum
self.textbox_coeffsFile = QLineEdit(os.path.join(firDir,firFileName))
#self.textbox_coeffsFile = QLineEdit('/home/sean/data/common/fir/matched20121128r%d.txt'%roachNum)
self.textbox_coeffsFile.setMaximumWidth(200)
# Import FIR coefficients from file.
self.button_importFIRcoeffs = QPushButton("Import FIR coeffs.")
self.button_importFIRcoeffs.setMaximumWidth(200)
self.connect(self.button_importFIRcoeffs, SIGNAL('clicked()'), self.importFIRcoeffs)
# Channel increment by 1.
self.button_channelInc = QPushButton("Channel++")
self.button_channelInc.setMaximumWidth(100)
self.connect(self.button_channelInc, SIGNAL('clicked()'), self.channelInc)
# Load FIR coefficients.
self.button_loadFIRcoeffs = QPushButton("(3)load FIR")
self.button_loadFIRcoeffs.setMaximumWidth(170)
self.connect(self.button_loadFIRcoeffs, SIGNAL('clicked()'), self.loadFIRcoeffs)
# Load thresholds.
self.button_loadThresholds = QPushButton("(4)load thresholds")
self.button_loadThresholds.setMaximumWidth(170)
self.connect(self.button_loadThresholds, SIGNAL('clicked()'), self.loadThresholds)
# remove custom threshold button
self.button_rmCustomThreshold = QPushButton("remove custom threshold")
self.button_rmCustomThreshold.setMaximumWidth(170)
self.connect(self.button_rmCustomThreshold, SIGNAL('clicked()'), self.rmCustomThreshold)
# Channel to measure
self.textbox_channel = QLineEdit('0')
self.textbox_channel.setMaximumWidth(50)
# threshold N*sigma
self.textbox_Nsigma = QLineEdit('2.5')
self.textbox_Nsigma.setMaximumWidth(50)
label_Nsigma = QLabel('sigma ')
# Time snapshot of a single channel
self.button_snapshot = QPushButton("snapshot")
self.button_snapshot.setMaximumWidth(170)
self.connect(self.button_snapshot, SIGNAL('clicked()'), self.snapshot)
# Long time snapshot of a single channel
self.button_longsnapshot = QPushButton("longsnapshot")
self.button_longsnapshot.setMaximumWidth(170)
self.connect(self.button_longsnapshot, SIGNAL('clicked()'), self.longsnapshot)
# toggle whether to write long snap to pickle file
self.button_saveSnapshot = QPushButton("Save longsnapshot")
#self.button_saveSnapshot.setMaximumWidth(400)
self.connect(self.button_saveSnapshot,SIGNAL('clicked()'),
self.toggleSaveSnapshot)
self.saveSnapshot = True
self.toggleSaveSnapshot()
# Read pulses
self.button_readPulses = QPushButton("Read pulses")
self.button_readPulses.setMaximumWidth(170)
self.connect(self.button_readPulses, SIGNAL('clicked()'), self.readPulses)
# Seconds for "read pulses."
self.textbox_seconds = QLineEdit('1')
self.textbox_seconds.setMaximumWidth(50)
# lengths of 2 ms for defining thresholds.
self.textbox_timeLengths = QLineEdit('10')
self.textbox_timeLengths.setMaximumWidth(50)
label_timeLengths = QLabel('* 2 msec ')
# lengths of 2 ms steps to combine in a snapshot.
self.textbox_snapSteps = QLineEdit('10')
self.textbox_snapSteps.setMaximumWidth(50)
label_snapSteps = QLabel('* 2 msec')
# lengths of 2 ms steps to combine in a snapshot.
self.textbox_longsnapSteps = QLineEdit('1')
self.textbox_longsnapSteps.setMaximumWidth(50)
label_longsnapSteps = QLabel('* sec')
#median
self.label_median = QLabel('median: 0.0000')
self.label_median.setMaximumWidth(170)
#threshold
self.label_threshold = QLabel('threshold: 0.0000')
self.label_threshold.setMaximumWidth(170)
#attenuation
self.label_attenuation = QLabel('attenuation: 0')
self.label_attenuation.setMaximumWidth(170)
#frequency
self.label_frequency = QLabel('freq (GHz): 0.0000')
self.label_frequency.setMaximumWidth(170)
# Add widgets to window.
gbox0 = QVBoxLayout()
hbox00 = QHBoxLayout()
hbox00.addWidget(self.textbox_roachIP)
hbox00.addWidget(self.button_openClient)
gbox0.addLayout(hbox00)
hbox01 = QHBoxLayout()
hbox01.addWidget(self.textbox_freqFile)
hbox01.addWidget(self.button_importFreqs)
gbox0.addLayout(hbox01)
hbox02 = QHBoxLayout()
hbox02.addWidget(self.textbox_coeffsFile)
hbox02.addWidget(self.button_importFIRcoeffs)
hbox02.addWidget(self.button_loadFIRcoeffs)
gbox0.addLayout(hbox02)
hbox03 = QHBoxLayout()
hbox03.addWidget(self.textbox_timeLengths)
hbox03.addWidget(label_timeLengths)
hbox03.addWidget(self.textbox_Nsigma)
hbox03.addWidget(label_Nsigma)
hbox03.addWidget(self.button_loadThresholds)
gbox0.addLayout(hbox03)
gbox1 = QVBoxLayout()
gbox1.addWidget(label_DACfreqs)
gbox1.addWidget(self.textedit_DACfreqs)
gbox2 = QVBoxLayout()
hbox20 = QHBoxLayout()
hbox20.addWidget(self.textbox_channel)
hbox20.addWidget(self.button_channelInc)
gbox2.addLayout(hbox20)
hbox21 = QHBoxLayout()
hbox21.addWidget(self.button_snapshot)
hbox21.addWidget(self.textbox_snapSteps)
hbox21.addWidget(label_snapSteps)
gbox2.addLayout(hbox21)
hbox22 = QHBoxLayout()
hbox22.addWidget(self.button_longsnapshot)
hbox22.addWidget(self.textbox_longsnapSteps)
hbox22.addWidget(label_longsnapSteps)
hbox22.addWidget(self.button_saveSnapshot)
gbox2.addLayout(hbox22)
gbox2.addWidget(self.button_rmCustomThreshold)
hbox23 = QHBoxLayout()
hbox23.addWidget(self.textbox_seconds)
hbox23.addWidget(self.button_readPulses)
gbox2.addLayout(hbox23)
gbox3 = QVBoxLayout()
gbox3.addWidget(self.label_median)
gbox3.addWidget(self.label_threshold)
gbox3.addWidget(self.label_attenuation)
gbox3.addWidget(self.label_frequency)
hbox = QHBoxLayout()
hbox.addLayout(gbox0)
hbox.addLayout(gbox1)
hbox.addLayout(gbox2)
hbox.addLayout(gbox3)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(hbox)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def toggleSaveSnapshot(self):
self.saveSnapshot = not self.saveSnapshot
if self.saveSnapshot:
self.button_saveSnapshot.setStyleSheet("background-color: #00ff00")
self.writeSnapshotData()
else:
self.button_saveSnapshot.setStyleSheet("background-color: #ff0000")
def writeSnapshotData(self):
try:
if self.longsnapshotInfo:
if self.saveSnapshot:
lsi = self.longsnapshotInfo
ymdhms = time.strftime("%Y%m%d-%H%M%S",time.gmtime(lsi['startTime']))
pfn = "longsnapshot-%s.pkl"%ymdhms
ffn = os.path.join(os.environ['MKID_DATA_DIR'],pfn)
pickle.dump(lsi,open(ffn,'wb'))
print "pickle file written",ffn,"with nPhases=",len(lsi['phase'])
except AttributeError:
pass
def create_status_bar(self):
self.status_text = QLabel("Awaiting orders.")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",shortcut="Ctrl+S", slot=self.save_plot, tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close, shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu, (load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About", shortcut='F1', slot=self.on_about, tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
path = unicode(QFileDialog.getSaveFileName(self, 'Save file', '',file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ Message to user goes here.
"""
QMessageBox.about(self, "MKID-ROACH software demo", msg.strip())
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
if __name__=='__main__':
if len(sys.argv)!= 2:
print 'Usage: ',sys.argv[0],' roachNum'
exit(1)
roachNum = int(sys.argv[1])
datadir = os.environ['MKID_FREQ_PATH']
main()
| gpl-2.0 |
vermouthmjl/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
hainm/statsmodels | statsmodels/sandbox/examples/example_gam.py | 33 | 2343 | '''Original example for checking how well GAM works.
Note: plt.show() is called at the end to display the graphs.
'''
example = 2 # 1,2 or 3
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family
from statsmodels.genmod.generalized_linear_model import GLM
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 150
x1 = R.standard_normal(nobs)
x1.sort()
x2 = R.standard_normal(nobs)
x2.sort()
y = R.standard_normal((nobs,))
f1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2/4.))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) * 2 # 0.1
y += z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
y_pred = m.results.predict(d)
plt.figure()
plt.plot(y, '.')
plt.plot(z, 'b-', label='true')
plt.plot(y_pred, 'r-', label='AdditiveModel')
plt.legend()
plt.title('gam.AdditiveModel')
import scipy.stats, time
if example == 2:
print("binomial")
f = family.Binomial()
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
if example == 3:
print("Poisson")
f = family.Poisson()
y = y/y.max() * 3
yp = f.link.inverse(y)
p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/clipboard/clipboard.py | 14 | 3793 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO, PY2
def read_clipboard(sep='\s+', **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
Parameters
----------
sep : str, default '\s+'.
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
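
    Examples
    --------
    >>> # Illustrative only: the result depends on what is currently on the
    >>> # system clipboard (e.g. a small tab-separated table copied from a
    >>> # spreadsheet).
    >>> df = read_clipboard()  # doctest: +SKIP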
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
# Excel copies into clipboard with \t separation
# inspect no more then the 10 first lines, if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = '\t'
if sep is None and kwargs.get('delim_whitespace') is None:
sep = '\s+'
return read_table(StringIO(text), sep=sep, **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
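
    Examples
    --------
    >>> # Illustrative sketch: copies a DataFrame to the system clipboard in a
    >>> # form that pastes cleanly into Excel (requires one of the clipboard
    >>> # backends listed above).
    >>> df.to_clipboard(excel=True)  # doctest: +SKIP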
"""
encoding = kwargs.pop('encoding', 'utf-8')
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise ValueError('clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
text = buf.getvalue()
if PY2:
text = text.decode('utf-8')
clipboard_set(text)
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
| agpl-3.0 |
zorojean/tushare | tushare/datayes/basics.py | 14 | 3722 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 2015年7月4日
@author: JimmyLiu
@QQ:52799046
"""
from tushare.datayes import vars as vs
import pandas as pd
from pandas.compat import StringIO
class Basics():
def __init__(self , client):
self.client = client
def dy_master_secID(self, ticker='000001', partyID='',
cnSpell='', assetClass='', field=''):
"""
        Security codes and basic listing information
        getSecID
        Given one or more ticker symbols, returns the security ID (the unique
        identifier of the security in the data structure), together with basic
        listing information such as exchange, listing status, trading currency,
        and ISIN code.
"""
code, result = self.client.getData(vs.SEC_ID%(ticker, partyID,
cnSpell, assetClass, field))
return _ret_data(code, result)
def dy_master_tradeCal(self, exchangeCD='XSHG,XSHE', beginDate='',
endDate='', field=''):
"""
        Exchange trading calendar
        getTradeCal
        Given one or more exchanges and a date range, returns whether the market
        is open on each calendar date.
"""
code, result = self.client.getData(vs.TRADE_DATE%(exchangeCD, beginDate,
endDate, field))
return _ret_data(code, result)
def dy_master_equInfo(self, ticker='wx', pagesize='10',
pagenum='1', field=''):
"""
        Keyboard-wizard lookup for Shanghai/Shenzhen stocks
        getEquInfo
        Matches stock codes and names by pinyin or ticker; covers all currently
        listed Shanghai and Shenzhen stocks.
"""
code, result = self.client.getData(vs.EQU_INFO%(ticker, pagesize,
pagenum, field))
return _ret_data(code, result)
def dy_master_region(self, field=''):
"""
        Regional classification of China, based on administrative divisions.
        getSecTypeRegion
"""
code, result = self.client.getData(vs.REGION%(field))
return _ret_data(code, result)
def dy_master_regionRel(self, ticker='', typeID='',
secID='', field=''):
"""
        Regional classification of Shanghai/Shenzhen stocks, based on the
        administrative region of the place of registration.
        getSecTypeRegionRel
"""
code, result = self.client.getData(vs.REGION_REL%(ticker, typeID,
secID, field))
return _ret_data(code, result)
def dy_master_secType(self, field=''):
"""
        List of security classifications
        Top-level categories include Shanghai/Shenzhen stocks, Hong Kong stocks,
        funds, bonds, futures, options, etc., each further divided into subtypes;
        all classifications can be fetched in a single call.
        getSecType
"""
code, result = self.client.getData(vs.SEC_TYPE%(field))
return _ret_data(code, result)
def dy_master_secTypeRel(self, ticker='', typeID='101001004001001',
secID='', field=''):
"""
        Constituents of each security classification; the classifications
        themselves can be obtained via getSecType.
        getSecTypeRel
"""
code, result = self.client.getData(vs.SEC_TYPE_REL%(ticker, typeID,
secID, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
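# Minimal usage sketch (illustrative, not part of the API): client is any
# DataYes client object exposing getData(query) and returning (code, result),
# as assumed by the methods above.
#   bs = Basics(client)
#   df = bs.dy_master_secID(ticker='000001')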
| bsd-3-clause |
xuewei4d/scikit-learn | examples/miscellaneous/plot_multilabel.py | 17 | 4133 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by the
:class:`~sklearn.multiclass.OneVsRestClassifier` metaclassifier, which uses two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
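    # solving w[0]*x + w[1]*y + intercept = 0 for y gives y = a*x - intercept/w[1]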
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray', edgecolors=(0, 0, 0))
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
dcherian/tools | ROMS/pmacc/tools/post_tools/rompy/tags/rompy-0.1/rompy/plot_utils.py | 1 | 21117 | import datetime as dt
import time
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from matplotlib.colors import Normalize, ListedColormap, LinearSegmentedColormap, hsv_to_rgb
from matplotlib.cm import ScalarMappable
from matplotlib import ticker
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import utils
__version__ = '0.1'
def time_series_formatter(x,pos=None):
return dt.datetime.fromtimestamp(x).strftime('%Y-%m-%d %H:%MZ')
def map_varname(v):
mapping = {
'temp':'Temperature',
'salt':'Salinity',
'U':'Velocity',
}
return mapping[v]
def red_blue_cm():
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5,1.0,1.0),
(1.0, 0.83, 0.83)],
'green': [(0.0, 0.34, 0.34),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.75, 0.75),
(0.5, 1.0, 1.0),
(1.0, 0.0, 0.0)]
}
return LinearSegmentedColormap('red_blue_cm',cdict,N=256)
# return ListedColormap(['b','w','r'],name='red_blue',N=None)
def banas_cm(a,b,c,d):
norm = Normalize(vmin=a,vmax=d,clip=False)
cdict = {'red':[],'green':[],'blue':[]}
if not a==b:
# add dark blue
cdict['red'].append((0., 0., 0.))
cdict['green'].append((0., 0., 0.))
cdict['blue'].append((0., 0., 0.25))
# add blue
cdict['red'].append((norm(b), 0., 0.))
cdict['green'].append((norm(b), 0., 0.))
cdict['blue'].append((norm(b), 1.0, 1.0))
else:
cdict['red'].append((0., 0., 0.))
cdict['green'].append((0., 0., 0.))
cdict['blue'].append((0., 0., 1.0))
# add green between blue and yellow
cdict['red'].append((norm(b + (c-b)/4.0), 0., 0.))
cdict['green'].append((norm(b + (c-b)/4.0), 1.0, 1.0))
cdict['blue'].append((norm(b + (c-b)/4.0), 0., 0.))
# add yellow in the middle
cdict['red'].append((norm((b+c)/2.0), 1.0, 1.0))
cdict['green'].append((norm((b+c)/2.0), 1.0, 1.0))
cdict['blue'].append((norm((b+c)/2.0), 0., 0.))
if not c==d:
# add red
cdict['red'].append((norm(c), 1.0, 1.0))
cdict['green'].append((norm(c), 0., 0.))
cdict['blue'].append((norm(c), 0., 0.))
# add dark red
cdict['red'].append((1.0, 0.25, 0.25))
cdict['green'].append((1.0, 0., 0.))
cdict['blue'].append((1.0, 0., 0.))
else:
cdict['red'].append((1.0, 1.0, 1.))
cdict['green'].append((1.0, 0., 0.))
cdict['blue'].append((1.0, 0., 0.))
return LinearSegmentedColormap('banas_cm',cdict,N=100)
def banas_hsv_cm(a,b,c,d,N=100):
norm = Normalize(vmin=a,vmax=d,clip=False)
cdict = {'red':[],'green':[],'blue':[]}
if N >= 100:
n = N
else:
n = 100
aa = norm(a) # 0.0
bb = norm(b)
cc = norm(c)
yy = 0.5*(bb+cc) # yellow is half way between blue and red
dd = norm(d) # 1.0
center_value = 0.87
end_value = 0.65
tail_end_value = 0.3
blue_hue = 0.55
yellow_hue = 1./6.
red_hue = 0.04
green_hue = 1./3.
gg = ((green_hue - blue_hue)/(yellow_hue - blue_hue))*(yy-bb) + bb
green_desaturation_width = 0.67
green_desaturation_amount = 0.5
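    # gg is the normalized data value at which the hue passes through green;
    # saturation is dipped there by a Gaussian of relative width
    # green_desaturation_width so the green band is muted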
ii = np.linspace(0.,1.,n)
hue = np.zeros(ii.shape)
sat = np.ones(ii.shape)
val = np.zeros(ii.shape)
hsv = np.zeros((1,n,3))
val_scaler = -(center_value - end_value)/((cc-yy)*(cc-yy))
hue_scaler = -(blue_hue - yellow_hue)/((yy-bb)*(yy-bb))
for i in range(len(ii)):
if ii[i] < bb: # if true then aa is less than bb
#hue[i] = blue_hue
hsv[0,i,0] = blue_hue
#val[i] = tail_end_value*(1 - (ii[i]-aa)/(bb-aa) ) + end_value*( (ii[i]-aa)/(bb-aa) )
hsv[0,i,2] = tail_end_value*(1 - (ii[i]-aa)/(bb-aa) ) + end_value*( (ii[i]-aa)/(bb-aa) )
elif ii[i] <= yy:
#hsv[0,i,0] = blue_hue*(1 - (ii[i]-bb)/(yy-bb) ) + yellow_hue*( (ii[i]-bb)/(yy-bb) )
hsv[0,i,0] = hue_scaler*(ii[i] -2*bb + yy)*(ii[i] - yy)+yellow_hue
hsv[0,i,2] = end_value*(1 - (ii[i]-bb)/(yy-bb) ) + center_value*( (ii[i]-bb)/(yy-bb) )
elif ii[i] <= cc:
hsv[0,i,0] = yellow_hue*(1 - (ii[i]-yy)/(cc-yy) ) + red_hue*( (ii[i]-yy)/(cc-yy) )
#hsv[0,i,2] = center_value*(1 - (ii[i]-yy)/(cc-yy) ) + end_value*( (ii[i]-yy)/(cc-yy) )
hsv[0,i,2] = val_scaler*(ii[i] -2*yy + cc)*(ii[i] - cc)+end_value
elif ii[i] <= dd:
hsv[0,i,0] = red_hue
hsv[0,i,2] = end_value*(1 - (ii[i]-cc)/(dd-cc) ) + tail_end_value*( (ii[i]-cc)/(dd-cc) )
hsv[0,i,1] = 1.0 - green_desaturation_amount * np.exp(-np.power(3.0*(ii[i]-gg)/((cc-bb)*green_desaturation_width),2.0))
# plt.plot(np.linspace(a,d,n),hsv[0,:,0],'r',np.linspace(a,d,n),hsv[0,:,1],'g',np.linspace(a,d,n),hsv[0,:,2],'b')
# plt.show()
rgb = hsv_to_rgb(hsv)
cdict['red'].append((0.,0.,rgb[0,0,0]))
cdict['green'].append((0.,0.,rgb[0,0,1]))
cdict['blue'].append((0.,0.,rgb[0,0,2]))
for j in range(len(ii)-2):
i = j+1
cdict['red'].append((ii[i],rgb[0,i,0],rgb[0,i+1,0]))
cdict['green'].append((ii[i],rgb[0,i,1],rgb[0,i+1,1]))
cdict['blue'].append((ii[i],rgb[0,i,2],rgb[0,i+1,2]))
cdict['red'].append((1.0,rgb[0,-1,0],rgb[0,-1,0]))
cdict['green'].append((1.0,rgb[0,-1,1],rgb[0,-1,1]))
cdict['blue'].append((1.0,rgb[0,-1,2],rgb[0,-1,2]))
return LinearSegmentedColormap('banas_cm',cdict,N=N)
def make_cmap_sm_norm(d=None,clim=None,cmap=None):
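    # clim may hold 2 entries (vmin, vmax) or 4 (min, low transition, high
    # transition, max); with 2 entries the colormap transitions are pinned at
    # the limits themselves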
if cmap == 'red_blue':
cmap = red_blue_cm()
if cmap == 'banas_cm':
if clim==None:
cmap = banas_cm(np.min(d[:]),np.min(d[:]),np.max(d[:]),np.max(d[:]))
elif len(clim) == 2:
cmap = banas_cm(clim[0],clim[0],clim[1],clim[1])
elif len(clim) == 4:
cmap = banas_cm(clim[0],clim[1],clim[2],clim[3])
elif cmap == 'banas_hsv_cm':
if clim==None:
cmap = banas_hsv_cm(np.min(d[:]),np.min(d[:]),np.max(d[:]),np.max(d[:]))
elif len(clim) == 2:
cmap = banas_hsv_cm(clim[0],clim[0],clim[1],clim[1])
elif len(clim) == 4:
cmap = banas_hsv_cm(clim[0],clim[1],clim[2],clim[3])
norm = Normalize(vmin=clim[0],vmax=clim[-1],clip=False)
sm = ScalarMappable(norm=norm,cmap=cmap)
sm.set_clim(vmin=clim[0],vmax=clim[-1])
sm.set_array(np.array([0]))
return cmap,sm,norm
def plot_surface(x,y,data,filename='/Users/lederer/tmp/rompy.tmp.png'):
print('Making plot')
fig = Figure(facecolor='white',figsize=(12.0,12.0))
ax = fig.add_subplot(111)
ax.pcolormesh(x,y,data)
# ax.contour(x,y,data,20)
ax.axis('tight')
ax.set_aspect('equal')
ax.grid()
FigureCanvas(fig).print_png(filename)
def plot_map(lon,lat,data,filename='/Users/lederer/tmp/rompy.map.png',resolution='h',clim=None,cmap='banas_hsv_cm',title=None, caxis_label=None):
fig = Figure(facecolor='white',figsize=(12.0,9.0))
# ax = fig.add_subplot(111)
longest_side_size = 24.0
#ax = fig.add_axes((0.,0.,1.,1.),axisbg='grey')
cmap,sm,norm = make_cmap_sm_norm(d=data,clim=clim,cmap=cmap)
ax1 = fig.add_axes((0.1,0.1,0.4,0.8),axisbg='grey')
ax2 = fig.add_axes((0.5,0.1,0.4,0.8),axisbg='grey')
cax = fig.add_axes([0.9, 0.1, 0.02, 0.8],frameon=False)
lllat = np.min(lat)
urlat = np.max(lat)
lllon = np.min(lon)
urlon = np.max(lon)
# puget sound bounding box
psbb_lllat = 47.0
psbb_urlat = 48.5
psbb_lllon = -123.2
psbb_urlon = -122.1
# print(lllat,urlat,lllon,urlon)
m1 = Basemap(projection='merc',llcrnrlat=lllat,urcrnrlat=urlat,llcrnrlon=lllon,urcrnrlon=urlon,resolution=resolution,ax=ax1)
m2 = Basemap(projection='merc',llcrnrlat=psbb_lllat,urcrnrlat=psbb_urlat,llcrnrlon=psbb_lllon,urcrnrlon=psbb_urlon,resolution='f',ax=ax2)
x1,y1 = m1(*(lon,lat))
x2,y2 = m2(*(lon,lat))
    # Code to make the map fit snugly with the png
#print(np.max(x), np.min(x), np.max(y),np.min(y))
# width = np.max(x) - np.min(x)
# height = np.max(y) - np.min(y)
# if width >= height:
# fig.set_size_inches(longest_side_size, (height/width)*longest_side_size)
# else:
# fig.set_size_inches((width/height)*longest_side_size, longest_side_size)
# ax.set_position([0.,0.,1.,1.])
# bbox = ax.get_position()
# print(bbox.xmin, bbox.xmax, bbox.ymin, bbox.ymax)
#
# fig.set_size_inches((bbox.xmax - bbox.xmin)*longest_side_size, (bbox.ymax - bbox.ymin)*longest_side_size)
# ax.set_position([0.,0.,1.,1.])
# bbox = ax.get_position()
# print(bbox.xmin, bbox.xmax, bbox.ymin, bbox.ymax)
#
#
# if clim==None:
# cmap = banas_hsv_cm(np.min(data[:]),np.min(data[:]),np.max(data[:]),np.max(data[:]))
# norm = Normalize(vmin=np.min(data[:]),vmax=np.max(data[:]),clip=False)
# elif len(clim) == 2:
# cmap = banas_hsv_cm(clim[0],clim[0],clim[1],clim[1],N=20)
# norm = Normalize(vmin=clim[0],vmax=clim[-1],clip=False)
# elif len(clim) == 4:
# cmap = banas_hsv_cm(clim[0],clim[1],clim[2],clim[3])
# norm = Normalize(vmin=clim[0],vmax=clim[-1],clip=False)
pcm1 = m1.pcolormesh(x1,y1,data,cmap=cmap,norm=norm)
m1.drawcoastlines(linewidth=0.5)
pcm2 = m2.pcolormesh(x2,y2,data,cmap=cmap,norm=norm)
m2.drawcoastlines(linewidth=0.5)
my_colorbar = fig.colorbar(sm,cax=cax)
if not caxis_label == None:
my_colorbar.set_label(caxis_label)
if not title == None:
ax1.set_title(title)
FigureCanvas(fig).print_png(filename)
def plot_profile(data,depth,filename='/Users/lederer/tmp/rompy.profile.png'):
fig = Figure()
ax = fig.add_subplot(111)
ax.plot(data,depth)
ax.grid()
FigureCanvas(fig).print_png(filename)
def plot_mickett(coords,data,varname='',region='',filename='/Users/lederer/tmp/rompy.mickett.png',n=1,x_axis_style='kilometers',x_axis_offset=0,clim=None,cmap=None,labeled_contour_gap=None):
fig = Figure(facecolor='white')
fontsize = 8
cmap,sm,norm = make_cmap_sm_norm(d=data,clim=clim,cmap=cmap)
ax1 = fig.add_axes([0.1, 0.55, 0.75, 0.4])
ax2 = fig.add_axes([0.1, 0.1, 0.75, 0.4])
cax = fig.add_axes([0.9, 0.1, 0.02, 0.8],frameon=False)
x_axis_as_km = utils.coords_to_km(coords)
station_locations = x_axis_as_km[0:-1:n]
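    # every n-th point along the curtain is treated as a station location and
    # marked with a grey triangle near the top of the upper panel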
#
# if not clim == None:
# norm = Normalize(vmin=clim[0],vmax=clim[-1],clip=False)
# sm = ScalarMappable(norm=norm,cmap=cmap)
# sm.set_clim(vmin=clim[0],vmax=clim[-1])
# sm.set_array(np.array([0]))
# else:
# norm = None
#
my_plot11 = ax1.contourf(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,norm=norm,cmap=cmap)
my_plot12 = ax1.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
if labeled_contour_gap is not None:
if int(labeled_contour_gap) == labeled_contour_gap:
contour_label_fmt = '%d'
else:
contour_label_fmt = '%1.2f'
solid_contours = np.arange(clim[0],clim[-1],labeled_contour_gap)
# ax1_xlim = ax1.get_xlim()
my_plot13 = ax1.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,solid_contours,colors='k',linewidths=0.5)
ax1.clabel(my_plot13,inline=True,fmt=contour_label_fmt,fontsize=fontsize)
# ax1.set_xlim(ax1_xlim)
my_plot14 = ax1.plot(station_locations, 1.5*np.ones(len(station_locations)),'v',color='grey')
ax1.fill_between(x_axis_as_km,coords['zm'][0,:],ax1.get_ylim()[0],color='grey')
# ax1.set_ylim((-20,ax1.get_ylim()[1]))
ax1.set_ylim((-20,2))
ax1.set_xlim((0,x_axis_as_km[-1]))
for yticklabel in ax1.get_yticklabels():
yticklabel.set_fontsize(fontsize)
my_plot21 = ax2.contourf(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,norm=norm,cmap=cmap)
my_plot22 = ax2.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
if labeled_contour_gap is not None:
# ax2_xlim = ax2.get_xlim()
my_plot23 = ax2.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,solid_contours,colors='k',linewidths=0.5)
ax2.clabel(my_plot23,inline=True,fmt=contour_label_fmt,fontsize=fontsize)
# ax2.set_xlim = ax2_xlim
# print(ax2.get_ylim())
# ax2.fill_between(x_axis_as_km,coords['zm'][0,:],ax2.get_ylim()[0],color='grey')
ax2.fill_between(x_axis_as_km,coords['zm'][0,:],-1000.0,color='grey')
# ax2.set_ylim(ax2.get_ylim()[0],2)
# print(ax2.get_ylim())
ax2.set_ylim((np.min(coords['zm'][:])-20.0),2)
# print(ax2.get_ylim())
ax2.set_xlim((0,x_axis_as_km[-1]))
for yticklabel in ax2.get_yticklabels():
yticklabel.set_fontsize(fontsize)
# if clim == None:
# sm = my_plot11
my_colorbar = fig.colorbar(sm,cax=cax)
if labeled_contour_gap is not None:
my_colorbar.add_lines(my_plot23)
ax1.set_title('%s %s from a ROMS run' % (region,varname))
ax1.set_ylabel('depth in meters',position=(0.05,0))
# ax1.set_xticks(10*np.arange(x_axis_as_km[-1]/10))
ax1.set_xticks(station_locations)
ax1.set_xticklabels('')
if x_axis_style == 'kilometers' or x_axis_style == 'kilometer':
#tick_list = x_axis_as_km[::n]
#ax2.set_xticks(tick_list)
#ax2.set_xticklabels([int(tick) for tick in tick_list],size=fontsize)
td = 10 #tick_distance
ax2.set_xticks(td*np.arange(x_axis_as_km[-1]/td) + (x_axis_offset % td))
ax2.set_xticklabels([int(num) for num in np.arange(-int(x_axis_offset - x_axis_offset % td),x_axis_as_km[-1],td)])
for xticklabel in ax2.get_xticklabels():
xticklabel.set_fontsize(fontsize)
ax2.set_xlabel('Kilometers')
elif x_axis_style == 'stations' or x_axis_style == 'station':
if region == 'Hood Canal':
tick_list = x_axis_as_km[::n]
ax2.set_xticks(tick_list)
ax2.set_xticklabels(utils.hood_canal_station_list(),size=fontsize)
ax2.set_xlabel('Station ID')
elif region == 'Main Basin':
tick_list = x_axis_as_km[::n]
ax2.set_xticks(tick_list)
ax2.set_xticklabels(utils.main_basin_station_list(),size=fontsize)
ax2.set_xlabel('Station ID')
else:
ax2.set_xticks(x_axis_as_km)
ax2.set_xticklabels('')
ax2.set_xlabel('Kilometers')
FigureCanvas(fig).print_png(filename)
def plot_time_series_profile(t,z,d,filename='/Users/lederer/tmp/rompy.time_series_profile.png',clim=None,cmap='banas_hsv_cm',varname=None, title=None, caxis_label=None):
fontsize = 8
cmap,sm,norm = make_cmap_sm_norm(d=d,clim=clim,cmap=cmap)
fig = Figure(facecolor='white')
ax1 = fig.add_axes([0.1, 0.55, 0.75, 0.32])
ax2 = fig.add_axes([0.1, 0.18, 0.75, 0.32])
cax = fig.add_axes([0.9, 0.18, 0.02, 0.69],frameon=False)
my_plot11 = ax1.contourf(t,z,d,100,norm=norm,cmap=cmap)
my_plot12 = ax1.contour(t,z,d,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
my_plot21 = ax2.contourf(t,z,d,100,norm=norm,cmap=cmap)
my_plot22 = ax2.contour(t,z,d,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
my_colorbar = fig.colorbar(sm,cax=cax)
if not caxis_label == None:
my_colorbar.set_label(caxis_label)
ax1.set_ylim(-20,2)
ax1.set_xlim(t[0][0],t[-1][-1])
# lets pick some x ticks that aren't stupid
xmin_dt = dt.datetime.fromtimestamp(t[0][0])
xmax_dt = dt.datetime.fromtimestamp(t[-1][-1])
time_window = xmax_dt -xmin_dt
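    # choose tick spacing from the span: 6-hourly under 2 days, daily under
    # 8 days, weekly under 50 days, otherwise just the two endpoints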
if (time_window) < dt.timedelta(hours=48):
date_list = []
next_time = xmax_dt- dt.timedelta(seconds = xmax_dt.minute*60 + xmax_dt.second)
while next_time >= xmin_dt:
date_list.append(next_time)
next_time = next_time - dt.timedelta(hours=6)
elif (time_window) < dt.timedelta(days=8):
date_list = []
next_time = xmax_dt - dt.timedelta(seconds = (xmax_dt.hour*60 + xmax_dt.minute)*60 + xmax_dt.second)
while next_time >= xmin_dt:
date_list.append(next_time)
next_time = next_time - dt.timedelta(days=1)
elif (time_window) < dt.timedelta(days=50):
date_list = []
next_time = xmax_dt - dt.timedelta(seconds = (xmax_dt.hour*60 + xmax_dt.minute)*60 + xmax_dt.second)
while next_time >= xmin_dt:
date_list.append(next_time)
next_time = next_time - dt.timedelta(days=7)
else :
date_list = [xmin_dt, xmax_dt]
x_tick_list = []
for date in date_list:
x_tick_list.append(time.mktime(date.timetuple()))
ax2.xaxis.set_major_locator(ticker.FixedLocator(x_tick_list))
for yticklabel in ax1.get_yticklabels():
yticklabel.set_fontsize(fontsize)
ax1.set_xticklabels('')
ax2.set_xlim(t[0][0],t[-1][-1])
ax2.set_ylim(np.min(z[0,:]),np.max(z[-1,:]))
for yticklabel in ax2.get_yticklabels():
yticklabel.set_fontsize(fontsize)
locs = ax2.get_xticks()
new_labels = []
ax2.xaxis.set_major_formatter(ticker.FuncFormatter(time_series_formatter))
for label in ax2.get_xticklabels():
label.set_ha('right')
label.set_rotation(30)
if title == None or title == '':
ax1.set_title('%s Over Time at a Point'% map_varname(varname))
else:
ax1.set_title(title)
FigureCanvas(fig).print_png(filename)
def plot_parker(coords,data,varname='',title=None,region='',filename='/Users/lederer/tmp/rompy.mickett.png',n=1,x_axis_style='kilometers',resolution='i',x_axis_offset=0,clim=None,cmap=None,labeled_contour_gap=None, caxis_label=None):
fig = Figure(facecolor='white',figsize=(12.0,9.0))
fontsize = 8
cmap,sm,norm = make_cmap_sm_norm(d=data,clim=clim,cmap=cmap)
ax1 = fig.add_axes([0.1, 0.55, 0.65, 0.4]) # top 20 meters
ax2 = fig.add_axes([0.1, 0.1, 0.65, 0.4]) # full column
ax3 = fig.add_axes([0.7, 0.55, 0.3, 0.4],axis_bgcolor='white')#'#298FAF') # map of domain containing curtain
cax = fig.add_axes([0.84, 0.1, 0.02, 0.4],frameon=False) # subplot for the color axis
x_axis_as_km = utils.coords_to_km(coords)
station_locations = x_axis_as_km[0:-1:n]
my_plot11 = ax1.contourf(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,norm=norm,cmap=cmap)
my_plot12 = ax1.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
if labeled_contour_gap is not None:
if int(labeled_contour_gap) == labeled_contour_gap:
contour_label_fmt = '%d'
else:
contour_label_fmt = '%1.2f'
solid_contours = np.arange(clim[0],clim[-1],labeled_contour_gap)
my_plot13 = ax1.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,solid_contours,colors='k',linewidths=0.5)
ax1.clabel(my_plot13,inline=True,fmt=contour_label_fmt,fontsize=fontsize)
my_plot14 = ax1.plot(station_locations, 1.5*np.ones(len(station_locations)),'v',color='grey')
ax1.fill_between(x_axis_as_km,coords['zm'][0,:],ax1.get_ylim()[0],color='grey')
ax1.set_ylim((-20,2))
ax1.set_xlim((0,x_axis_as_km[-1]))
for yticklabel in ax1.get_yticklabels():
yticklabel.set_fontsize(fontsize)
my_plot21 = ax2.contourf(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,norm=norm,cmap=cmap)
my_plot22 = ax2.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None,norm=norm,cmap=cmap)
if labeled_contour_gap is not None:
my_plot23 = ax2.contour(np.tile(x_axis_as_km,(coords['zm'].shape[0],1)),coords['zm'],data,solid_contours,colors='k',linewidths=0.5)
ax2.clabel(my_plot23,inline=True,fmt=contour_label_fmt,fontsize=fontsize)
ax2.fill_between(x_axis_as_km,coords['zm'][0,:],-1000.0,color='grey')
ax2.set_ylim((np.min(coords['zm'][:])-20.0),2)
ax2.set_xlim((0,x_axis_as_km[-1]))
for yticklabel in ax2.get_yticklabels():
yticklabel.set_fontsize(fontsize)
my_colorbar = fig.colorbar(sm,cax=cax)
	if caxis_label is not None:
my_colorbar.set_label(caxis_label)
if labeled_contour_gap is not None:
my_colorbar.add_lines(my_plot23)
	if title is None:
ax1.set_title('%s %s from a ROMS run' % (region,varname))
else:
ax1.set_title(title)
ax1.set_ylabel('depth in meters',position=(0.05,0))
ax1.set_xticks(station_locations)
ax1.set_xticklabels('')
if x_axis_style == 'kilometers' or x_axis_style == 'kilometer':
td = 10 #tick_distance
left_most_tick_label = -x_axis_offset + (x_axis_offset % td)
left_most_tick = left_most_tick_label + x_axis_offset
ax2.set_xticks(np.arange(left_most_tick,x_axis_as_km[-1],td))
ax2.set_xticklabels([int(num) for num in np.arange(left_most_tick_label, x_axis_as_km[-1],td)])
# ax2.set_xticks(td*np.arange(x_axis_as_km[-1]/td) + (x_axis_offset % td))
# ax2.set_xticklabels([int(num) for num in np.arange(-int(x_axis_offset - x_axis_offset % td),x_axis_as_km[-1],td)])
for xticklabel in ax2.get_xticklabels():
xticklabel.set_fontsize(fontsize)
ax2.set_xlabel('Kilometers')
elif x_axis_style == 'stations' or x_axis_style == 'station':
if region == 'Hood Canal':
tick_list = x_axis_as_km[::n]
ax2.set_xticks(tick_list)
ax2.set_xticklabels(utils.hood_canal_station_list(),size=fontsize)
ax2.set_xlabel('Station ID')
elif region == 'Main Basin':
tick_list = x_axis_as_km[::n]
ax2.set_xticks(tick_list)
ax2.set_xticklabels(utils.main_basin_station_list(),size=fontsize)
ax2.set_xlabel('Station ID')
else:
ax2.set_xticks(x_axis_as_km)
ax2.set_xticklabels('')
ax2.set_xlabel('Kilometers')
# make map in the top right corner
# these lat lon values are derived from the curtain defined for the plot
# lllat = np.min(coords['ym'])
# urlat = np.max(coords['ym'])
# lllon = np.min(coords['xm'])
# urlon = np.max(coords['xm'])
# lat lon values for the inset map show a close-up of the Puget Sound
lllat = 47.0
urlat = 48.5
lllon = -123.3
urlon = -122.2
m = Basemap(projection='merc',llcrnrlat=lllat,urcrnrlat=urlat,llcrnrlon=lllon,urcrnrlon=urlon,resolution=resolution,ax=ax3)
x,y = m(*(coords['xm'],coords['ym']))
# pcm = m.plot(x,y,'r')
m.drawcoastlines(linewidth=0.5)
m.fillcontinents(color='#ECECEC')
pcm1 = m.plot(x,y,'r',linewidth=0.5)
pcm2 = m.plot(x[0:-1:n],y[0:-1:n],'.k')
FigureCanvas(fig).print_png(filename)
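# Hedged usage sketch (not part of the original module; all argument values
# below are illustrative assumptions). plot_parker expects a coords dict with
# 'zm', 'xm', and 'ym' arrays plus a data array matching the curtain shape:
#
#   plot_parker(coords, data, varname='salt', region='Hood Canal',
#               filename='/tmp/rompy.parker.png', n=4,
#               x_axis_style='stations', clim=[0, 34],
#               labeled_contour_gap=2, caxis_label='salinity (psu)')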
| mit |
ohspite/spectral | spectral/graphics/spypylab.py | 1 | 48675 | ########################################################################
#
# spypylab.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2001-2013 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, [email protected]
#
'''
A module to use matplotlib for creating raster and spectral views.
'''
from __future__ import division, print_function, unicode_literals
__all__ = ['ImageView', 'imshow']
import numpy as np
import warnings
_mpl_callbacks_checked = False
def check_disable_mpl_callbacks():
'''Disables matplotlib key event handlers, if appropriate.'''
import matplotlib as mpl
from spectral import settings
global _mpl_callbacks_checked
if _mpl_callbacks_checked is True or \
settings.imshow_disable_mpl_callbacks is False:
return
_mpl_callbacks_checked = True
mpl.rcParams['keymap.back'] = ''
mpl.rcParams['keymap.xscale'] = ''
mpl.rcParams['keymap.yscale'] = ''
mpl.rcParams['keymap.home'] = 'r'
mpl.rcParams['keymap.all_axes'] = ''
def xy_to_rowcol(x, y):
'''Converts image (x, y) coordinate to pixel (row, col).'''
return (int(y + 0.5), int(x + 0.5))
def rowcol_to_xy(r, c):
'''Converts pixel (row, col) coordinate to (x, y) of pixel center.'''
return (float(c), float(r))
class MplCallback(object):
'''Base class for callbacks using matplotlib's CallbackRegistry.
Behavior of MplCallback objects can be customized by providing a callable
object to the constructor (or `connect` method) or by defining a
`handle_event` method in a subclass.
'''
# If the following class attribute is False, callbacks will silently
# disconnect when an exception is encountered during event processing
    # (e.g., if an associated window has been closed). If it is True, the
# associated exception will be rethrown.
raise_event_exceptions = False
show_events = False
def __init__(self, registry=None, event=None, callback=None):
'''
Arguments:
registry (ImageView, CallbackRegistry, or FigureCanvas):
The object that will generate the callback. If the argument is
an ImageView, the callback will be bound to the associated
FigureCanvas.
event (str):
The event type for which callbacks should be generated.
callback (callable):
An optional callable object to handle the event. If not
provided, the `handle_event` method of the MplCallback will
be called to handle the event (this method must be defined by
                a derived class if `callback` is not provided).
Note that these arguments can be deferred until `MplCallback.connect`
is called.
'''
self.set_registry(registry)
self.event = event
self.callback = callback
self.cid = None
self.is_connected = False
self.children = []
def set_registry(self, registry=None):
'''
Arguments:
registry (ImageView, CallbackRegistry, or FigureCanvas):
The object that will generate the callback. If the argument is
an ImageView, the callback will be bound to the associated
FigureCanvas.
'''
from matplotlib.cbook import CallbackRegistry
if isinstance(registry, CallbackRegistry):
self.registry = registry
elif isinstance(registry, ImageView):
self.registry = registry.axes.figure.canvas
else:
self.registry = registry
def connect(self, registry=None, event=None, callback=None):
'''Binds the callback to the registry and begins receiving event.
Arguments:
registry (ImageView, CallbackRegistry, or FigureCanvas):
The object that will generate the callback. If the argument is
an ImageView, the callback will be bound to the associated
FigureCanvas.
event (str):
The event type for which callbacks should be generated.
callback (callable):
An optional callable object to handle the event. If not
provided, the `handle_event` method of the MplCallback will
be called to handle the event (this method must be defined by
                a derived class if `callback` is not provided).
Note that these arguments can also be provided to the constructor.
'''
from matplotlib.cbook import CallbackRegistry
if self.is_connected:
raise Exception('Callback is already connected.')
if registry is not None:
self.set_registry(registry)
if event is not None:
self.event = event
if callback is not None:
self.callback = callback
if self.callback is None:
cb = self
else:
cb = self.callback
if isinstance(self.registry, CallbackRegistry):
self.cid = self.registry.connect(self.event, self)
elif isinstance(self.registry, ImageView):
self.cid = self.registry.connect(self.event, self)
else:
# Assume registry is an MPL canvas
self.cid = self.registry.mpl_connect(self.event, self)
self.is_connected = True
for c in self.children:
c.connect()
def disconnect(self):
'''Stops the callback from receiving events.'''
from matplotlib.cbook import CallbackRegistry
if isinstance(self.registry, CallbackRegistry):
self.registry.disconnect(self.cid)
else:
# Assume registry is an MPL canvas
self.registry.mpl_disconnect(self.cid)
self.is_connected = False
self.cid = None
for c in self.children:
c.disconnect()
def __call__(self, *args, **kwargs):
if self.callback is not None:
try:
self.callback(*args, **kwargs)
except Exception as e:
self.disconnect()
if self.raise_event_exceptions:
raise e
else:
try:
self.handle_event(*args, **kwargs)
except Exception as e:
self.disconnect()
if self.raise_event_exceptions:
raise e
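# Hedged usage sketch (not part of the original module): per the MplCallback
# docstring, behavior can be customized by passing a callable instead of
# subclassing. `fig` is assumed to be an existing matplotlib Figure.
#
#   cb = MplCallback(registry=fig.canvas, event='button_press_event',
#                    callback=lambda event: print(event.xdata, event.ydata))
#   cb.connect()       # start receiving events
#   cb.disconnect()    # stop receiving events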
class ImageViewCallback(MplCallback):
'''Base class for callbacks that operate on ImageView objects.'''
def __init__(self, view, *args, **kwargs):
super(ImageViewCallback, self).__init__(*args, **kwargs)
self.view = view
class ParentViewPanCallback(ImageViewCallback):
'''A callback to pan an image based on a click in another image.'''
def __init__(self, child, parent, *args, **kwargs):
'''
Arguments:
`child` (ImageView):
The view that will be panned based on a parent click event.
`parent` (ImageView):
The view whose click location will cause the child to pan.
See ImageViewCallback and MplCallback for additional arguments.
'''
super(ParentViewPanCallback, self).__init__(parent, *args, **kwargs)
self.child = child
def handle_event(self, event):
if self.show_events:
print(event, 'key = %s' % event.key)
if event.inaxes is not self.view.axes:
return
(r, c) = xy_to_rowcol(event.xdata, event.ydata)
(nrows, ncols) = self.view._image_shape
if r < 0 or r >= nrows or c < 0 or c >= ncols:
return
kp = KeyParser(event.key)
if event.button == 1 and kp.mods_are('ctrl'):
self.child.pan_to(event.ydata, event.xdata)
def connect(self):
super(ParentViewPanCallback, self).connect(registry=self.view,
event='button_press_event')
class ImageViewKeyboardHandler(ImageViewCallback):
'''Default handler for keyboard events in an ImageView.'''
def __init__(self, view, *args, **kwargs):
super(ImageViewKeyboardHandler, self).__init__(view,
registry=view,
event='key_press_event',
*args, **kwargs)
self.cb_key_release = ImageViewCallback(view, registry=view,
event='key_release_event',
callback=self.on_key_release,
*args, **kwargs)
# Must add to children member to automatically connect/disconnect.
self.children.append(self.cb_key_release)
self.idstr = ''
def on_key_release(self, event):
if self.show_events:
print('key = %s' % event.key)
kp = KeyParser(event.key)
key = kp.key
if key is None and self.view.selector is not None and \
self.view.selector.get_active() and kp.mods_are('shift') \
and self.view.selector.eventpress is not None:
print('Resetting selection.')
self.view.selector.eventpress = None
self.view.selector.set_active(False)
self.view.selection = None
self.view.selector.to_draw.set_visible(False)
self.view.refresh()
def handle_event(self, event):
from spectral import settings
if self.show_events:
print('key = %s' % event.key)
kp = KeyParser(event.key)
key = kp.key
#-----------------------------------------------------------
# Handling for keyboard input related to class ID assignment
#-----------------------------------------------------------
if key is None and kp.mods_are('shift') and \
self.view.selector is not None:
# Rectangle selector is active while shift key is pressed
self.view.selector.set_active(True)
return
if key in [str(i) for i in range(10)] and self.view.selector is not None:
if self.view.selection is None:
print('Select an image region before assigning a class ID.')
return
if len(self.idstr) > 0 and self.idstr[-1] == '!':
print('Cancelled class ID assignment.')
self.idstr = ''
return
else:
self.idstr += key
return
if key == 'enter' and self.view.selector is not None:
if self.view.selection is None:
print('Select an image region before assigning a class ID.')
return
if len(self.idstr) == 0:
print('Enter a numeric class ID before assigning a class ID.')
return
if self.idstr[-1] != '!':
print('Press ENTER again to assign class %s to pixel ' \
'region [%d:%d, %d:%d]:' \
% ((self.idstr,) + tuple(self.view.selection)))
self.idstr += '!'
return
else:
i = int(self.idstr[:-1])
n = self.view.label_region(self.view.selection, i)
if n == 0:
print('No pixels reassigned.')
else:
print('%d pixels reassigned to class %d.' % (n, i))
self.idstr = ''
return
if len(self.idstr) > 0:
self.idstr = ''
print('Cancelled class ID assignment.')
#-----------------------------------------------------------
# General keybinds
#-----------------------------------------------------------
if key == 'a' and self.view.display_mode == 'overlay':
self.view.class_alpha = max(self.view.class_alpha - 0.05, 0)
elif key == 'A' and self.view.display_mode == 'overlay':
self.view.class_alpha = min(self.view.class_alpha + 0.05, 1)
elif key == 'c':
if self.view.classes is not None:
self.view.set_display_mode('classes')
elif key == 'C':
if self.view.classes is not None \
and self.view.data_axes is not None:
self.view.set_display_mode('overlay')
elif key == 'd':
if self.view.data_axes is not None:
self.view.set_display_mode('data')
elif key == 'h':
self.print_help()
elif key == 'i':
if self.view.interpolation == 'nearest':
self.view.interpolation = settings.imshow_interpolation
else:
self.view.interpolation = 'nearest'
elif key == 'z':
self.view.open_zoom()
def print_help(self):
print()
print('Mouse Functions:')
print('----------------')
print('ctrl+left-click -> pan zoom window to pixel')
print('shift+left-click&drag -> select rectangular image region')
print('left-dblclick -> plot pixel spectrum')
print()
print('Keybinds:')
print('---------')
print('0-9 -> enter class ID for image pixel labeling')
print('ENTER -> apply specified class ID to selected rectangular region')
print('a/A -> decrease/increase class overlay alpha value')
print('c -> set display mode to "classes" (if classes set)')
print('C -> set display mode to "overlay" (if data and ' \
'classes set)')
print('d -> set display mode to "data" (if data set)')
print('h -> print help message')
print('i -> toggle pixel interpolation between "nearest" and ' \
'SPy default.')
print('z -> open zoom window')
print()
        print('See matplotlib imshow documentation for additional key binds.')
print()
class KeyParser(object):
'''Class to handle ambiguities in matplotlib event key values.'''
aliases = {'ctrl': ['ctrl', 'control'],
'alt': ['alt'],
'shift': ['shift'],
'super': ['super']}
def __init__(self, key_str=None):
self.reset()
if key_str is not None:
self.parse(key_str)
def reset(self):
self.key = None
self.modifiers = set()
def parse(self, key_str):
'''Extracts the key value and modifiers from a string.'''
self.reset()
if key_str is None:
return
tokens = key_str.split('+')
for token in tokens[:-1]:
mods = self.get_token_modifiers(token)
if len(mods) == 0:
raise ValueError('Unrecognized modifier: %s' % repr(token))
self.modifiers.update(mods)
# For the final token, need to determine if it is a key or modifier
mods = self.get_token_modifiers(tokens[-1])
if len(mods) > 0:
self.modifiers.update(mods)
else:
self.key = tokens[-1]
def has_mod(self, m):
'''Returns True if `m` is one of the modifiers.'''
return m in self.modifiers
def mods_are(self, *args):
'''Return True if modifiers are exactly the ones specified.'''
for a in args:
if a not in self.modifiers:
return False
return True
def get_token_modifiers(self, token):
mods = set()
for (modifier, aliases) in list(self.aliases.items()):
if token in aliases:
mods.add(modifier)
return mods
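# Hedged usage sketch (not part of the original module): KeyParser splits a
# matplotlib key string into its modifier set and base key.
#
#   kp = KeyParser('ctrl+shift+z')
#   kp.key                         # 'z'
#   kp.has_mod('ctrl')             # True
#   kp.mods_are('ctrl', 'shift')   # True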
class ImageViewMouseHandler(ImageViewCallback):
def __init__(self, view, *args, **kwargs):
super(ImageViewMouseHandler, self).__init__(view,
registry=view,
event='button_press_event',
*args, **kwargs)
def handle_event(self, event):
'''Callback for click event in the image display.'''
if self.show_events:
print(event, ', key = %s' % event.key)
if event.inaxes is not self.view.axes:
return
(r, c) = (int(event.ydata + 0.5), int(event.xdata + 0.5))
(nrows, ncols) = self.view._image_shape
if r < 0 or r >= nrows or c < 0 or c >= ncols:
return
kp = KeyParser(event.key)
if event.button == 1:
if event.dblclick and kp.key is None:
if self.view.source is not None:
from spectral import settings
import matplotlib.pyplot as plt
if self.view.spectrum_plot_fig_id is None:
f = plt.figure()
self.view.spectrum_plot_fig_id = f.number
try:
f = plt.figure(self.view.spectrum_plot_fig_id)
except:
f = plt.figure()
self.view.spectrum_plot_fig_id = f.number
s = plt.subplot(111)
settings.plotter.plot(self.view.source[r, c],
self.view.source)
s.xaxis.axes.relim()
s.xaxis.axes.autoscale(True)
f.canvas.draw()
class SpyMplEvent(object):
def __init__(self, name):
self.name = name
class ImageView(object):
'''Class to manage events and data associated with image raster views.
In most cases, it is more convenient to simply call :func:`~spectral.graphics.spypylab.imshow`,
which creates, displays, and returns an :class:`ImageView` object. Creating
an :class:`ImageView` object directly (or creating an instance of a subclass)
enables additional customization of the image display (e.g., overriding
default event handlers). If the object is created directly, call the
:meth:`show` method to display the image. The underlying image display
functionality is implemented via :func:`matplotlib.pyplot.imshow`.
'''
selector_rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=True)
selector_lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
def __init__(self, data=None, bands=None, classes=None, source=None,
**kwargs):
'''
Arguments:
`data` (ndarray or :class:`SpyFile`):
                The source of RGB bands to be displayed, with shape (R, C, B).
If the shape is (R, C, 3), the last dimension is assumed to
provide the red, green, and blue bands (unless the `bands`
argument is provided). If :math:`B > 3` and `bands` is not
provided, the first, middle, and last band will be used.
`bands` (triplet of integers):
Specifies which bands in `data` should be displayed as red,
green, and blue, respectively.
`classes` (ndarray of integers):
An array of integer-valued class labels with shape (R, C). If
the `data` argument is provided, the shape must match the first
two dimensions of `data`.
`source` (ndarray or :class:`SpyFile`):
The source of spectral data associated with the image display.
This optional argument is used to access spectral data (e.g., to
generate a spectrum plot when a user double-clicks on the image
                display).
Keyword arguments:
Any keyword that can be provided to :func:`~spectral.graphics.graphics.get_rgb`
or :func:`matplotlib.imshow`.
'''
import spectral
from spectral import settings
self.is_shown = False
self.imshow_data_kwargs = {'cmap': settings.imshow_float_cmap}
self.rgb_kwargs = {}
self.imshow_class_kwargs = {'zorder': 1}
self.data = data
self.data_rgb = None
self.data_rgb_meta = {}
self.classes = None
self.class_rgb = None
self.source = None
self.bands = bands
self.data_axes = None
self.class_axes = None
self.axes = None
self._image_shape = None
self.display_mode = None
self._interpolation = None
self.selection = None
self.interpolation = kwargs.get('interpolation',
settings.imshow_interpolation)
if data is not None:
self.set_data(data, bands, **kwargs)
if classes is not None:
self.set_classes(classes, **kwargs)
if source is not None:
self.set_source(source)
self.class_colors = spectral.spy_colors
self.spectrum_plot_fig_id = None
self.parent = None
self.selector = None
self._on_parent_click_cid = None
self._class_alpha = settings.imshow_class_alpha
# Callbacks for events associated specifically with this window.
self.callbacks = None
# A sharable callback registry for related windows. If this
# CallbackRegistry is set prior to calling ImageView.show (e.g., by
# setting it equal to the `callbacks_common` member of another
# ImageView object), then the registry will be shared. Otherwise, a new
# callback registry will be created for this ImageView.
self.callbacks_common = None
check_disable_mpl_callbacks()
def set_data(self, data, bands=None, **kwargs):
'''Sets the data to be shown in the RGB channels.
Arguments:
`data` (ndarray or SpyImage):
If `data` has more than 3 bands, the `bands` argument can be
used to specify which 3 bands to display. `data` will be
passed to `get_rgb` prior to display.
`bands` (3-tuple of int):
Indices of the 3 bands to display from `data`.
Keyword Arguments:
Any valid keyword for `get_rgb` or `matplotlib.imshow` can be
given.
'''
from .graphics import _get_rgb_kwargs
self.data = data
self.bands = bands
rgb_kwargs = {}
for k in _get_rgb_kwargs:
if k in kwargs:
rgb_kwargs[k] = kwargs.pop(k)
self.set_rgb_options(**rgb_kwargs)
self._update_data_rgb()
if self._image_shape is None:
self._image_shape = data.shape[:2]
elif data.shape[:2] != self._image_shape:
raise ValueError('Image shape is inconsistent with previously ' \
'set data.')
self.imshow_data_kwargs.update(kwargs)
if 'interpolation' in self.imshow_data_kwargs:
self.interpolation = self.imshow_data_kwargs['interpolation']
self.imshow_data_kwargs.pop('interpolation')
if len(kwargs) > 0 and self.is_shown:
msg = 'Keyword args to set_data only have an effect if ' \
'given before the image is shown.'
warnings.warn(UserWarning(msg))
if self.is_shown:
self.refresh()
def set_rgb_options(self, **kwargs):
'''Sets parameters affecting RGB display of data.
Accepts any keyword supported by :func:`~spectral.graphics.graphics.get_rgb`.
'''
from .graphics import _get_rgb_kwargs
for k in kwargs:
if k not in _get_rgb_kwargs:
raise ValueError('Unexpected keyword: {0}'.format(k))
self.rgb_kwargs = kwargs.copy()
if self.is_shown:
self._update_data_rgb()
self.refresh()
def _update_data_rgb(self):
'''Regenerates the RGB values for display.'''
from .graphics import get_rgb_meta
(self.data_rgb, self.data_rgb_meta) = \
get_rgb_meta(self.data, self.bands, **self.rgb_kwargs)
# If it is a gray-scale image, only keep the first RGB component so
# matplotlib imshow's cmap can still be used.
        if self.data_rgb_meta['mode'] == 'monochrome' and \
           self.data_rgb.ndim == 3:
            self.data_rgb = self.data_rgb[:, :, 0]
def set_classes(self, classes, colors=None, **kwargs):
'''Sets the array of class values associated with the image data.
Arguments:
`classes` (ndarray of int):
`classes` must be an integer-valued array with the same
number rows and columns as the display data (if set).
`colors`: (array or 3-tuples):
Color triplets (with values in the range [0, 255]) that
                define the colors to be associated with the integer indices
in `classes`.
Keyword Arguments:
Any valid keyword for `matplotlib.imshow` can be provided.
'''
from .graphics import _get_rgb_kwargs
self.classes = classes
if classes is None:
return
if self._image_shape is None:
self._image_shape = classes.shape[:2]
elif classes.shape[:2] != self._image_shape:
raise ValueError('Class data shape is inconsistent with ' \
'previously set data.')
if colors is not None:
self.class_colors = colors
kwargs = dict([item for item in list(kwargs.items()) if item[0] not in \
_get_rgb_kwargs])
self.imshow_class_kwargs.update(kwargs)
if 'interpolation' in self.imshow_class_kwargs:
self.interpolation = self.imshow_class_kwargs['interpolation']
self.imshow_class_kwargs.pop('interpolation')
if len(kwargs) > 0 and self.is_shown:
msg = 'Keyword args to set_classes only have an effect if ' \
'given before the image is shown.'
warnings.warn(UserWarning(msg))
if self.is_shown:
self.refresh()
def set_source(self, source):
'''Sets the image data source (used for accessing spectral data).
Arguments:
`source` (ndarray or :class:`SpyFile`):
The source for spectral data associated with the view.
'''
self.source = source
def show(self, mode=None, fignum=None):
'''Renders the image data.
Arguments:
`mode` (str):
Must be one of:
"data": Show the data RGB
"classes": Shows indexed color for `classes`
"overlay": Shows class colors overlaid on data RGB.
If `mode` is not provided, a mode will be automatically
selected, based on the data set in the ImageView.
`fignum` (int):
Figure number of the matplotlib figure in which to display
the ImageView. If not provided, a new figure will be created.
'''
import matplotlib.pyplot as plt
from spectral import settings
if self.is_shown:
msg = 'ImageView.show should only be called once.'
warnings.warn(UserWarning(msg))
return
kwargs = {}
if fignum is not None:
kwargs['num'] = fignum
if settings.imshow_figure_size is not None:
kwargs['figsize'] = settings.imshow_figure_size
plt.figure(**kwargs)
if self.data_rgb is not None:
self.show_data()
if self.classes is not None:
self.show_classes()
if mode is None:
self._guess_mode()
else:
self.set_display_mode(mode)
self.axes.format_coord = self.format_coord
self.init_callbacks()
self.is_shown = True
def init_callbacks(self):
'''Creates the object's callback registry and default callbacks.'''
from spectral import settings
from matplotlib.cbook import CallbackRegistry
self.callbacks = CallbackRegistry()
# callbacks_common may have been set to a shared external registry
# (e.g., to the callbacks_common member of another ImageView object). So
# don't create it if it has already been set.
if self.callbacks_common is None:
self.callbacks_common = CallbackRegistry()
# Keyboard callback
self.cb_mouse = ImageViewMouseHandler(self)
self.cb_mouse.connect()
# Mouse callback
self.cb_keyboard = ImageViewKeyboardHandler(self)
self.cb_keyboard.connect()
# Class update event callback
def updater(*args, **kwargs):
if self.classes is None:
self.set_classes(args[0].classes)
self.refresh()
callback = MplCallback(registry=self.callbacks_common,
event='spy_classes_modified',
callback=updater)
callback.connect()
self.cb_classes_modified = callback
if settings.imshow_enable_rectangle_selector is False:
return
try:
from matplotlib.widgets import RectangleSelector
self.selector = RectangleSelector(self.axes,
self._select_rectangle,
button=1,
useblit=True,
spancoords='data',
drawtype='box',
rectprops = \
self.selector_rectprops)
self.selector.set_active(False)
except:
self.selector = None
msg = 'Failed to create RectangleSelector object. Interactive ' \
'pixel class labeling will be unavailable.'
            warnings.warn(msg)
def label_region(self, rectangle, class_id):
'''Assigns all pixels in the rectangle to the specified class.
Arguments:
`rectangle` (4-tuple of integers):
Tuple or list defining the rectangle bounds. Should have the
form (row_start, row_stop, col_start, col_stop), where the
stop indices are not included (i.e., the effect is
                `classes[row_start:row_stop, col_start:col_stop] = class_id`).
class_id (integer >= 0):
The class to which pixels will be assigned.
Returns the number of pixels reassigned (the number of pixels in the
        rectangle whose class has *changed* to `class_id`).
'''
if self.classes is None:
self.classes = np.zeros(self.data_rgb.shape[:2], dtype=np.int16)
r = rectangle
n = np.sum(self.classes[r[0]:r[1], r[2]:r[3]] != class_id)
if n > 0:
self.classes[r[0]:r[1], r[2]:r[3]] = class_id
event = SpyMplEvent('spy_classes_modified')
event.classes = self.classes
event.nchanged = n
self.callbacks_common.process('spy_classes_modified', event)
# Make selection rectangle go away.
self.selector.to_draw.set_visible(False)
self.refresh()
return n
return 0
def _select_rectangle(self, event1, event2):
if event1.inaxes is not self.axes or event2.inaxes is not self.axes:
self.selection = None
return
(r1, c1) = xy_to_rowcol(event1.xdata, event1.ydata)
(r2, c2) = xy_to_rowcol(event2.xdata, event2.ydata)
(r1, r2) = sorted([r1, r2])
(c1, c2) = sorted([c1, c2])
if (r2 < 0) or (r1 >= self._image_shape[0]) or \
(c2 < 0) or (c1 >= self._image_shape[1]):
self.selection = None
return
r1 = max(r1, 0)
r2 = min(r2, self._image_shape[0] - 1)
c1 = max(c1, 0)
c2 = min(c2, self._image_shape[1] - 1)
print('Selected region: [%d: %d, %d: %d]' % (r1, r2 + 1, c1, c2 + 1))
self.selection = [r1, r2 + 1, c1, c2 + 1]
self.selector.set_active(False)
# Make the rectangle display until at least the next event
self.selector.to_draw.set_visible(True)
self.selector.update()
def _guess_mode(self):
'''Select an appropriate display mode, based on current data.'''
if self.data_rgb is not None:
self.set_display_mode('data')
elif self.classes is not None:
self.set_display_mode('classes')
else:
raise Exception('Unable to display image: no data set.')
def show_data(self):
'''Show the image data.'''
import matplotlib.pyplot as plt
if self.data_axes is not None:
msg = 'ImageView.show_data should only be called once.'
warnings.warn(UserWarning(msg))
return
elif self.data_rgb is None:
raise Exception('Unable to display data: data array not set.')
if self.axes is not None:
# A figure has already been created for the view. Make it current.
plt.figure(self.axes.figure.number)
self.imshow_data_kwargs['interpolation'] = self._interpolation
self.data_axes = plt.imshow(self.data_rgb, **self.imshow_data_kwargs)
if self.axes is None:
self.axes = self.data_axes.axes
def show_classes(self):
'''Show the class values.'''
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, NoNorm
from spectral import get_rgb
if self.class_axes is not None:
msg = 'ImageView.show_classes should only be called once.'
warnings.warn(UserWarning(msg))
return
elif self.classes is None:
raise Exception('Unable to display classes: class array not set.')
cm = ListedColormap(np.array(self.class_colors) / 255.)
self._update_class_rgb()
kwargs = self.imshow_class_kwargs.copy()
kwargs.update({'cmap': cm, 'vmin': 0, 'norm': NoNorm(),
'interpolation': self._interpolation})
if self.axes is not None:
# A figure has already been created for the view. Make it current.
plt.figure(self.axes.figure.number)
self.class_axes = plt.imshow(self.class_rgb, **kwargs)
if self.axes is None:
self.axes = self.class_axes.axes
self.class_axes.set_zorder(1)
if self.display_mode == 'overlay':
self.class_axes.set_alpha(self._class_alpha)
else:
self.class_axes.set_alpha(1)
#self.class_axes.axes.set_axis_bgcolor('black')
def refresh(self):
'''Updates the displayed data (if it has been shown).'''
if self.is_shown:
self._update_class_rgb()
if self.class_axes is not None:
self.class_axes.set_data(self.class_rgb)
self.class_axes.set_interpolation(self._interpolation)
elif self.display_mode in ('classes', 'overlay'):
self.show_classes()
if self.data_axes is not None:
self.data_axes.set_data(self.data_rgb)
self.data_axes.set_interpolation(self._interpolation)
elif self.display_mode in ('data', 'overlay'):
self.show_data()
self.axes.figure.canvas.draw()
def _update_class_rgb(self):
if self.display_mode == 'overlay':
self.class_rgb = np.ma.array(self.classes, mask=(self.classes==0))
else:
self.class_rgb = np.array(self.classes)
def set_display_mode(self, mode):
'''`mode` must be one of ("data", "classes", "overlay").'''
if mode not in ('data', 'classes', 'overlay'):
raise ValueError('Invalid display mode: ' + repr(mode))
self.display_mode = mode
show_data = mode in ('data', 'overlay')
if self.data_axes is not None:
self.data_axes.set_visible(show_data)
show_classes = mode in ('classes', 'overlay')
if self.classes is not None and self.class_axes is None:
# Class data values were just set
self.show_classes()
if self.class_axes is not None:
self.class_axes.set_visible(show_classes)
if mode == 'classes':
self.class_axes.set_alpha(1)
else:
self.class_axes.set_alpha(self._class_alpha)
self.refresh()
@property
def class_alpha(self):
'''alpha transparency for the class overlay.'''
return self._class_alpha
@class_alpha.setter
def class_alpha(self, alpha):
if alpha < 0 or alpha > 1:
raise ValueError('Alpha value must be in range [0, 1].')
self._class_alpha = alpha
if self.class_axes is not None:
self.class_axes.set_alpha(alpha)
if self.is_shown:
self.refresh()
@property
def interpolation(self):
'''matplotlib pixel interpolation to use in the image display.'''
return self._interpolation
@interpolation.setter
def interpolation(self, interpolation):
if interpolation == self._interpolation:
return
self._interpolation = interpolation
if not self.is_shown:
return
if self.data_axes is not None:
self.data_axes.set_interpolation(interpolation)
if self.class_axes is not None:
self.class_axes.set_interpolation(interpolation)
self.refresh()
def set_title(self, s):
if self.is_shown:
self.axes.set_title(s)
self.refresh()
def open_zoom(self, center=None, size=None):
'''Opens a separate window with a zoomed view.
If a ctrl-lclick event occurs in the original view, the zoomed window
will pan to the location of the click event.
Arguments:
`center` (two-tuple of int):
Initial (row, col) of the zoomed view.
`size` (int):
Width and height (in source image pixels) of the initial
zoomed view.
Returns:
A new ImageView object for the zoomed view.
'''
from spectral import settings
import matplotlib.pyplot as plt
if size is None:
size = settings.imshow_zoom_pixel_width
(nrows, ncols) = self._image_shape
fig_kwargs = {}
if settings.imshow_zoom_figure_width is not None:
width = settings.imshow_zoom_figure_width
fig_kwargs['figsize'] = (width, width)
fig = plt.figure(**fig_kwargs)
view = ImageView(source=self.source)
view.set_data(self.data, self.bands, **self.rgb_kwargs)
view.set_classes(self.classes, self.class_colors)
view.imshow_data_kwargs = self.imshow_data_kwargs.copy()
kwargs = {'extent': (-0.5, ncols - 0.5, nrows - 0.5, -0.5)}
view.imshow_data_kwargs.update(kwargs)
view.imshow_class_kwargs = self.imshow_class_kwargs.copy()
view.imshow_class_kwargs.update(kwargs)
view.set_display_mode(self.display_mode)
view.callbacks_common = self.callbacks_common
view.show(fignum=fig.number, mode=self.display_mode)
view.axes.set_xlim(0, size)
view.axes.set_ylim(size, 0)
view.interpolation = 'nearest'
if center is not None:
view.pan_to(*center)
view.cb_parent_pan = ParentViewPanCallback(view, self)
view.cb_parent_pan.connect()
return view
def pan_to(self, row, col):
'''Centers view on pixel coordinate (row, col).'''
if self.axes is None:
raise Exception('Cannot pan image until it is shown.')
(xmin, xmax) = self.axes.get_xlim()
(ymin, ymax) = self.axes.get_ylim()
xrange_2 = (xmax - xmin) / 2.0
yrange_2 = (ymax - ymin) / 2.0
self.axes.set_xlim(col - xrange_2, col + xrange_2)
self.axes.set_ylim(row - yrange_2, row + yrange_2)
self.axes.figure.canvas.draw()
def zoom(self, scale):
'''Zooms view in/out (`scale` > 1 zooms in).'''
(xmin, xmax) = self.axes.get_xlim()
(ymin, ymax) = self.axes.get_ylim()
x = (xmin + xmax) / 2.0
y = (ymin + ymax) / 2.0
dx = (xmax - xmin) / 2.0 / scale
dy = (ymax - ymin) / 2.0 / scale
self.axes.set_xlim(x - dx, x + dx)
self.axes.set_ylim(y - dy, y + dy)
self.refresh()
def format_coord(self, x, y):
'''Formats pixel coordinate string displayed in the window.'''
(nrows, ncols) = self._image_shape
if x < -0.5 or x > ncols - 0.5 or y < -0.5 or y > nrows - 0.5:
return ""
(r, c) = xy_to_rowcol(x, y)
s = 'pixel=[%d,%d]' % (r, c)
if self.classes is not None:
try:
s += ' class=%d' % self.classes[r, c]
except:
pass
return s
def __str__(self):
meta = self.data_rgb_meta
s = 'ImageView object:\n'
if 'bands' in meta:
s += ' {0:<20}: {1}\n'.format("Display bands", meta['bands'])
        if self.interpolation is None:
interp = "<default>"
else:
interp = self.interpolation
s += ' {0:<20}: {1}\n'.format("Interpolation", interp)
        if 'rgb range' in meta:
s += ' {0:<20}:\n'.format("RGB data limits")
for (c, r) in zip('RGB', meta['rgb range']):
s += ' {0}: {1}\n'.format(c, str(r))
return s
def __repr__(self):
return str(self)
def imshow(data=None, bands=None, classes=None, source=None, colors=None,
figsize=None, fignum=None, title=None, **kwargs):
'''A wrapper around matplotlib's imshow for multi-band images.
Arguments:
`data` (SpyFile or ndarray):
Can have shape (R, C) or (R, C, B).
`bands` (tuple of integers, optional)
If `bands` has 3 values, the bands specified are extracted from
`data` to be plotted as the red, green, and blue colors,
respectively. If it contains a single value, then a single band
will be extracted from the image.
`classes` (ndarray of integers):
An array of integer-valued class labels with shape (R, C). If
the `data` argument is provided, the shape must match the first
two dimensions of `data`. The returned `ImageView` object will use
a copy of this array. To access class values that were altered
after calling `imshow`, access the `classes` attribute of the
returned `ImageView` object.
`source` (optional, SpyImage or ndarray):
Object used for accessing image source data. If this argument is
not provided, events such as double-clicking will have no effect
(i.e., a spectral plot will not be created).
`colors` (optional, array of ints):
Custom colors to be used for class image view. If provided, this
argument should be an array of 3-element arrays, each of which
specifies an RGB triplet with integer color components in the
range [0, 256).
`figsize` (optional, 2-tuple of scalar):
Specifies the width and height (in inches) of the figure window
to be created. If this value is not provided, the value specified
in `spectral.settings.imshow_figure_size` will be used.
`fignum` (optional, integer):
Specifies the figure number of an existing matplotlib figure. If
this argument is None, a new figure will be created.
`title` (str):
The title to be displayed above the image.
Keywords:
Keywords accepted by :func:`~spectral.graphics.graphics.get_rgb` or
:func:`matplotlib.imshow` will be passed on to the appropriate
function.
This function defaults the color scale (imshow's "cmap" keyword) to
"gray". To use imshow's default color scale, call this function with
keyword `cmap=None`.
Returns:
An `ImageView` object, which can be subsequently used to refine the
image display.
See :class:`~spectral.graphics.spypylab.ImageView` for additional details.
Examples:
Show a true color image of a hyperspectral image:
>>> data = open_image('92AV3C.lan').load()
>>> view = imshow(data, bands=(30, 20, 10))
Show ground truth in a separate window:
>>> classes = open_image('92AV3GT.GIS').read_band(0)
>>> cview = imshow(classes=classes)
Overlay ground truth data on the data display:
>>> view.set_classes(classes)
>>> view.set_display_mode('overlay')
Show RX anomaly detector results in the view and a zoom window showing
true color data:
>>> x = rx(data)
>>> zoom = view.open_zoom()
>>> view.set_data(x)
Note that pressing ctrl-lclick with the mouse in the main window will
cause the zoom window to pan to the clicked location.
Opening zoom windows, changing display modes, and other functions can
also be achieved via keys mapped directly to the displayed image. Press
"h" with focus on the displayed image to print a summary of mouse/
keyboard commands accepted by the display.
'''
import matplotlib.pyplot as plt
from spectral import settings
from .graphics import get_rgb
view = ImageView()
if data is not None:
view.set_data(data, bands, **kwargs)
if classes is not None:
view.set_classes(classes, colors, **kwargs)
if source is not None:
view.set_source(source)
elif data is not None and len(data.shape) == 3 and data.shape[2] > 3:
view.set_source(data)
if fignum is not None or figsize is not None:
fig = plt.figure(num=fignum, figsize=figsize)
view.show(fignum=fig.number)
else:
view.show()
if title is not None:
view.set_title(title)
return view
def plot(data, source=None):
'''
Creates an x-y plot.
USAGE: plot(data)
If data is a vector, all the values in data will be drawn in a
single series. If data is a 2D array, each column of data will
be drawn as a separate series.
'''
import pylab
from numpy import shape
import spectral
s = shape(data)
if source is not None and hasattr(source, 'bands'):
xvals = source.bands.centers
else:
xvals = None
if len(s) == 1:
if not xvals:
xvals = list(range(len(data)))
p = pylab.plot(xvals, data)
elif len(s) == 2:
if not xvals:
xvals = list(range(s[1]))
p = pylab.plot(xvals, data[0, :])
pylab.hold(1)
for i in range(1, s[0]):
p = pylab.plot(xvals, data[i, :])
spectral._xyplot = p
pylab.grid(1)
if source is not None and hasattr(source, 'bands'):
xlabel = source.bands.band_quantity
if len(source.bands.band_unit) > 0:
xlabel = xlabel + ' (' + source.bands.band_unit + ')'
pylab.xlabel(xlabel)
return p
| gpl-2.0 |
plotly/dash-table | tests/selenium/test_basic_copy_paste.py | 1 | 6622 | import dash
import pytest
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_html_components as html
from dash_table import DataTable
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
url = "https://github.com/plotly/datasets/raw/master/" "26k-consumer-complaints.csv"
rawDf = pd.read_csv(url)
df = rawDf.to_dict("records")
def get_app():
app = dash.Dash(__name__)
app.layout = html.Div(
[
DataTable(
id="table",
data=df[0:250],
columns=[
{"name": i, "id": i, "hideable": i == "Complaint ID"}
for i in rawDf.columns
],
editable=True,
sort_action="native",
include_headers_on_copy_paste=True,
),
DataTable(
id="table2",
data=df[0:10],
columns=[
{"name": i, "id": i, "deletable": True} for i in rawDf.columns
],
editable=True,
sort_action="native",
include_headers_on_copy_paste=True,
),
]
)
@app.callback(
Output("table", "data"),
[Input("table", "data_timestamp")],
[State("table", "data"), State("table", "data_previous")],
)
# pylint: disable=unused-argument
def update_data(timestamp, current, previous):
# pylint: enable=unused-argument
if timestamp is None or current is None or previous is None:
raise PreventUpdate
modified = False
if len(current) == len(previous):
for (i, datum) in enumerate(current):
previous_datum = previous[i]
if datum["Unnamed: 0"] != previous_datum["Unnamed: 0"]:
datum["Complaint ID"] = "MODIFIED"
modified = True
if modified:
return current
else:
raise PreventUpdate
return app
def test_tbcp001_copy_paste_callback(test):
test.start_server(get_app())
target = test.table("table")
target.cell(0, 0).click()
test.copy()
target.cell(1, 0).click()
test.paste()
assert target.cell(1, 0).get_text() == "0"
assert target.cell(1, 1).get_text() == "MODIFIED"
assert test.get_log_errors() == []
def test_tbcp002_sorted_copy_paste_callback(test):
test.start_server(get_app())
target = test.table("table")
target.column(rawDf.columns[2]).sort()
assert target.cell(0, 0).get_text() == "11"
target.cell(0, 0).click()
test.copy()
target.cell(1, 0).click()
test.paste()
assert target.cell(1, 0).get_text() == "11"
assert target.cell(1, 1).get_text() == "MODIFIED"
target.cell(1, 1).click()
test.copy()
target.cell(2, 1).click()
test.paste()
assert target.cell(1, 0).get_text() == "11"
assert target.cell(2, 1).get_text() == "MODIFIED"
assert test.get_log_errors() == []
@pytest.mark.parametrize("mouse_navigation", [True, False])
def test_tbcp003_copy_multiple_rows(test, mouse_navigation):
test.start_server(get_app())
target = test.table("table")
if mouse_navigation:
with test.hold(Keys.SHIFT):
target.cell(0, 0).click()
target.cell(2, 0).click()
else:
target.cell(0, 0).click()
with test.hold(Keys.SHIFT):
test.send_keys(Keys.ARROW_DOWN + Keys.ARROW_DOWN)
test.copy()
target.cell(3, 0).click()
test.paste()
for i in range(3):
assert target.cell(i + 3, 0).get_text() == target.cell(i, 0).get_text()
assert target.cell(i + 3, 1).get_text() == "MODIFIED"
assert test.get_log_errors() == []
def test_tbcp004_copy_9_and_10(test):
test.start_server(get_app())
source = test.table("table")
target = test.table("table2")
source.cell(9, 0).click()
with test.hold(Keys.SHIFT):
ActionChains(test.driver).send_keys(Keys.DOWN).perform()
test.copy()
target.cell(0, 0).click()
test.paste()
for row in range(2):
for col in range(1):
assert (
target.cell(row, col).get_text() == source.cell(row + 9, col).get_text()
)
assert test.get_log_errors() == []
def test_tbcp005_copy_multiple_rows_and_columns(test):
test.start_server(get_app())
target = test.table("table")
target.cell(0, 1).click()
with test.hold(Keys.SHIFT):
target.cell(2, 2).click()
test.copy()
target.cell(3, 1).click()
test.paste()
for row in range(3):
for col in range(1, 3):
assert (
target.cell(row + 3, col).get_text() == target.cell(row, col).get_text()
)
assert test.get_log_errors() == []
def test_tbcp006_copy_paste_between_tables(test):
test.start_server(get_app())
source = test.table("table")
target = test.table("table2")
source.cell(10, 0).click()
with test.hold(Keys.SHIFT):
source.cell(13, 3).click()
test.copy()
target.cell(0, 0).click()
test.paste()
for row in range(4):
for col in range(4):
assert (
source.cell(row + 10, col).get_text()
== target.cell(row, col).get_text()
)
assert test.get_log_errors() == []
def test_tbcp007_copy_paste_with_hidden_column(test):
test.start_server(get_app())
target = test.table("table")
target.column("Complaint ID").hide()
target.cell(0, 0).click()
with test.hold(Keys.SHIFT):
target.cell(2, 2).click()
test.copy()
target.cell(3, 1).click()
test.paste()
for row in range(3):
for col in range(3):
assert (
target.cell(row, col).get_text()
== target.cell(row + 3, col + 1).get_text()
)
assert test.get_log_errors() == []
def test_tbcp008_copy_paste_between_tables_with_hidden_columns(test):
test.start_server(get_app())
target = test.table("table")
target.column("Complaint ID").hide()
target.cell(10, 0).click()
with test.hold(Keys.SHIFT):
target.cell(13, 2).click()
test.copy()
target.cell(0, 0).click()
test.paste()
for row in range(4):
for col in range(3):
assert (
target.cell(row + 10, col).get_text()
== target.cell(row, col).get_text()
)
assert test.get_log_errors() == []
| mit |
Darthone/bug-free-octo-parakeet | tinkering/ml/sklearn_svm.py | 2 | 2558 | #!/usr/bin/env python
import numpy as np
import pandas as pd
from sklearn import preprocessing, cross_validation, neighbors, svm
import peewee
from peewee import *
import ifc.ta as ta
def addDailyReturn(dataset):
"""
Adding in daily return to create binary classifiers (Up or Down in relation to the previous day)
"""
#will normalize labels
le = preprocessing.LabelEncoder()
dataset['UpDown'] = -(dataset['Adj_Close']-dataset['Adj_Close'].shift(-1))/dataset['Adj_Close'].shift(-1)
print dataset['UpDown']
	# LabelEncoder sorts class labels alphabetically, so "up" is encoded as 1
dataset.UpDown[dataset.UpDown >= 0] = "up"
	# and "down" is encoded as 0 by the transform below
dataset.UpDown[dataset.UpDown < 0] = "down"
dataset.UpDown = le.fit(dataset.UpDown).transform(dataset.UpDown)
print dataset['UpDown']
accuracies = []
def preProcessing(stock_name, start_date, end_date):
"""
	Clean up the data so the classifiers can predict on it
"""
x = ta.get_series(stock_name, start_date, end_date)
x.run_calculations()
x.trim_fat()
df = x.df
#df = pd.read_csv(csv)
addDailyReturn(df)
#The columns left will be the ones that are being used to predict
df.drop(['Date'], 1, inplace=True)
df.drop(['Low'], 1, inplace=True)
df.drop(['Volume'], 1, inplace=True)
#df.drop(['Open'], 1, inplace=True)
df.drop(['Adj_Close'],1, inplace=True)
#df.drop(['Close'],1, inplace=True)
df.drop(['High'],1, inplace=True)
df.drop(['mavg_10'],1, inplace=True)
df.drop(['mavg_30'],1, inplace=True)
df.drop(['rsi_14'],1, inplace=True)
return df
for i in range(3):
#calling in date ranges plus stock name to be pulled
train_df = preProcessing("TGT", "2015-04-17", "2016-04-17")
test_df = preProcessing("TGT", "2016-04-17", "2017-04-17")
	# separating the binary predictor into different arrays so the algo knows what to predict on
X_train = np.array(train_df.drop(['UpDown'],1))
y_train = np.array(train_df['UpDown'])
X_test = np.array(test_df.drop(['UpDown'],1))
y_test = np.array(test_df['UpDown'])
#X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.5)
# performing the classifier
clf = svm.SVC()
clf.fit(X_train,y_train)
accuracy = clf.score(X_test,y_test)
	# print this iteration's accuracy and store it for the average printed at the end
print accuracy
accuracies.append(accuracy)
# test value
test_set = np.array([[104,106]])
prediction = clf.predict(test_set)
print prediction
print sum(accuracies)/len(accuracies)
| mit |
AlexRobson/nilmtk | nilmtk/dataset_converters/iawe/convert_iawe.py | 6 | 3735 | from __future__ import print_function, division
import pandas as pd
import numpy as np
from os.path import join, isdir, isfile, dirname, abspath
from os import getcwd
from sys import getfilesystemencoding
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import check_directory_exists, get_datastore
from nilm_metadata import convert_yaml_to_hdf5
from inspect import currentframe, getfile, getsourcefile
from copy import deepcopy
def reindex_fill_na(df, idx):
df_copy = deepcopy(df)
df_copy = df_copy.reindex(idx)
power_columns = [
x for x in df.columns if x[0] in ['power']]
non_power_columns = [x for x in df.columns if x not in power_columns]
for power in power_columns:
df_copy[power].fillna(0, inplace=True)
for measurement in non_power_columns:
df_copy[measurement].fillna(
df[measurement].median(), inplace=True)
return df_copy
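# Hedged illustration (not part of the original module; `df_irregular` is an
# assumed input frame): given a regular DatetimeIndex, power columns are
# zero-filled at missing timestamps while every other measurement column falls
# back to the median of the original column.
#
#   idx = pd.DatetimeIndex(start='7-13-2013', end='8-4-2013', freq='1T')
#   df_regular = reindex_fill_na(df_irregular, idx)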
column_mapping = {
'frequency': ('frequency', ""),
'voltage': ('voltage', ""),
'W': ('power', 'active'),
'energy': ('energy', 'apparent'),
'A': ('current', ''),
'reactive_power': ('power', 'reactive'),
'apparent_power': ('power', 'apparent'),
'power_factor': ('pf', ''),
'PF': ('pf', ''),
'phase_angle': ('phi', ''),
'VA': ('power', 'apparent'),
'VAR': ('power', 'reactive'),
'VLN': ('voltage', ""),
'V': ('voltage', ""),
'f': ('frequency', "")
}
TIMESTAMP_COLUMN_NAME = "timestamp"
TIMEZONE = "Asia/Kolkata"
START_DATETIME, END_DATETIME = '7-13-2013', '8-4-2013'
FREQ = "1T"
def convert_iawe(iawe_path, output_filename, format="HDF"):
"""
Parameters
----------
iawe_path : str
The root path of the iawe dataset.
output_filename : str
The destination filename (including path and suffix).
"""
check_directory_exists(iawe_path)
idx = pd.DatetimeIndex(start=START_DATETIME, end=END_DATETIME, freq=FREQ)
idx = idx.tz_localize('GMT').tz_convert(TIMEZONE)
# Open data store
store = get_datastore(output_filename, format, mode='w')
electricity_path = join(iawe_path, "electricity")
# Mains data
for chan in range(1, 12):
key = Key(building=1, meter=chan)
filename = join(electricity_path, "%d.csv" % chan)
print('Loading ', chan)
df = pd.read_csv(filename)
df.drop_duplicates(subset=["timestamp"], inplace=True)
df.index = pd.to_datetime(df.timestamp.values, unit='s', utc=True)
df = df.tz_convert(TIMEZONE)
df = df.drop(TIMESTAMP_COLUMN_NAME, 1)
df.rename(columns=lambda x: column_mapping[x], inplace=True)
df.columns.set_names(LEVEL_NAMES, inplace=True)
df = df.convert_objects(convert_numeric=True)
df = df.dropna()
df = df.astype(np.float32)
df = df.sort_index()
df = df.resample("1T")
df = reindex_fill_na(df, idx)
assert df.isnull().sum().sum() == 0
store.put(str(key), df)
store.close()
convert_yaml_to_hdf5(join(_get_module_directory(), 'metadata'),
output_filename)
print("Done converting iAWE to HDF5!")
def _get_module_directory():
# Taken from http://stackoverflow.com/a/6098238/732596
path_to_this_file = dirname(getfile(currentframe()))
if not isdir(path_to_this_file):
encoding = getfilesystemencoding()
path_to_this_file = dirname(unicode(__file__, encoding))
if not isdir(path_to_this_file):
        path_to_this_file = dirname(abspath(getsourcefile(lambda _: None)))
if not isdir(path_to_this_file):
path_to_this_file = getcwd()
assert isdir(path_to_this_file), path_to_this_file + ' is not a directory'
return path_to_this_file
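# Hedged usage sketch (not part of the original module; paths are illustrative):
#   convert_iawe('/data/iawe', '/data/iawe.h5', format='HDF')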
| apache-2.0 |
timcera/tsgettoolbox | tsgettoolbox/ulmo/noaa/goes/core.py | 1 | 5731 | # -*- coding: utf-8 -*-
import logging
import os
import shutil
from datetime import datetime, timedelta
import isodate
import pandas as pd
import requests
from ulmo import util
from . import parsers
dcs_url = "https://dcs1.noaa.gov/Account/FieldTestData"
DEFAULT_FILE_PATH = "noaa/goes/"
# configure logging
LOG_FORMAT = "%(message)s"
logging.basicConfig(format=LOG_FORMAT)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def decode(dataframe, parser, **kwargs):
"""decodes goes message data in pandas dataframe returned by
ulmo.noaa.goes.get_data().
Parameters
----------
dataframe : pandas.DataFrame
pandas.DataFrame returned by ulmo.noaa.goes.get_data()
parser : {function, str}
        function that acts on the dcp_message of each row of the dataframe and
        returns a new dataframe containing several rows of decoded data. This
        returned dataframe may have different (but derived) timestamps than the
        original row. If a string is passed then a matching parser function is
looked up from ulmo.noaa.goes.parsers
Returns
-------
decoded_data : pandas.DataFrame
pandas dataframe, the format and parameters in the returned dataframe
depend wholly on the parser used
"""
if isinstance(parser, str):
parser = getattr(parsers, parser)
if dataframe.empty:
return dataframe
df = []
for timestamp, data in dataframe.iterrows():
parsed = parser(data, **kwargs)
parsed.dropna(how="all", inplace=True)
if parsed.empty:
empty_df = pd.DataFrame()
df.append(empty_df)
df.append(parsed)
df = pd.concat(df)
# preserve metadata in df if it exists, since pivot will lose it
df_save = df.drop(["channel", "channel_data"], axis=1)
df = df.pivot_table(index=df.index, columns="channel", values="channel_data").join(
df_save
)
    # to properly drop duplicate rows, need to include the index; unfortunately,
    # drop_duplicates() ignores the index, so copy it into a temporary column
    # and drop that column again afterwards
df["idx"] = df.index.values
df = df.drop_duplicates().drop("idx", axis=1)
return df
def get_data(dcp_address, hours, use_cache=False, cache_path=None, as_dataframe=True):
"""Fetches GOES Satellite DCP messages from NOAA Data Collection System
(DCS) field test.
Parameters
----------
dcp_address : str, iterable of strings
DCP address or list of DCP addresses to be fetched; lists will be
joined by a ','.
use_cache : bool,
        If True, use an hdf file to cache data and retrieve new data on
subsequent requests
cache_path : {``None``, str},
If ``None`` use default ulmo location for cached files otherwise use
specified path. files are named using dcp_address.
as_dataframe : bool
If True (default) return data in a pandas dataframe otherwise return a
dict.
Returns
-------
message_data : {pandas.DataFrame, dict}
Either a pandas dataframe or a dict indexed by dcp message times
"""
if isinstance(dcp_address, list):
dcp_address = ",".join(dcp_address)
data = pd.DataFrame()
if use_cache:
dcp_data_path = _get_store_path(cache_path, dcp_address + ".h5")
if os.path.exists(dcp_data_path):
data = pd.read_hdf(dcp_data_path, dcp_address)
params = {}
params["addr"] = (dcp_address,)
params["hours"] = (hours,)
messages = _fetch_url(params)
new_data = pd.DataFrame([_parse(row) for row in messages])
if not new_data.empty:
new_data.index = new_data.message_timestamp_utc
data = new_data.combine_first(data)
data.sort_index(inplace=True)
if use_cache:
# write to a tmp file and move to avoid ballooning h5 file
tmp = dcp_data_path + ".tmp"
data.to_hdf(tmp, dcp_address)
shutil.move(tmp, dcp_data_path)
if data.empty:
if as_dataframe:
return data
else:
return {}
if not as_dataframe:
data = data.T.to_dict()
return data
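# Hedged usage sketch (not part of the original module; the DCP address and
# parser name below are illustrative assumptions, not known-good values):
#
#   raw = get_data('C5149430', hours=12, as_dataframe=True)
#   decoded = decode(raw, parser='twdb_stevens')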
def _fetch_url(params):
r = requests.post(dcs_url, params=params, timeout=60)
messages = r.json()
return messages
def _format_period(period):
days, hours, minutes = (
period.days,
period.seconds // 3600,
(period.seconds // 60) % 60,
)
if minutes:
return "now -%s minutes" % period.seconds / 60
if hours:
return "now -%s hours" % period.seconds / 3600
if days:
return "now -%s days" % days
def _format_time(timestamp):
if isinstance(timestamp, str):
if timestamp.startswith("P"):
timestamp = isodate.parse_duration(timestamp)
else:
timestamp = isodate.parse_datetime(timestamp)
if isinstance(timestamp, datetime):
return timestamp.strftime("%Y/%j %H:%M:%S")
elif isinstance(timestamp, timedelta):
return _format_period(timestamp)
def _get_store_path(path, default_file_name):
if path is None:
path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)
if not os.path.exists(path):
os.makedirs(path)
return os.path.join(path, default_file_name)
def _parse(entry):
return {
"dcp_address": entry["TblDcpDataAddrCorr"],
"message_timestamp_utc": datetime.fromtimestamp(
int(entry["TblDcpDataDtMsgCar"].strip("/Date()")) / 1000
),
"failure_code": entry["TblDcpDataProcessInfo"],
"signal_strength": entry["TblDcpDataSigStrength"],
"goes_receive_channel": entry["TblDcpDataChan"],
"message_data_length": entry["TblDcpDataDataLen"],
"dcp_message": entry["TblDcpDataData"],
}
| bsd-3-clause |
draperjames/qtpandas | tests/test_DataFrameModel.py | 1 | 27206 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
from builtins import round
from builtins import str
from future import standard_library
standard_library.install_aliases()
import random
from qtpandas.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from qtpandas.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from qtpandas.models.DataSearch import DataSearch
from qtpandas.models.SupportedDtypes import SupportedDtypes
def test_initDataFrame():
model = DataFrameModel()
assert model.dataFrame().empty
def test_initDataFrameWithDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
def test_setDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel()
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
with pytest.raises(TypeError) as excinfo:
model.setDataFrame(None)
assert "pandas.core.frame.DataFrame" in str(excinfo.value)
@pytest.mark.parametrize(
"copy, operator",
[
(True, numpy.not_equal),
(False, numpy.equal)
]
)
def test_copyDataFrame(copy, operator):
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
model.setDataFrame(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
def test_TimestampFormat():
model = DataFrameModel()
assert model.timestampFormat == Qt.ISODate
newFormat = "yy-MM-dd hh:mm"
model.timestampFormat = newFormat
assert model.timestampFormat == newFormat
# with pytest.raises(TypeError) as excinfo:
# model.timestampFormat = "yy-MM-dd hh:mm"
# assert "unicode" in str(excinfo.value)
# def test_signalUpdate(qtbot):
# model = DataFrameModel()
# with qtbot.waitSignal(model.layoutAboutToBeChanged) as layoutAboutToBeChanged:
# model.signalUpdate()
# assert layoutAboutToBeChanged.signal_triggered
#
# with qtbot.waitSignal(model.layoutChanged) as blocker:
# model.signalUpdate()
# assert blocker.signal_triggered
@pytest.mark.parametrize(
"orientation, role, index, expectedHeader",
[
(Qt.Horizontal, Qt.EditRole, 0, None),
(Qt.Vertical, Qt.EditRole, 0, None),
(Qt.Horizontal, Qt.DisplayRole, 0, 'A'),
(Qt.Horizontal, Qt.DisplayRole, 1, None), # run into IndexError
(Qt.Vertical, Qt.DisplayRole, 0, 0),
(Qt.Vertical, Qt.DisplayRole, 1, 1)
]
)
def test_headerData(orientation, role, index, expectedHeader):
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.headerData(index, orientation, role) == expectedHeader
def test_flags():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
index = model.index(0, 0)
assert index.isValid()
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled
model.enableEditing(True)
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
model.setDataFrame(pandas.DataFrame([True], columns=['A']))
index = model.index(0, 0)
model.enableEditing(True)
assert model.flags(index) != Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
def test_rowCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.rowCount() == 1
model = DataFrameModel(pandas.DataFrame(numpy.arange(100), columns=['A']))
assert model.rowCount() == 100
def test_columnCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.columnCount() == 1
    model = DataFrameModel(
        pandas.DataFrame(numpy.arange(100).reshape(1, 100),
                         columns=numpy.arange(100)))
assert model.columnCount() == 100
#
# class TestSort(object):
#
# @pytest.fixture
# def dataFrame(self):
# return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
#
# @pytest.fixture
# def model(self, dataFrame):
# return DataFrameModel(dataFrame)
#
# @pytest.mark.parametrize(
# "signal",
# [
# "layoutAboutToBeChanged",
# "layoutChanged",
# "sortingAboutToStart",
# "sortingFinished", ]
# )
# def test_signals(self, model, qtbot, signal):
# with qtbot.waitSignal(getattr(model, signal)) as blocker:
# model.sort(0)
# assert blocker.signal_triggered
#
# @pytest.fixture
# def test_returnValues(self, model):
# model.sort(0)
#
# @pytest.mark.parametrize(
# "testAscending, modelAscending, isIdentic",
# [
# (True, Qt.AscendingOrder, True),
# (False, Qt.DescendingOrder, True),
# (True, Qt.DescendingOrder, False),
# ]
# )
# def test_sort(self, model, dataFrame, testAscending, modelAscending, isIdentic):
# temp = dataFrame.sort('A', ascending=testAscending)
# model.sort(0, order=modelAscending)
#
# assert (dataFrame['A'] == temp['A']).all() == isIdentic
class TestData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
index = model.index(0, 0)
assert index.isValid()
return index
@pytest.fixture
def test_invalidIndex(self, model):
assert model.data(QtCore.QModelIndex()) is None
def test_unknownRole(self, model, index):
assert index.isValid()
assert model.data(index, role="unknownRole") == None
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == None
# with pytest.raises(TypeError) as excinfo:
# model.data(index)
# assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
("äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.1111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
assert model.data(index) == round(value, precision)
assert model.data(index, role=Qt.DisplayRole) == round(value, precision)
assert model.data(index, role=Qt.EditRole) == round(value, precision)
else:
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
#@pytest.mark.parametrize(
#"border1, modifier, border2, dtype", [
#("min", -1, "max", numpy.uint8),
#("max", +1, "min", numpy.uint8),
#("min", -1, "max", numpy.uint16),
#("max", +1, "min", numpy.uint16),
#("min", -1, "max", numpy.uint32),
#("max", +1, "min", numpy.uint32),
#("min", -1, "max", numpy.uint64),
##("max", +1, "min", numpy.uint64), # will raise OverFlowError caused by astype function,
## uneffects models data method
#("min", -1, "max", numpy.int8),
#("max", +1, "min", numpy.int8),
#("min", -1, "max", numpy.int16),
#("max", +1, "min", numpy.int16),
#("min", -1, "max", numpy.int32),
#("max", +1, "min", numpy.int32),
##("min", -1, "max", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
##("max", +1, "min", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
#]
#)
#def test_integerBorderValues(self, model, index, border1, modifier, border2, dtype):
#ii = numpy.iinfo(dtype)
#dataFrame = pandas.DataFrame([getattr(ii, border1) + modifier], columns=['A'])
#dataFrame['A'] = dataFrame['A'].astype(dtype)
#model.setDataFrame(dataFrame)
#assert not model.dataFrame().empty
#assert model.dataFrame() is dataFrame
#assert index.isValid()
#assert model.data(index) == getattr(ii, border2)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index), numpy.bool_)
# assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
pandasDate = pandas.Timestamp("1990-10-08T10:15:45")
qDate = QtCore.QDateTime.fromString(str(pandasDate), Qt.ISODate)
dataFrame = pandas.DataFrame([pandasDate], columns=['A'])
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == qDate
assert model.data(index, role=Qt.EditRole) == qDate
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == pandasDate
assert isinstance(model.data(index, role=DATAFRAME_ROLE), pandas.Timestamp)
class TestSetData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame([10], columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
return model.index(0, 0)
def test_invalidIndex(self, model):
assert model.setData(QtCore.QModelIndex(), None) == False
def test_nothingHasChanged(self, model, index):
assert model.setData(index, 10) == False
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
with pytest.raises(TypeError) as excinfo:
model.setData(index, numpy.complex64(92+151j))
assert "unhandled data type" in str(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
("äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
newValue = "{}123".format(value)
model.enableEditing(True)
assert model.setData(index, newValue)
assert model.data(index) == newValue
assert model.data(index, role=Qt.DisplayRole) == newValue
assert model.data(index, role=Qt.EditRole) == newValue
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == newValue
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
# pytest.set_trace()
# everything is already set as false and since Qt.Unchecked = 0, 0 == False
# therefore the assert will fail without further constraints
assert model.setData(index, qtbool) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
numpyDate = numpy.datetime64("1990-10-08T10:15:45+0100")
dataFrame = pandas.DataFrame([numpyDate], columns=['A'])
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
newDate = pandas.Timestamp("2000-12-08T10:15:45")
newQDate = QtCore.QDateTime.fromString(str(newDate), Qt.ISODate)
model.enableEditing(True)
assert model.setData(index, newQDate)
assert model.data(index, role=Qt.DisplayRole) == newQDate
assert model.data(index, role=Qt.EditRole) == newQDate
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == newDate
assert isinstance(model.data(index, role=DATAFRAME_ROLE), pandas.Timestamp)
with pytest.raises(Exception) as err:
model.setData(index, 'foobar')
assert "Can't convert 'foobar' into a datetime" in str(err.value)
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.11111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
newValue = value + 1
model.enableEditing(True)
assert model.setData(index, newValue)
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
#assert abs(decimal.Decimal(str(modelValue)).as_tuple().exponent) == precision
assert model.data(index) == round(newValue, precision)
assert model.data(index, role=Qt.DisplayRole) == round(newValue, precision)
assert model.data(index, role=Qt.EditRole) == round(newValue, precision)
else:
assert model.data(index) == newValue
assert model.data(index, role=Qt.DisplayRole) == newValue
assert model.data(index, role=Qt.EditRole) == newValue
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
@pytest.mark.parametrize(
"border, modifier, dtype", [
("min", -1, numpy.uint8),
("max", +1, numpy.uint8),
("min", -1, numpy.uint16),
("max", +1, numpy.uint16),
("min", -1, numpy.uint32),
("max", +1, numpy.uint32),
("min", -1, numpy.uint64),
("max", +1, numpy.uint64),
("min", -1, numpy.int8),
("max", +1, numpy.int8),
("min", -1, numpy.int16),
("max", +1, numpy.int16),
("min", -1, numpy.int32),
("max", +1, numpy.int32),
("min", -1, numpy.int64),
("max", +1, numpy.int64),
]
)
def test_integerBorderValues(self, model, index, border, modifier, dtype):
ii = numpy.iinfo(dtype)
value = getattr(ii, border) + modifier
dataFrame = pandas.DataFrame([getattr(ii, border)], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
assert model.setData(index, value)
assert model.data(index) == getattr(ii, border)
class TestFilter(object):
@pytest.fixture
def dataFrame(self):
data = [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
return model.index(0, 0)
def test_filter_single_column(self, model, index):
filterString = 'Foo < 10'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 1)
def test_filter_freeSearch(self, model, index):
filterString = 'freeSearch("10")'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 2)
def test_filter_multiColumn(self, model, index):
filterString = '(Foo < 10) & (Bar > 1)'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 2)
def test_filter_unknown_keyword(self, model, index):
filterString = '(Foo < 10) and (Bar > 1)'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows == postFilterRows
class TestEditMode(object):
@pytest.fixture
def dataFrame(self):
data = [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def newColumns(self):
columns = []
for dtype, description in SupportedDtypes._all:
columns.append((description, dtype))
for _type in [int, float, bool, object]:
desc = 'default_%s' % (str(_type),)
columns.append((desc, _type))
return columns
def test_rename(self, model, dataFrame):
renames = {'Foo':'Booyah', 'Bar':'Boogam'}
cols = dataFrame.columns.tolist()
assert not 'Booyah' in cols and not 'Boogam' in cols
model.rename(columns=renames)
cols = model._dataFrame.columns.tolist()
assert 'Booyah' in cols and 'Boogam' in cols
assert 'Foo' not in cols and 'Bar' not in cols
def test_apply_function(self, model):
def mini_func(df):
return df
def bad_func(df):
return False
model.applyFunction(mini_func)
expected = False
try:
model.applyFunction(bad_func)
except Exception:
expected = True
assert expected
def test_edit_data(self, model):
index = model.index(0, 0)
currentData = index.data()
assert not model.setData(index, 42)
assert index.data() == currentData
model.enableEditing(True)
assert model.setData(index, 42)
assert index.data() != currentData
assert index.data() == 42
def test_add_column(self, model, newColumns):
model.enableEditing(True)
columnCount = model.columnCount()
rowCount = model.rowCount()
for index, data in enumerate(newColumns):
desc, _type = data
if isinstance(_type, numpy.dtype):
defaultVal = _type.type()
if _type.type == numpy.datetime64:
defaultVal = pandas.Timestamp('1-01-01 00:00:00')
else:
defaultVal = _type()
assert model.addDataFrameColumn(desc, _type, defaultVal)
for row in range(rowCount):
idx = model.index(row, columnCount + index)
newVal = idx.data(DATAFRAME_ROLE)
assert newVal == defaultVal
def test_remove_columns(self, model):
model.enableEditing(True)
df = model.dataFrame().copy()
columnNames = model.dataFrame().columns.tolist()
#remove a column which doesn't exist
assert not model.removeDataFrameColumns([(3, 'monty')])
assert model.columnCount() == len(columnNames)
#remove one column at a time
for index, column in enumerate(columnNames):
assert model.removeDataFrameColumns([(index, column)])
assert model.columnCount() == 0
model.setDataFrame(df, copyDataFrame=True)
assert model.columnCount() == len(columnNames)
# remove all columns
columnNames = [(i, n) for i, n in enumerate(columnNames)]
assert model.removeDataFrameColumns(columnNames)
assert model.columnCount() == 0
def test_remove_columns_random(self, dataFrame):
columnNames = dataFrame.columns.tolist()
columnNames = [(i, n) for i, n in enumerate(columnNames)]
for cycle in range(1000):
elements = random.randint(1, len(columnNames))
names = random.sample(columnNames, elements)
df = dataFrame.copy()
model = DataFrameModel(df)
assert not model.removeDataFrameColumns(names)
model.enableEditing(True)
model.removeDataFrameColumns(names)
_columnSet = set(columnNames)
_removedSet = set(names)
remainingColumns = _columnSet - _removedSet
for idx, col in remainingColumns:
assert col in model.dataFrame().columns.tolist()
def test_add_rows(self, model):
assert not model.addDataFrameRows()
model.enableEditing(True)
rows = model.rowCount()
assert not model.addDataFrameRows(count=0)
assert model.rowCount() == rows
assert model.addDataFrameRows()
assert model.rowCount() == rows + 1
assert model.addDataFrameRows(count=5)
assert model.rowCount() == rows + 1 + 5
idx = model.index(rows+4, 0)
assert idx.data() == 0
def test_remove_rows(self, model):
assert not model.removeDataFrameRows([0])
model.enableEditing(True)
df = model.dataFrame().copy()
rows = model.rowCount()
model.removeDataFrameRows([0])
assert model.rowCount() < rows
assert model.rowCount() == rows - 1
assert numpy.all(df.loc[1:].values == model.dataFrame().values)
model.removeDataFrameRows([0, 1])
assert model.dataFrame().empty
model.setDataFrame(df, copyDataFrame=True)
assert not model.removeDataFrameRows([5, 6, 7])
rows = model.rowCount()
assert model.removeDataFrameRows([0, 1, 7, 10])
assert model.rowCount() < rows
assert model.rowCount() == 1
if __name__ == '__main__':
pytest.main()
| mit |
MMesch/SHTOOLS | examples/python/GravMag/TestCT.py | 1 | 5243 | #!/usr/bin/env python
"""
This script creates a crustal thickness map of Mars.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import shio
from pyshtools import expand
from pyshtools import gravmag
from pyshtools import constant
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
# set shtools plot style:
mpl.rcParams.update(style_shtools)
# ==== MAIN FUNCTION ====
def main():
TestCrustalThickness()
# ==== TEST FUNCTIONS ====
def TestCrustalThickness():
"""
Example routine that calculates the crustal thickness of Mars
"""
delta_max = 5.0
nmax = 6
degmax = 50
lmax = 200
rho_c = 2900.0
rho_m = 3500.0
filter_type = 0
half = 0
gravfile = '../../ExampleDataFiles/jgmro_110b_sha.tab'
pot, lmaxp, header = shio.SHReadH(gravfile, degmax, 2)
gm = header[1] * 1.e9
mass = gm / constant.grav_constant
r_grav = header[0] * 1.e3
print(r_grav, gm, mass, lmaxp)
topofile = '../../ExampleDataFiles/MarsTopo719.shape'
hlm, lmaxt = shio.SHRead(topofile, 719)
r0 = hlm[0, 0, 0]
d = r0 - 45.217409924028445e3
print(r0, lmaxt)
for l in range(2, lmaxp + 1):
pot[:, l, :l + 1] = pot[:, l, :l + 1] * (r_grav / r0)**l
topo_grid = expand.MakeGridDH(hlm, lmax=lmax, sampling=2,
lmax_calc=degmax)
print("Maximum radius (km) = ", topo_grid.max() / 1.e3)
print("Minimum radius (km) = ", topo_grid.min() / 1.e3)
bc, r0 = gravmag.CilmPlusDH(topo_grid, nmax, mass, rho_c, lmax=degmax)
ba = pot - bc
moho_c = np.zeros([2, degmax + 1, degmax + 1], dtype=float)
moho_c[0, 0, 0] = d
for l in range(1, degmax + 1):
if filter_type == 0:
moho_c[:, l, :l + 1] = ba[:, l, :l + 1] * mass * (2 * l + 1) * \
((r0 / d)**l) \
/ (4.0 * np.pi * (rho_m - rho_c) * d**2)
elif filter_type == 1:
            moho_c[:, l, :l + 1] = gravmag.DownContFilterMA(l, half, r0, d) * \
ba[:, l, :l + 1] * mass * (2 * l + 1) * \
((r0 / d)**l) / \
(4.0 * np.pi * (rho_m - rho_c) * d**2)
else:
            moho_c[:, l, :l + 1] = gravmag.DownContFilterMC(l, half, r0, d) * \
ba[:, l, :l + 1] * mass * (2 * l + 1) *\
((r0 / d)**l) / \
(4.0 * np.pi * (rho_m - rho_c) * d**2)
moho_grid3 = expand.MakeGridDH(moho_c, lmax=lmax, sampling=2,
lmax_calc=degmax)
print('Maximum Crustal thickness (km) = ',
(topo_grid - moho_grid3).max() / 1.e3)
print('Minimum Crustal thickness (km) = ',
(topo_grid - moho_grid3).min() / 1.e3)
moho_c = gravmag.BAtoHilmDH(ba, moho_grid3, nmax, mass, r0,
(rho_m - rho_c), lmax=lmax,
filter_type=filter_type, filter_deg=half,
lmax_calc=degmax)
moho_grid2 = expand.MakeGridDH(moho_c, lmax=lmax, sampling=2,
lmax_calc=degmax)
print('Delta (km) = ', abs(moho_grid3 - moho_grid2).max() / 1.e3)
temp_grid = topo_grid - moho_grid2
print('Maximum Crustal thickness (km) = ', temp_grid.max() / 1.e3)
print('Minimum Crustal thickness (km) = ', temp_grid.min() / 1.e3)
iter = 0
delta = 1.0e9
while delta > delta_max:
iter += 1
print('Iteration ', iter)
moho_grid = (moho_grid2 + moho_grid3) / 2.0
print("Delta (km) = ", abs(moho_grid - moho_grid2).max() / 1.e3)
temp_grid = topo_grid - moho_grid
print('Maximum Crustal thickness (km) = ', temp_grid.max() / 1.e3)
print('Minimum Crustal thickness (km) = ', temp_grid.min() / 1.e3)
moho_grid3 = moho_grid2
moho_grid2 = moho_grid
iter += 1
print('Iteration ', iter)
moho_c = gravmag.BAtoHilmDH(ba, moho_grid2, nmax, mass, r0,
rho_m - rho_c, lmax=lmax,
filter_type=filter_type, filter_deg=half,
lmax_calc=degmax)
moho_grid = expand.MakeGridDH(moho_c, lmax=lmax, sampling=2,
lmax_calc=degmax)
delta = abs(moho_grid - moho_grid2).max()
print('Delta (km) = ', delta / 1.e3)
temp_grid = topo_grid - moho_grid
print('Maximum Crustal thickness (km) = ', temp_grid.max() / 1.e3)
print('Minimum Crustal thickness (km) = ', temp_grid.min() / 1.e3)
moho_grid3 = moho_grid2
moho_grid2 = moho_grid
if temp_grid.max() > 100.e3:
print('Not converging')
exit(1)
fig_map = plt.figure()
plt.imshow(temp_grid)
    fig_map.savefig('Mars_CrustalThickness.png')
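# For reference (comments only): the first-order moho estimate computed above
# inverts a mass-sheet approximation of the Bouguer anomaly,
#
#   moho_lm ~= ba_lm * M * (2l + 1) * (r0 / d)**l / (4 * pi * (rho_m - rho_c) * d**2)
#
# where ba_lm are the Bouguer anomaly coefficients, M the planetary mass, d the
# mean moho radius and rho_m - rho_c the crust/mantle density contrast. The
# while-loop then refines this estimate with gravmag.BAtoHilmDH until two
# successive moho grids agree to within delta_max.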
# ==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/manifold/t_sne.py | 3 | 35216 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from time import time
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..decomposition import PCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..externals.six import string_types
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
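# Hypothetical usage sketch (comments only, variable names are illustrative):
#
# X_toy = np.random.RandomState(0).randn(5, 3)
# d_toy = pdist(X_toy, "sqeuclidean")
# P_toy = _joint_probabilities(d_toy, desired_perplexity=2.0, verbose=0)
# P_toy has shape (10,) and sums to ~0.5 (each unordered pair appears once).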
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
    This method is approximately equal to _joint_probabilities. The latter
    is O(N^2) in the number of samples, but limiting the joint probability to
    nearest neighbors improves this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples, k)
Distances of samples to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
Indices of the k nearest-neighbors for each samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
Condensed joint probability matrix with only nearest neighbors.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
n_samples, k = neighbors.shape
distances = distances.astype(np.float32, copy=False)
neighbors = neighbors.astype(np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
assert np.all(np.isfinite(conditional_P)), \
"All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
range(0, n_samples * k + 1, k)),
shape=(n_samples, n_samples))
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s"
.format(duration))
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist += 1.
dist /= degrees_of_freedom
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
# Gradient: dC/dY
    # pdist always returns double precision distances. Thus we need to
    # allocate the gradient with the same dtype as the parameters.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order='K'),
X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
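# Illustrative sketch (comments only) of the heavy-tailed kernel used above.
# With degrees_of_freedom = 1 this is the Cauchy kernel of standard t-SNE:
#
# Y = np.array([[0., 0.], [1., 0.], [0., 2.]])
# d = pdist(Y, "sqeuclidean")        # [1., 4., 5.]
# q = (1. + d) ** -1.                # [0.5, 0.2, 0.1666...]
# Q = q / (2. * q.sum())             # condensed form, sums to 0.5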
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components,
angle=0.5, skip_num_points=0, verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : csr sparse matrix, shape (n_samples, n_sample)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles less than 0.2 quickly increase the
        computation time and angles greater than 0.8 quickly increase the error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter,
n_iter_check=1, n_iter_without_progress=300,
momentum=0.8, learning_rate=200.0, min_gain=0.01,
min_grad_norm=1e-7, verbose=0, args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.8)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = i = it
tic = time()
for i in range(it, n_iter):
error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
toc = time()
duration = toc - tic
tic = toc
if verbose >= 2:
print("[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
return p, error, i
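# Hypothetical usage sketch (comments only): the optimizer above applied to a
# plain quadratic outside of t-SNE. The objective only has to return a
# (cost, gradient) pair for a flat parameter vector.
#
# def quadratic(p):
#     return 0.5 * np.dot(p, p), p
#
# p_opt, cost, n_done = _gradient_descent(
#     quadratic, np.array([1.0, -2.0]), it=0, n_iter=100,
#     learning_rate=0.1, momentum=0.5)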
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
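# Hypothetical usage sketch (comments only): scoring how well a 2-D PCA
# projection preserves local neighborhoods; 1.0 means perfectly preserved.
#
# X = np.random.RandomState(0).randn(100, 10)
# X_2d = PCA(n_components=2).fit_transform(X)
# score = trustworthiness(X, X_2d, n_neighbors=5)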
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 12.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 200.0)
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 250.
n_iter_without_progress : int, optional (default: 300)
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string or numpy array, optional (default: "random")
Initialization of embedding. Possible options are 'random', 'pca',
and a numpy array of shape (n_samples, n_components).
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default: None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Note that different initializations might result in
different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
        in the range of 0.2 - 0.8. Angles less than 0.2 quickly increase the
        computation time and angles greater than 0.8 quickly increase the error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2).fit_transform(X)
>>> X_embedded.shape
(4, 2)
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_N_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=12.0, learning_rate=200.0, n_iter=1000,
n_iter_without_progress=300, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that
            when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.metric == "precomputed":
if isinstance(self.init, string_types) and self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
if np.any(X < 0):
raise ValueError("All distances should be positive, the "
"precomputed distances given as X is not "
"correct")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float32, np.float64])
if self.method == 'barnes_hut' and self.n_components > 3:
raise ValueError("'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree.")
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is {}"
.format(self.early_exaggeration))
if self.n_iter < 250:
raise ValueError("n_iter should be at least 250")
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if np.any(distances < 0):
raise ValueError("All distances should be positive, the "
"metric given is not correct")
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
else:
            # Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
k = min(n_samples - 1, int(3. * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(k))
# Find the nearest neighbors for every point
neighbors_method = 'ball_tree'
if (self.metric == 'precomputed'):
neighbors_method = 'brute'
knn = NearestNeighbors(algorithm=neighbors_method, n_neighbors=k,
metric=self.metric)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print("[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration))
t0 = time()
distances_nn, neighbors_nn = knn.kneighbors(
None, n_neighbors=k)
duration = time() - t0
if self.verbose:
print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..."
.format(n_samples, duration))
# Free the memory used by the ball_tree
del knn
if self.metric == "euclidean":
                # knn returns the euclidean distance but we need it squared
                # to be consistent with the 'exact' method. Note that the
                # method was derived assuming the euclidean metric in the
                # input space. Not sure of the implication of using a
                # different metric.
distances_nn **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, neighbors_nn,
self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'pca':
pca = PCA(n_components=self.n_components, svd_solver='randomized',
random_state=random_state)
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
elif self.init == 'random':
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.randn(
n_samples, self.n_components).astype(np.float32)
else:
raise ValueError("'init' must be 'pca', 'random', or "
"a numpy array")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
@property
@deprecated("Attribute n_iter_final was deprecated in version 0.19 and "
"will be removed in 0.21. Use ``n_iter_`` instead")
def n_iter_final(self):
return self.n_iter_
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_N_ITER,
"n_iter": self._EXPLORATION_N_ITER,
"momentum": 0.5,
}
if self.method == 'barnes_hut':
obj_func = _kl_divergence_bh
opt_args['kwargs']['angle'] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
        # Learning schedule (part 1): do 250 iterations with lower momentum but
        # a higher effective learning rate controlled via the early
        # exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
if self.verbose:
print("[t-SNE] KL divergence after %d iterations with early "
"exaggeration: %f" % (it + 1, kl_divergence))
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.n_iter - self._EXPLORATION_N_ITER
if it < self._EXPLORATION_N_ITER or remaining > 0:
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
opt_args['momentum'] = 0.8
opt_args['n_iter_without_progress'] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params,
**opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print("[t-SNE] Error after %d iterations: %f"
% (it + 1, kl_divergence))
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
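# Hypothetical usage sketch (comments only), following the recommendation in
# the class docstring to reduce very wide data with PCA before running t-SNE:
#
# X = np.random.RandomState(0).randn(500, 1000).astype(np.float32)
# X_50 = PCA(n_components=50).fit_transform(X)
# X_2d = TSNE(n_components=2, perplexity=30.0, init='pca',
#             random_state=0).fit_transform(X_50)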
| bsd-3-clause |
vincentlooi/FCIS | fcis/core/tester.py | 1 | 9616 | # --------------------------------------------------------
# Fully Convolutional Instance-aware Semantic Segmentation
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Haozhi Qi, Haochen Zhang, Guodong Zhang, Yi Li
# --------------------------------------------------------
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from utils import image
from bbox.bbox_transform import bbox_pred, clip_boxes, filter_boxes
from nms.nms import py_nms_wrapper
from utils.PrefetchingIter import PrefetchingIter
from mask.mask_transform import gpu_mask_voting, cpu_mask_voting
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return [dict(zip(self._mod.output_names, _)) for _ in zip(*self._mod.get_outputs(merge_multi_context=False))]
def im_detect(predictor, data_batch, data_names, scales, cfg):
output_all = predictor.predict(data_batch)
data_dict_all = [dict(zip(data_names, data_batch.data[i])) for i in xrange(len(data_batch.data))]
scores_all = []
pred_boxes_all = []
pred_masks_all = []
for output, data_dict, scale in zip(output_all, data_dict_all, scales):
if cfg.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
raise NotImplementedError
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
pred_masks = output['seg_pred_output'].asnumpy()
if cfg.TEST.ITER == 2 and cfg.TEST.MIN_DROP_SIZE > 0:
keep_inds = filter_boxes(rois, cfg.TEST.MIN_DROP_SIZE)
rois = rois[keep_inds, :]
scores = scores[keep_inds, :]
pred_masks = pred_masks[keep_inds, ...]
        # we trained on the scaled image & rois, so transform the boxes back to the original image scale
pred_boxes = rois / scale
scores_all.append(scores)
pred_boxes_all.append(pred_boxes)
pred_masks_all.append(pred_masks)
return scores_all, pred_boxes_all, pred_masks_all, data_dict_all
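# A small, self-contained illustration of the rescaling step in im_detect above
# (hypothetical numbers, not part of the original pipeline): rois are predicted
# on the resized test image, so dividing by the resize scale maps the boxes
# back into original-image coordinates.
def _rescale_boxes_demo():
    rois = np.array([[100., 50., 300., 200.]])    # box on the 2x-resized image
    scale = 2.0                                    # im_scale used at test time
    return rois / scale                            # -> [[ 50.,  25., 150., 100.]]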
def pred_eval(predictor, test_data, imdb, cfg, vis=False, thresh=1e-3, logger=None, ignore_cache=False):
det_file = os.path.join(imdb.result_path, imdb.name + '_detections.pkl')
seg_file = os.path.join(imdb.result_path, imdb.name + '_masks.pkl')
if os.path.exists(det_file) and os.path.exists(seg_file) and not ignore_cache:
with open(det_file, 'rb') as f:
all_boxes = cPickle.load(f)
with open(seg_file, 'rb') as f:
all_masks = cPickle.load(f)
else:
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data[0]]
if not isinstance(test_data, PrefetchingIter):
test_data = PrefetchingIter(test_data)
# function pointers
nms = py_nms_wrapper(cfg.TEST.NMS)
mask_voting = gpu_mask_voting if cfg.TEST.USE_GPU_MASK_MERGE else cpu_mask_voting
max_per_image = 100 if cfg.TEST.USE_MASK_MERGE else -1
num_images = imdb.num_images
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
all_masks = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
idx = 0
t = time.time()
for data_batch in test_data:
t1 = time.time() - t
t = time.time()
scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]
scores_all, boxes_all, masks_all, data_dict_all = im_detect(predictor, data_batch, data_names, scales, cfg)
im_shapes = [data_batch.data[i][0].shape[2:4] for i in xrange(len(data_batch.data))]
t2 = time.time() - t
t = time.time()
# post processing
for delta, (scores, boxes, masks, data_dict) in enumerate(zip(scores_all, boxes_all, masks_all, data_dict_all)):
if not cfg.TEST.USE_MASK_MERGE:
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_masks = masks[indexes, 1, :, :]
                        # class-agnostic configs predict a single box per RoI; otherwise (or when
                        # the CLASS_AGNOSTIC flag is absent) fall back to the per-class box columns
                        try:
                            if cfg.CLASS_AGNOSTIC:
                                cls_boxes = boxes[indexes, :]
                            else:
                                raise Exception()
                        except Exception:
                            cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][idx + delta] = cls_dets[keep, :]
all_masks[j][idx + delta] = cls_masks[keep, :]
else:
masks = masks[:, 1:, :, :]
im_height = np.round(im_shapes[delta][0] / scales[delta]).astype('int')
im_width = np.round(im_shapes[delta][1] / scales[delta]).astype('int')
# print(im_height)
# print(im_width)
boxes = clip_boxes(boxes, (im_height, im_width))
result_mask, result_box = mask_voting(masks, boxes, scores, imdb.num_classes,
max_per_image, im_width, im_height,
cfg.TEST.NMS, cfg.TEST.MASK_MERGE_THRESH,
cfg.BINARY_THRESH)
for j in xrange(1, imdb.num_classes):
all_boxes[j][idx+delta] = result_box[j]
all_masks[j][idx+delta] = result_mask[j][:,0,:,:]
if vis:
boxes_this_image = [[]] + [all_boxes[j][idx + delta] for j in range(1, imdb.num_classes)]
masks_this_image = [[]] + [all_masks[j][idx + delta] for j in range(1, imdb.num_classes)]
vis_all_mask(data_dict['data'].asnumpy(), boxes_this_image, masks_this_image, imdb.classes, scales[delta], cfg)
idx += test_data.batch_size
t3 = time.time() - t
t = time.time()
msg_ = 'testing {}/{} data {:.4f}s net {:.4f}s post {:.4f}s'.format(idx, imdb.num_images, t1, t2, t3)
print(msg_)
if logger:
logger.info(msg_)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
with open(seg_file, 'wb') as f:
cPickle.dump(all_masks, f, protocol=cPickle.HIGHEST_PROTOCOL)
info_str = imdb.evaluate_sds(all_boxes, all_masks)
if logger:
logger.info('evaluate detections: \n{}'.format(info_str))
def vis_all_mask(im_array, detections, masks, class_names, scale, cfg):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
import cv2
import os
im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
plt.cla()
plt.axis('off')
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
dets = detections[j]
msks = masks[j]
for det, msk in zip(dets, msks):
if det[-1] < 0.7:
continue
color = (random.random(), random.random(), random.random()) # generate a random color
bbox = det[:4] * scale
cod = np.zeros(4).astype(int)
cod[0] = int(bbox[0])
cod[1] = int(bbox[1])
cod[2] = int(bbox[2])
cod[3] = int(bbox[3])
if im[cod[1]:cod[3], cod[0]:cod[2], 0].size > 0:
msk = cv2.resize(msk, im[cod[1]:cod[3], cod[0]:cod[2], 0].T.shape)
bimsk = msk > cfg.BINARY_THRESH
bimsk = bimsk.astype(int)
bimsk = np.repeat(bimsk[:, :, np.newaxis], 3, axis=2)
mskd = im[cod[1]:cod[3], cod[0]:cod[2], :] * bimsk
clmsk = np.ones(bimsk.shape) * bimsk
clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256
clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256
clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256
im[cod[1]:cod[3], cod[0]:cod[2], :] = im[cod[1]:cod[3], cod[0]:cod[2], :] + 0.8 * clmsk - 0.8 * mskd
score = det[-1]
plt.gca().text((bbox[2]+bbox[0])/2, bbox[1],
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.imshow(im)
plt.show()
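# A condensed sketch of the mask blending loop above, with hypothetical inputs:
# pixels selected by the binary mask are pushed 80% of the way towards the
# instance colour, mirroring ``im + 0.8 * clmsk - 0.8 * mskd`` in vis_all_mask.
def _blend_mask_demo():
    crop = np.full((4, 4, 3), 120.0)                   # image crop (h, w, 3)
    msk = np.zeros((4, 4))
    msk[1:3, 1:3] = 1.0                                # soft mask in [0, 1]
    bimsk = np.repeat((msk > 0.5)[:, :, np.newaxis], 3, axis=2).astype(float)
    color = np.array([1.0, 0.0, 0.0])                  # instance colour (red)
    clmsk = bimsk * color * 256                        # coloured mask
    mskd = crop * bimsk                                # masked image pixels
    return crop + 0.8 * clmsk - 0.8 * mskd             # blended crop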
| apache-2.0 |
duncanmmacleod/gwpy | gwpy/plot/tests/test_rc.py | 3 | 1263 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.rc`
"""
import pytest
from matplotlib import rcParams
from .. import rc as plot_rc
DEFAULT_LRTB = [rcParams['figure.subplot.{0}'.format(x)] for
x in ('left', 'right', 'bottom', 'top')]
@pytest.mark.parametrize('figsize, lrbt', [
((6.4, 4.8), (.1875, .87, .16, .88)),
((0, 0), DEFAULT_LRTB),
])
def test_get_subplot_params(figsize, lrbt):
params = plot_rc.get_subplot_params(figsize)
for key, val in zip(('left', 'right', 'bottom', 'top'), lrbt):
assert getattr(params, key) == val
| gpl-3.0 |
rahuldhote/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
computationalEpidemiology/biobankAccelerometerAnalysis | utilities/collateConfusionMatrices.py | 3 | 1702 | """Command line tool to collate multiple confusion matrices *.txt into single .csv"""
import argparse
import os
import pandas as pd
import sys
parser = argparse.ArgumentParser(
description="Collate multiple confusion matrices *.txt into single .csv",
add_help=True
)
# inputs
parser.add_argument('--matrixDIR', type=str, default="activityModels/",
help="input dir with confusion matrix txt files")
# outputs
parser.add_argument('--outCSV', type=str, help="output main CSV matrix file",
default="collatedMatrix.csv")
args = parser.parse_args()
phenoOrder = {'sleep':1, 'sedentary':2, 'tasks-light':3, 'walking':4, 'moderate':5}
def main():
bigMatrix = None
# combine confusion matrices of all participants
all_files = [e for e in os.listdir(args.matrixDIR) if e.endswith('.txt') and e.startswith('confusion')]
for pidMatrix in sorted(all_files):
if bigMatrix is None:
bigMatrix = pd.read_csv(args.matrixDIR + pidMatrix)
else:
userMatrix = pd.read_csv(args.matrixDIR + pidMatrix)
bigMatrix = bigMatrix + userMatrix
# rename y_true column as it contains many appended string duplicates e.g. sleepsleepsleepsleep...
for state in phenoOrder.keys():
bigMatrix.loc[bigMatrix['y_true'].str.contains(state), 'y_true'] = state
bigMatrix['stateOrder'] = bigMatrix['y_true'].replace(phenoOrder)
bigMatrix = bigMatrix.set_index('y_true')
bigMatrix = bigMatrix.sort_values('stateOrder')
outCols = bigMatrix.index.tolist()
print(bigMatrix[outCols])
bigMatrix[outCols].to_csv(args.outCSV)
print('\n\nfinished')
if __name__ == '__main__':
main()
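# A toy sketch of the collation above, using two hypothetical per-participant
# matrices rather than real confusion*.txt files: the element-wise sum adds the
# counts but concatenates the 'y_true' strings, which are then collapsed back
# to a single state name exactly as main() does.
def _collate_demo():
    m1 = pd.DataFrame({'y_true': ['sleep', 'walking'], 'sleep': [5, 1], 'walking': [0, 4]})
    m2 = pd.DataFrame({'y_true': ['sleep', 'walking'], 'sleep': [3, 0], 'walking': [1, 6]})
    big = m1 + m2                                      # 'y_true' becomes 'sleepsleep', ...
    for state in ('sleep', 'walking'):
        big.loc[big['y_true'].str.contains(state), 'y_true'] = state
    return big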
| bsd-2-clause |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
    r'\combiningacuteaccent' : ('cmr10', 63), # for \acute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lamda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
    r'\combiningacuteaccent' : ('pncri8a', 63), # for \acute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
    'leftbrace': 123,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
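# A hypothetical helper (not used elsewhere in this module) showing how one of
# these 4-tuple ranges is meant to be applied: a character code falling inside
# [src_start, src_end] is shifted by the same offset into the destination font.
def _remap_demo(ranges, uniindex):
    """Return (dst_font, dst_index) for uniindex, or None if it is unmapped."""
    for src_start, src_end, dst_font, dst_start in ranges:
        if src_start <= uniindex <= src_end:
            return dst_font, dst_start + (uniindex - src_start)
    return None
# e.g. _remap_demo(stix_virtual_fonts['bb']['rm'], ord('B')) == ('rm', 0x1d539)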
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
        (0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
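# Illustrative sketch only (not part of the original module): how the 4-tuple
# ranges above are typically resolved.  Given a font class (e.g. 'bb'), an
# optional weight ('rm'/'it'/'bf') and a Unicode codepoint, the matching
# (src_start, src_end, dst_font, dst_start) entry maps the character to
# dst_start + (codepoint - src_start) in dst_font.  The helper name below is
# made up for demonstration purposes.
def _demo_stix_virtual_lookup(fontclass, weight, uniindex):
    table = stix_virtual_fonts[fontclass]
    if isinstance(table, dict):
        table = table[weight]
    for src_start, src_end, dst_font, dst_start in table:
        if src_start <= uniindex <= src_end:
            return dst_font, dst_start + (uniindex - src_start)
    return None
# e.g. _demo_stix_virtual_lookup('bb', 'rm', ord('N')) == ('rm', 0x2115)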
| gpl-3.0 |
vortex-ape/scikit-learn | doc/conf.py | 5 | 10156 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.imgconverter',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
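# (For example, running ``NO_MATHJAX=1 make html`` -- assuming the standard
# Sphinx Makefile in doc/ -- builds the documentation with SVG math images.)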
if os.environ.get('NO_MATHJAX'):
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/'
'MathJax.js?config=TeX-AMS_SVG')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2018, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'templates', 'includes', 'themes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
}
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: maximum width used when resizing the carousel thumbnail
#         (the height is fixed at 190 pixels in make_carousel_thumbs)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.add_javascript('js/extra.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
CoucheLimite/Oi-and-Cs-from-FTIR | FTIR.py | 1 | 24590 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import numpy as np
from PyQt5.QtWidgets import (QWidget, QPushButton,QLabel,QFileDialog,QComboBox,
QHBoxLayout, QVBoxLayout, QGridLayout, QLineEdit, QApplication,QRadioButton,QErrorMessage)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from scipy.optimize import minimize
class FTIRAnalysis(QWidget):
sampledata=None
refdata=None
diffFCA=None
diffOLine=None
diffCLine=None
poptFCA=None
sample_thick=None
PurespectrumFCA=None
PureOxygen=None
PureCarbon=None
refdatasubC=None
sampledatasubC=None
refdatasubO=None
sampledatasubO=None
thicknessfactorLine=None
thicknessfactorFCA=None
thicknessfactor=None
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
SampleSection = QVBoxLayout()
SampleSection1= QHBoxLayout()
LoadSampleButton = QPushButton('Load Sample')
LoadSampleButton.clicked.connect(self.loadsample)
LSampleThick = QLabel('Thickness [\u03BCm]')
LSample = QLabel('Sample')
SampleSection1.addStretch(1)
SampleSection1.addWidget(LSample)
SampleSection1.addWidget(LoadSampleButton)
SampleSection1.addStretch(1)
SampleSection1.addWidget(LSampleThick)
SampleSection2= QHBoxLayout()
self.SampleName = QLineEdit()
self.SampleName.setMinimumWidth(300)
self.SampleThick = QLineEdit()
self.SampleThick.textChanged[str].connect(self.ThickChange)
SampleSection2.addWidget(self.SampleName)
SampleSection2.addWidget(self.SampleThick)
SampleSection.addLayout(SampleSection1)
SampleSection.addLayout(SampleSection2)
RefSection = QVBoxLayout()
RefSection1= QHBoxLayout()
LRef = QLabel('Reference')
LoadRefButton = QPushButton('Load Reference')
LoadRefButton.clicked.connect(self.loadref)
LRefThick = QLabel('Thickness [\u03BCm]')
RefSection1.addStretch(1)
RefSection1.addWidget(LRef)
RefSection1.addWidget(LoadRefButton)
RefSection1.addStretch(1)
RefSection1.addWidget(LRefThick)
RefSection2= QHBoxLayout()
self.RefName = QLineEdit()
self.RefName.setMinimumWidth(300)
self.RefThick = QLineEdit()
self.RefThick.textChanged[str].connect(self.ThickChange)
RefSection2.addWidget(self.RefName)
RefSection2.addWidget(self.RefThick)
RefSection.addLayout(RefSection1)
RefSection.addLayout(RefSection2)
DataSection=QHBoxLayout()
DataSection.addLayout(SampleSection)
DataSection.addLayout(RefSection)
PhononCorrectSection = QHBoxLayout()
ThickSection = QVBoxLayout()
Lthickfactor = QLabel('Thickness Ratio')
self.Thickfactor = QLineEdit()
ThickSection.addWidget(Lthickfactor)
ThickSection.addWidget(self.Thickfactor)
self.Thickfactor.setReadOnly(True)
CorrectSection = QGridLayout()
        self.ChooseSL = QRadioButton('Straight lines Correction')
self.ChooseSL.setChecked(True)
self.ChooseSL.toggled.connect(self.Choosechange)
self.ChooseFCA = QRadioButton('FCA Correction')
self.GetSLfactor = QPushButton('Get Phonon factor from Straight lines correction')
self.GetSLfactor.setDisabled(True)
self.GetSLfactor.clicked.connect(self.getfactorlineButtom)
self.GetFCAfactor = QPushButton('Get Phonon factor from FCA correction')
self.GetFCAfactor.setDisabled(True)
self.GetFCAfactor.clicked.connect(self.getfactorFCAButtom)
self.SLfactor = QLineEdit()
self.SLfactor.textChanged[str].connect(self.FactorChange)
self.FCAfactor = QLineEdit()
self.FCAfactor.textChanged[str].connect(self.FactorChange)
CorrectSection.addWidget(self.ChooseSL,0,0)
CorrectSection.addWidget(self.ChooseFCA,0,1)
CorrectSection.addWidget(self.GetSLfactor,1,0)
CorrectSection.addWidget(self.GetFCAfactor,1,1)
CorrectSection.addWidget(self.SLfactor,2,0)
CorrectSection.addWidget(self.FCAfactor,2,1)
PhononCorrectSection.addLayout(ThickSection)
PhononCorrectSection.addLayout(CorrectSection)
CalcuSection = QVBoxLayout()
self.CalOxygen = QPushButton('Calculate Oxygen')
self.CalOxygen.clicked.connect(self.CalculateOxygen)
self.CalOxygen.setDisabled(True)
self.CalCarbon = QPushButton('Calculate Carbon')
self.CalCarbon.setDisabled(True)
self.CalCarbon.clicked.connect(self.CalculateCarbon)
CalcuSection.addWidget(self.CalOxygen)
CalcuSection.addWidget(self.CalCarbon)
h1box = QHBoxLayout()
h1box.addLayout(DataSection)
h1box.addStretch(1)
h1box.addLayout(PhononCorrectSection)
h1box.addLayout(CalcuSection)
SpectrumPlot=QVBoxLayout()
self.figure1 = plt.figure()
self.canvas1 = FigureCanvas(self.figure1)
self.toolbar1 = NavigationToolbar(self.canvas1, self)
SpectrumPlot.addWidget(self.canvas1)
SpectrumPlot.addWidget(self.toolbar1)
self.ax1 = self.figure1.add_subplot(111)
self.ax1.set_xlabel(r'Wavenumber $[cm^{-1}]$')
self.ax1.set_ylabel('Absorbance')
self.ax1.set_xlim([1800,400])
self.figure1.tight_layout()
ZoominPlot=QVBoxLayout()
self.figure2 = plt.figure()
self.canvas2 = FigureCanvas(self.figure2)
self.toolbar2 = NavigationToolbar(self.canvas2, self)
ZoominPlot.addWidget(self.canvas2)
ZoominPlot.addWidget(self.toolbar2)
self.ax2 = self.figure2.add_subplot(211)
self.ax2.set_title('Oxygen')
self.ax2.set_xlim([1250,1000])
self.ax3 = self.figure2.add_subplot(212)
self.ax3.set_xlabel(r'Wavenumber $[cm^{-1}]$')
self.ax3.set_title('Carbon')
self.ax3.set_xlim([700,500])
self.figure2.tight_layout()
h2box = QHBoxLayout()
h2box.addLayout(SpectrumPlot)
h2box.addLayout(ZoominPlot)
vbox = QVBoxLayout()
vbox.addLayout(h1box)
vbox.addLayout(h2box)
self.setLayout(vbox)
self.setGeometry(300, 200, 1400, 700)
self.setWindowTitle('FTIR for Oxygen and Carbon concentrations')
self.show()
def loadsample(self):
Samplename = QFileDialog.getOpenFileName(self, caption='Choose Sample file',filter='*.csv')
if Samplename[0] != '':
self.sampledata=np.genfromtxt(Samplename[0],delimiter=',')
if self.sampledata.shape[1] != 2:
self.sampledata = None
self.SampleName.setText('Invalid Data File!')
self.SampleName.setStyleSheet("color:red")
elif (self.refdata is not None) and (self.sampledata.shape[0] != self.refdata.shape[0] +1):
self.sampledata = None
self.diffFCA = None
self.poptFCA = None
self.PurespectrumFCA = None
self.diffCLine = None
self.diffOLine = None
self.refdata = None
self.refdatasubC = None
self.refdatasubO = None
self.sampledatasubC = None
self.sampledatasubO = None
self.RefName.clear()
self.RefThick.clear()
self.SampleName.clear()
self.SampleThick.clear()
self.plotdata()
error_dialog = QErrorMessage()
                error_dialog.showMessage('The lengths of the sample and reference spectra do not match!')
error_dialog.exec_()
else:
self.SampleThick.setText((os.path.basename(Samplename[0])).split()[0])
self.sampledata=self.sampledata[1:,:]
index = np.argsort(self.sampledata[:,0])
self.sampledata=self.sampledata[index,:]
self.SampleName.setStyleSheet("color:black")
self.SampleName.setText(os.path.basename(Samplename[0]))
self.diffFCA = None
self.poptFCA = None
self.PurespectrumFCA = None
self.diffCLine = None
self.diffOLine = None
self.FCAfactor.clear()
self.SLfactor.clear()
self.subtractlineSample()
self.plotdata()
if self.refdata is not None:
self.GetSLfactor.setDisabled(False)
self.GetFCAfactor.setDisabled(False)
if self.sample_thick is not None:
self.CalOxygen.setDisabled(False)
self.CalCarbon.setDisabled(False)
def loadref(self):
Refname = QFileDialog.getOpenFileName(self, caption='Choose Reference file',filter='*.csv')
if Refname[0] != '':
self.refdata=np.genfromtxt(Refname[0],delimiter=',')
if self.refdata.shape[1] != 2:
self.refdata = None
self.RefName.setText('Invalid Data File!')
self.RefName.setStyleSheet("color:red")
elif (self.sampledata is not None) and (self.sampledata.shape[0] != self.refdata.shape[0]-1):
self.sampledata = None
self.diffFCA = None
self.poptFCA = None
self.PurespectrumFCA = None
self.diffCLine = None
self.diffOLine = None
self.refdata = None
self.refdatasubC = None
self.refdatasubO = None
self.sampledatasubC = None
self.sampledatasubO = None
self.RefName.clear()
self.RefThick.clear()
self.SampleName.clear()
self.SampleThick.clear()
self.plotdata()
error_dialog = QErrorMessage()
                error_dialog.showMessage('The lengths of the sample and reference spectra do not match!')
error_dialog.exec_()
else:
self.RefThick.setText((os.path.basename(Refname[0])).split()[0])
self.refdata=self.refdata[1:,:]
index = np.argsort(self.refdata[:,0])
self.refdata=self.refdata[index,:]
self.RefName.setStyleSheet("color:black")
self.RefName.setText(os.path.basename(Refname[0]))
self.diffFCA = None
self.poptFCA = None
self.PurespectrumFCA = None
self.diffCLine = None
self.diffOLine = None
self.FCAfactor.clear()
self.SLfactor.clear()
self.subtractlineRef()
self.plotdata()
if self.refdata is not None:
self.GetSLfactor.setDisabled(False)
self.GetFCAfactor.setDisabled(False)
if self.sample_thick is not None:
self.CalOxygen.setDisabled(False)
self.CalCarbon.setDisabled(False)
def getfactorlineButtom(self):
self.FitThicknessfactorL()
def getfactorFCAButtom(self):
self.FitThicknessfactorFCA()
def Choosechange(self):
self.plotdata()
def ThickChange(self):
try:
self.sample_thick = float(self.SampleThick.text())
if (self.refdata is not None) and (self.sampledata is not None):
self.CalOxygen.setDisabled(False)
self.CalCarbon.setDisabled(False)
except ValueError:
self.sample_thick = None
self.thicknessfactor = None
self.CalOxygen.setDisabled(True)
self.CalCarbon.setDisabled(True)
self.Thickfactor.clear()
self.SampleThick.clear()
try:
ref_thick = float(self.RefThick.text())
if self.sample_thick is not None:
self.Thickfactor.setText(str(round(self.sample_thick/ref_thick,3)))
self.thicknessfactor = self.sample_thick/ref_thick
except ValueError:
self.thicknessfactor = None
self.Thickfactor.clear()
self.RefThick.clear()
def FactorChange(self):
try:
self.thicknessfactorLine = float(self.SLfactor.text())
self.substractreferenceline()
except ValueError:
self.thicknessfactorLine = None
self.SLfactor.clear()
try:
self.thicknessfactorFCA = float(self.FCAfactor.text())
self.substractreferenceFCA()
self.fitFCA()
self.substractFCA()
except ValueError:
self.thicknessfactorFCA = None
self.FCAfactor.clear()
self.plotdata()
def subtractlineRef(self):
index550 = (np.abs(self.refdata[:,0]-550)).argmin()
index650 = (np.abs(self.refdata[:,0]-650)).argmin()
index1020 = (np.abs(self.refdata[:,0]-1020)).argmin()
index1220 = (np.abs(self.refdata[:,0]-1220)).argmin()
Ref550 = np.average(self.refdata[index550-3:index550+3,1])
Ref650 = np.average(self.refdata[index650-3:index650+3,1])
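        # Subtract a straight baseline through the averaged absorbance at ~550 and
        # ~650 cm^-1 (carbon region) and, further down, at ~1020 and ~1220 cm^-1
        # (oxygen region); each expression below is just the line
        # A(nu) = A1 + (A2 - A1) * (nu - nu1) / (nu2 - nu1) written out.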
refliene = self.refdata[:,0]*(Ref650-Ref550)/100+55/10*(Ref550*65/55-Ref650)
self.refdatasubC = self.refdata[:,1]-refliene
Ref1020 = np.average(self.refdata[index1020-3:index1020+3,1])
Ref1220 = np.average(self.refdata[index1220-3:index1220+3,1])
refliene = self.refdata[:,0]*(Ref1220-Ref1020)/200+1020/200*(Ref1020*1220/1020-Ref1220)
self.refdatasubO = self.refdata[:,1]-refliene
def subtractlineSample(self):
index550 = (np.abs(self.sampledata[:,0]-550)).argmin()
index650 = (np.abs(self.sampledata[:,0]-650)).argmin()
index1020 = (np.abs(self.sampledata[:,0]-1020)).argmin()
index1220 = (np.abs(self.sampledata[:,0]-1220)).argmin()
sample550 = np.average(self.sampledata[index550-3:index550+3,1])
sample650 = np.average(self.sampledata[index650-3:index650+3,1])
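        # Same linear-baseline subtraction as in subtractlineRef, applied to the sample spectrum.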
sampleliene = self.sampledata[:,0]*(sample650-sample550)/100+55/10*(sample550*65/55-sample650)
self.sampledatasubC = self.sampledata[:,1]-sampleliene
sample1020 = np.average(self.sampledata[index1020-3:index1020+3,1])
sample1220 = np.average(self.sampledata[index1220-3:index1220+3,1])
sampleliene = self.sampledata[:,0]*(sample1220-sample1020)/200+1020/200*(sample1020*1220/1020-sample1220)
self.sampledatasubO = self.sampledata[:,1]-sampleliene
def substractreferenceline(self):
if (self.sampledata is not None) and (self.refdata is not None) and (self.thicknessfactorLine is not None):
self.diffCLine=self.sampledatasubC-self.refdatasubC*self.thicknessfactorLine
self.diffOLine=self.sampledatasubO-self.refdatasubO*self.thicknessfactorLine
def FitThicknessfactorL(self):
try:
index615=(np.abs(self.sampledata[:,0]-615)).argmin()
index621=(np.abs(self.sampledata[:,0]-621)).argmin()
self.thicknessfactorLine=np.sum(self.sampledatasubC[index615:index621])/np.sum(self.refdatasubC[index615:index621])
self.SLfactor.setText(str(self.thicknessfactorLine))
except:
pass
def substractreferenceFCA(self):
if (self.sampledata is not None) and (self.refdata is not None) and (self.thicknessfactorFCA is not None):
self.diffFCA=self.sampledata[:,1]-self.refdata[:,1]*self.thicknessfactorFCA
def FitThicknessfactorFCA(self):
try:
index1 = self.refdata[:,0]>500
index1 *= self.refdata[:,0]<590
index2 = self.refdata[:,0]>610
index2 *= self.refdata[:,0]<1020
index3 = self.refdata[:,0]>1200
index3 *= self.refdata[:,0]<1600
index = index1+index2+index3
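            # Find the thickness ratio k for which (sample - k * reference) is best
            # described by a smooth quadratic in 1/wavenumber -- the assumed shape of
            # the free-carrier absorption background -- using only regions away from
            # the carbon (~605 cm^-1) and oxygen (~1107 cm^-1) phonon bands.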
def residual(k):
diff = self.sampledata[index,1]-self.refdata[index,1]*k
p = np.polyfit(1/self.refdata[index,0],diff,2)
return np.sum(abs(diff-np.polyval(p,1/self.refdata[index,0])))
res = minimize(residual,1,method='Nelder-Mead', tol=1e-6)
self.thicknessfactorFCA = res.x[0]
self.FCAfactor.setText(str(self.thicknessfactorFCA))
except:
pass
def fitFCA(self):
if (self.refdata is not None) and (self.diffFCA is not None):
index = self.refdata[:,0]<1020
index += self.refdata[:,0]>1200
self.poptFCA= np.polyfit(1/self.refdata[index,0], self.diffFCA[index], 2)
def substractFCA(self):
if (self.diffFCA is not None) and (self.poptFCA is not None) and (self.refdata is not None):
self.PurespectrumFCA=self.diffFCA-np.polyval(self.poptFCA,1/self.refdata[:,0])
def CalculateOxygen(self):
def Calalpha(x,T):
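            # Invert the measured transmittance T for an absorption coefficient,
            # given the thickness x in cm, including a multiple-reflection
            # correction.  The constants 0.09/0.18/0.36 appear to assume a surface
            # reflectance of roughly 0.3 (R**2 = 0.09); that reading is an
            # assumption, not something stated in the original source.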
a = -1/x*((0.09-np.exp(1.7*x))+np.sqrt((0.09-np.exp(1.7*x))**2+0.36*T**2*np.exp(1.7*x)))/0.18/T
return a
if self.sample_thick is not None:
if self.ChooseSL.isChecked() and (self.diffOLine is not None):
spectrum = self.diffOLine
elif self.ChooseFCA.isChecked() and (self.PurespectrumFCA is not None):
spectrum = self.PurespectrumFCA
ind1040 = (np.abs(self.refdata[:,0]-1040)).argmin()
ind1160 = (np.abs(self.refdata[:,0]-1160)).argmin()
OxygenSpectrum = spectrum[ind1040:ind1160]
OxygenWn = self.refdata[ind1040:ind1160,0]
indpeak = OxygenSpectrum.argmax()
Tp = 10**(-OxygenSpectrum[indpeak])
Tb = 10** (-(OxygenSpectrum[0]+(OxygenSpectrum[-1]-OxygenSpectrum[0])/(OxygenWn[-1]-OxygenWn[0])*(OxygenWn[indpeak]-OxygenWn[0])))
alphap = Calalpha(1e-4*self.sample_thick,Tp)
alphab = Calalpha(1e-4*self.sample_thick,Tb)
alphaO = alphap - alphab
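            # 6.28 ppma per cm^-1 appears to correspond to the usual interstitial-
            # oxygen calibration of 3.14e17 cm^-3 per cm^-1 (with 1 ppma ~ 5e16 cm^-3);
            # this interpretation is an assumption, not stated in the source.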
OxygenConcentration = 6.28*alphaO
self.ax2.plot([OxygenWn[0],OxygenWn[-1]],[OxygenSpectrum[0],OxygenSpectrum[-1]],'k--')
self.ax2.plot([OxygenWn[indpeak],OxygenWn[indpeak]],[-np.log10(Tb),OxygenSpectrum[indpeak]],'k--')
self.ax2.plot(OxygenWn[indpeak],OxygenSpectrum[indpeak],'b*')
self.ax2.set_title('Oxygen concentration is ' + str(round(OxygenConcentration,3)) + r' $ppma$')
self.canvas2.draw()
def CalculateCarbon(self):
if self.sample_thick is not None:
if self.ChooseSL.isChecked() and (self.diffCLine is not None):
spectrum = self.diffCLine
elif self.ChooseFCA.isChecked() and (self.PurespectrumFCA is not None):
spectrum = self.PurespectrumFCA
ind590 = (np.abs(self.refdata[:,0]-590)).argmin()
ind620 = (np.abs(self.refdata[:,0]-620)).argmin()
CarbonSpectrum = spectrum[ind590:ind620]
CarbonWn = self.refdata[ind590:ind620,0]
indpeak = CarbonSpectrum.argmax()
Ap = CarbonSpectrum[indpeak]
Ab = (CarbonSpectrum[0]+(CarbonSpectrum[-1]-CarbonSpectrum[0])/(CarbonWn[-1]-CarbonWn[0])*(CarbonWn[indpeak]-CarbonWn[0]))
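            # 23.03 ~ 10 * ln(10) converts the baseline-corrected peak absorbance into
            # an absorption coefficient for the given thickness, and 1.64 ppma per cm^-1
            # appears to match the usual substitutional-carbon calibration of
            # 8.2e16 cm^-3 per cm^-1; both readings are assumptions about the
            # author's intent.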
alphaC = 23.03*(Ap - Ab)/self.sample_thick
CarbonConcentration = 1.64*alphaC
self.ax3.plot([CarbonWn[0],CarbonWn[-1]],[CarbonSpectrum[0],CarbonSpectrum[-1]],'k--')
self.ax3.plot([CarbonWn[indpeak],CarbonWn[indpeak]],[Ab,CarbonSpectrum[indpeak]],'k--')
self.ax3.plot(CarbonWn[indpeak],CarbonSpectrum[indpeak],'b*')
self.ax3.set_title('Carbon concentration is ' + str(round(CarbonConcentration,3)) + r' $ppma$')
self.canvas2.draw()
def plotdata(self):
self.ax1.clear()
self.ax1.set_xlabel(r'Wavenumber $[cm^{-1}]$')
self.ax1.set_ylabel('Absorbance')
self.ax1.set_xlim([1800,400])
self.ax2.clear()
self.ax2.set_title('Oxygen')
self.ax2.set_xlim([1250,1000])
self.ax3.clear()
self.ax3.set_title('Carbon')
self.ax3.set_xlabel(r'Wavenumber $[cm^{-1}]$')
self.ax3.set_xlim([700,500])
if self.sampledata is not None:
self.ax1.plot(self.sampledata[:,0],self.sampledata[:,1],'r',label='Sample')
if self.refdata is not None:
self.ax1.plot(self.refdata[:,0],self.refdata[:,1],'b',label='Reference')
if (self.refdatasubO is not None) and self.ChooseSL.isChecked():
index1000 = (np.abs(self.refdata[:,0]-1000)).argmin()
index1250 = (np.abs(self.refdata[:,0]-1250)).argmin()
self.ax2.plot(self.refdata[index1000:index1250,0],self.refdatasubO[index1000:index1250],'b',label='Reference line subtracted')
if (self.sampledatasubO is not None) and self.ChooseSL.isChecked():
index1000 = (np.abs(self.sampledata[:,0]-1000)).argmin()
index1250 = (np.abs(self.sampledata[:,0]-1250)).argmin()
self.ax2.plot(self.sampledata[index1000:index1250,0],self.sampledatasubO[index1000:index1250],'r',label='Sample line subtracted')
if (self.refdatasubC is not None) and self.ChooseSL.isChecked():
index500 = (np.abs(self.refdata[:,0]-500)).argmin()
index700 = (np.abs(self.refdata[:,0]-700)).argmin()
self.ax3.plot(self.refdata[index500:index700,0],self.refdatasubC[index500:index700],'b',label='Reference line subtracted')
if (self.sampledatasubC is not None) and self.ChooseSL.isChecked():
index500 = (np.abs(self.sampledata[:,0]-500)).argmin()
index700 = (np.abs(self.sampledata[:,0]-700)).argmin()
self.ax3.plot(self.sampledata[index500:index700,0],self.sampledatasubC[index500:index700],'r',label='Sample line subtracted')
if (self.diffFCA is not None) and self.ChooseFCA.isChecked():
            self.ax1.plot(self.refdata[:,0],self.diffFCA,'.',color='lime',label='Phonon subtracted')
if (self.poptFCA is not None) and self.ChooseFCA.isChecked():
self.ax1.plot(self.refdata[:,0],np.poly1d(self.poptFCA)(1/self.refdata[:,0]),color='darkgreen',label='Fitting FCA')
if (self.PurespectrumFCA is not None) and self.ChooseFCA.isChecked():
index1000 = (np.abs(self.refdata[:,0]-1000)).argmin()
index1250 = (np.abs(self.refdata[:,0]-1250)).argmin()
index500 = (np.abs(self.refdata[:,0]-500)).argmin()
index700 = (np.abs(self.refdata[:,0]-700)).argmin()
            self.ax1.plot(self.refdata[:,0],self.PurespectrumFCA,'k',label='FCA subtracted')
            self.ax2.plot(self.refdata[index1000:index1250,0],self.PurespectrumFCA[index1000:index1250],'k',label='FCA subtracted')
            self.ax3.plot(self.refdata[index500:index700,0],self.PurespectrumFCA[index500:index700],'k',label='FCA subtracted')
if (self.diffOLine is not None) and self.ChooseSL.isChecked():
index1000 = (np.abs(self.refdata[:,0]-1000)).argmin()
index1250 = (np.abs(self.refdata[:,0]-1250)).argmin()
            self.ax2.plot(self.refdata[index1000:index1250,0],self.diffOLine[index1000:index1250],'k',label='Phonon subtracted')
if (self.diffCLine is not None) and self.ChooseSL.isChecked():
index500 = (np.abs(self.refdata[:,0]-500)).argmin()
index700 = (np.abs(self.refdata[:,0]-700)).argmin()
            self.ax3.plot(self.refdata[index500:index700,0],self.diffCLine[index500:index700],'k',label='Phonon subtracted')
self.ax1.legend(loc=0)
self.ax2.legend(loc=0)
self.ax3.legend(loc=0)
self.canvas1.draw()
self.canvas2.draw()
if __name__ == '__main__':
app = QApplication(sys.argv)
FTIR = FTIRAnalysis()
sys.exit(app.exec_())
| gpl-3.0 |
jblackburne/scikit-learn | sklearn/utils/tests/test_testing.py | 24 | 7902 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager are
    # working as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
Sklearn-HMM/scikit-learn-HMM | sklean-hmm/linear_model/tests/test_ridge.py | 3 | 14885 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics.scorer import SCORERS
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
"""Ridge regression convergence test using score
TODO: for this test to be robust, we should use a dataset instead
of np.random.
"""
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "dense_cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "dense_cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "dense_cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
    y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
    X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
alpha = 1.0
#for solver in ("svd", "sparse_cg", "dense_cholesky", "lsqr"):
for solver in ("dense_cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y, alpha, sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss
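            # (minimising sum_i w_i * (y_i - x_i.beta)**2 + alpha * ||beta||**2 is
            # equivalent to an unweighted fit on sqrt(w_i) * x_i and sqrt(w_i) * y_i)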
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
def test_ridge_shapes():
"""Test shape of coef_ and intercept_
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
"""Test intercept with multiple targets GH issue #708
"""
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
"""Test BayesianRegression ridge classifier
TODO: test also n_samples > n_features
"""
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
"""On alpha=0., Ridge and OLS yield the same solution."""
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
"""Tests the ridge object using individual penalties"""
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="dense_cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'dense_cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
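    # The efficient path relies on the standard ridge LOO identity
    # e_i = (y_i - yhat_i) / (1 - h_ii), where H = X (X'X + alpha*I)^-1 X'
    # is the hat matrix, so no model has to be refit for each left-out sample.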
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = SCORERS['mean_squared_error']
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_class_weights():
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_class_weights_cv():
"""
Test class weights for cross validated ridge classifier.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
"""
Test _RidgeCV's store_cv_values attribute.
"""
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
| bsd-3-clause |
frank-tancf/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
tardis-sn/tardis | tardis/visualization/widgets/shell_info.py | 1 | 19729 | from tardis.base import run_tardis
from tardis.io.atom_data.util import download_atom_data
from tardis.util.base import (
atomic_number2element_symbol,
species_tuple_to_string,
)
from tardis.simulation import Simulation
from tardis.visualization.widgets.util import create_table_widget
import pandas as pd
import numpy as np
import ipywidgets as ipw
class BaseShellInfo:
"""The simulation information that is used by shell info widget"""
def __init__(
self,
t_radiative,
w,
abundance,
number_density,
ion_number_density,
level_number_density,
):
"""Initialize the object with all simulation properties in use
Parameters
----------
t_radiative : array_like
Radiative Temperature of each shell of simulation
w : array_like
Dilution Factor (W) of each shell of simulation model
abundance : pandas.DataFrame
Fractional abundance of elements where row labels are atomic number
and column labels are shell number
number_density : pandas.DataFrame
Number densities of elements where row labels are atomic number and
column labels are shell numbers
ion_number_density : pandas.DataFrame
Number densities of ions where rows are multi-indexed with (atomic
number, ion number) and column labels are shell number
level_number_density : pandas.DataFrame
Number densities of levels where rows are multi-indexed with (atomic
number, ion number, level number) and column labels are shell number
"""
self.t_radiative = t_radiative
self.w = w
self.abundance = abundance
self.number_density = number_density
self.ion_number_density = ion_number_density
self.level_number_density = level_number_density
def shells_data(self):
"""Generates shells data in a form that can be used by a table widget
Returns
-------
pandas.DataFrame
Dataframe containing Rad. Temp. and W against each shell of
simulation model
"""
shells_temp_w = pd.DataFrame(
{"Rad. Temp.": self.t_radiative, "W": self.w}
)
shells_temp_w.index = range(
1, len(self.t_radiative) + 1
) # Overwrite index
shells_temp_w.index.name = "Shell No."
        # Format to string to make qgrid show values in scientific notation
return shells_temp_w.applymap(lambda x: f"{x:.6e}")
def element_count(self, shell_num):
"""Generates fractional abundance of elements present in a specific
shell in a form that can be used by a table widget
Parameters
----------
shell_num : int
            Shell number (note: starts from 1, not 0 as used internally by the
            simulation model)
Returns
-------
pandas.DataFrame
Dataframe containing element symbol and fractional abundance in a
specific shell, against each atomic number
"""
element_count_data = self.abundance[shell_num - 1].copy()
element_count_data.index.name = "Z"
element_count_data.fillna(0, inplace=True)
return pd.DataFrame(
{
"Element": element_count_data.index.map(
atomic_number2element_symbol
),
# Format to string to show in scientific notation
f"Frac. Ab. (Shell {shell_num})": element_count_data.map(
"{:.6e}".format
),
}
)
def ion_count(self, atomic_num, shell_num):
"""Generates fractional abundance of ions of a specific element and
shell, in a form that can be used by a table widget
Parameters
----------
atomic_num : int
Atomic number of element
shell_num : int
            Shell number (note: starts from 1, not 0 as used internally by the
            simulation model)
Returns
-------
pandas.DataFrame
            Dataframe containing ion species and fractional abundance for a
specific element, against each ion number
"""
ion_num_density = self.ion_number_density[shell_num - 1].loc[atomic_num]
element_num_density = self.number_density.loc[atomic_num, shell_num - 1]
ion_count_data = ion_num_density / element_num_density # Normalization
ion_count_data.index.name = "Ion"
ion_count_data.fillna(0, inplace=True)
return pd.DataFrame(
{
"Species": ion_count_data.index.map(
lambda x: species_tuple_to_string((atomic_num, x))
),
f"Frac. Ab. (Z={atomic_num})": ion_count_data.map(
"{:.6e}".format
),
}
)
def level_count(self, ion, atomic_num, shell_num):
"""Generates fractional abundance of levels of a specific ion, element
and shell, in a form that can be used by a table widget
Parameters
----------
ion : int
Ion number (note: starts from 0, same what is used by simulation
model)
atomic_num : int
Atomic number of element
shell_num : int
            Shell number (note: starts from 1, not 0 as used internally by the
            simulation model)
Returns
-------
pandas.DataFrame
Dataframe containing fractional abundance for a specific ion,
against each level number
"""
level_num_density = self.level_number_density[shell_num - 1].loc[
atomic_num, ion
]
ion_num_density = self.ion_number_density[shell_num - 1].loc[
atomic_num, ion
]
level_count_data = level_num_density / ion_num_density # Normalization
level_count_data.index.name = "Level"
level_count_data.name = f"Frac. Ab. (Ion={ion})"
level_count_data.fillna(0, inplace=True)
return level_count_data.map("{:.6e}".format).to_frame()
class SimulationShellInfo(BaseShellInfo):
"""The simulation information that is used by shell info widget, obtained
from a TARDIS Simulation object
"""
def __init__(self, sim_model):
"""Initialize the object with TARDIS Simulation object
Parameters
----------
sim_model : tardis.simulation.Simulation
TARDIS Simulation object produced by running a simulation
"""
super().__init__(
sim_model.model.t_radiative,
sim_model.model.w,
sim_model.plasma.abundance,
sim_model.plasma.number_density,
sim_model.plasma.ion_number_density,
sim_model.plasma.level_number_density,
)
class HDFShellInfo(BaseShellInfo):
"""The simulation information that is used by shell info widget, obtained
from a simulation HDF file
"""
def __init__(self, hdf_fpath):
"""Initialize the object with a simulation HDF file
Parameters
----------
hdf_fpath : str
A valid path to a simulation HDF file (HDF file must be created
from a TARDIS Simulation object using :code:`to_hdf` method with
default arguments)
"""
with pd.HDFStore(hdf_fpath, "r") as sim_data:
super().__init__(
sim_data["/simulation/model/t_radiative"],
sim_data["/simulation/model/w"],
sim_data["/simulation/plasma/abundance"],
sim_data["/simulation/plasma/number_density"],
sim_data["/simulation/plasma/ion_number_density"],
sim_data["/simulation/plasma/level_number_density"],
)
class ShellInfoWidget:
"""The Shell Info Widget to explore abundances in different shells.
    It consists of four interlinked table widgets - the shells table and the
    element count, ion count and level count tables - allowing you to explore
    fractional abundances all the way from elements to ions to levels by
    clicking on table rows.
"""
def __init__(self, shell_info_data):
"""Initialize the object with the shell information of a simulation
model
Parameters
----------
shell_info_data : subclass of BaseShellInfo
Shell information object constructed from Simulation object or HDF
file
"""
self.data = shell_info_data
# Creating the shells data table widget
self.shells_table = create_table_widget(
self.data.shells_data(), [30, 35, 35]
)
# Creating the element count table widget
self.element_count_table = create_table_widget(
self.data.element_count(self.shells_table.df.index[0]),
[15, 30, 55],
changeable_col={
"index": -1, # since last column will change names
# Shells table index will give all possible shell numbers
"other_names": [
f"Frac. Ab. (Shell {shell_num})"
for shell_num in self.shells_table.df.index
],
},
)
# Creating the ion count table widget
self.ion_count_table = create_table_widget(
self.data.ion_count(
self.element_count_table.df.index[0],
self.shells_table.df.index[0],
),
[20, 30, 50],
changeable_col={
"index": -1,
                # Since elements are the same for each shell, the previous
                # table (element counts for shell 1) gives all possible elements
"other_names": [
f"Frac. Ab. (Z={atomic_num})"
for atomic_num in self.element_count_table.df.index
],
},
)
# Creating the level count table widget
self.level_count_table = create_table_widget(
self.data.level_count(
self.ion_count_table.df.index[0],
self.element_count_table.df.index[0],
self.shells_table.df.index[0],
),
[30, 70],
changeable_col={
"index": -1,
# Ion values range from 0 to max atomic_num present in
# element count table
"other_names": [
f"Frac. Ab. (Ion={ion})"
for ion in range(
0, self.element_count_table.df.index.max() + 1
)
],
},
)
def update_element_count_table(self, event, qgrid_widget):
"""Event listener to update the data in element count table widget based
on interaction (row selected event) in shells table widget.
Parameters
----------
event : dict
Dictionary that holds information about event (see Notes section)
qgrid_widget : qgrid.QgridWidget
QgridWidget instance that fired the event (see Notes section)
Notes
-----
You will never need to pass any of these arguments explicitly. This is
the expected signature of the function passed to :code:`handler` argument
of :code:`on` method of a table widget (qgrid.QgridWidget object) as
        explained in the `qgrid documentation <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget.on>`_.
"""
# Get shell number from row selected in shells_table
shell_num = event["new"][0] + 1
# Update data in element_count_table
self.element_count_table.df = self.data.element_count(shell_num)
# Get atomic_num of 0th row of element_count_table
atomic_num0 = self.element_count_table.df.index[0]
# Also update next table (ion counts) by triggering its event listener
        # The listener won't trigger if the last row selected in element_count_table was also the 0th
if self.element_count_table.get_selected_rows() == [0]:
self.element_count_table.change_selection([]) # Unselect rows
# Select 0th row in count table which will trigger update_ion_count_table
self.element_count_table.change_selection([atomic_num0])
def update_ion_count_table(self, event, qgrid_widget):
"""Event listener to update the data in ion count table widget based
on interaction (row selected event) in element count table widget.
Parameters
----------
event : dict
Dictionary that holds information about event (see Notes section)
qgrid_widget : qgrid.QgridWidget
QgridWidget instance that fired the event (see Notes section)
Notes
-----
You will never need to pass any of these arguments explicitly. This is
the expected signature of the function passed to :code:`handler` argument
of :code:`on` method of a table widget (qgrid.QgridWidget object) as
        explained in the `qgrid documentation <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget.on>`_.
"""
        # Don't execute the function if no row was selected implicitly (i.e. by the API)
if event["new"] == [] and event["source"] == "api":
return
# Get shell no. & atomic_num from rows selected in previous tables
shell_num = self.shells_table.get_selected_rows()[0] + 1
atomic_num = self.element_count_table.df.index[event["new"][0]]
# Update data in ion_count_table
self.ion_count_table.df = self.data.ion_count(atomic_num, shell_num)
# Also update next table (level counts) by triggering its event listener
ion0 = self.ion_count_table.df.index[0]
if self.ion_count_table.get_selected_rows() == [0]:
self.ion_count_table.change_selection([])
self.ion_count_table.change_selection([ion0])
def update_level_count_table(self, event, qgrid_widget):
"""Event listener to update the data in level count table widget based
on interaction (row selected event) in ion count table widget.
Parameters
----------
event : dict
Dictionary that holds information about event (see Notes section)
qgrid_widget : qgrid.QgridWidget
QgridWidget instance that fired the event (see Notes section)
Notes
-----
You will never need to pass any of these arguments explicitly. This is
the expected signature of the function passed to :code:`handler` argument
of :code:`on` method of a table widget (qgrid.QgridWidget object) as
        explained in the `qgrid documentation <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget.on>`_.
"""
        # Don't execute the function if no row was selected implicitly (i.e. by the API)
if event["new"] == [] and event["source"] == "api":
return
# Get shell no., atomic_num, ion from selected rows in previous tables
shell_num = self.shells_table.get_selected_rows()[0] + 1
atomic_num = self.element_count_table.df.index[
self.element_count_table.get_selected_rows()[0]
]
ion = self.ion_count_table.df.index[event["new"][0]]
# Update data in level_count_table
self.level_count_table.df = self.data.level_count(
ion, atomic_num, shell_num
)
def display(
self,
shells_table_width="30%",
element_count_table_width="24%",
ion_count_table_width="24%",
level_count_table_width="18%",
**layout_kwargs,
):
"""Display the shell info widget by putting all component widgets nicely
together and allowing interaction between the table widgets
Parameters
----------
shells_table_width : str, optional
CSS :code:`width` property value for shells table, by default '30%'
element_count_table_width : str, optional
CSS :code:`width` property value for element count table, by default '24%'
ion_count_table_width : str, optional
CSS :code:`width` property value for ion count table, by default '24%'
level_count_table_width : str, optional
CSS :code:`width` property value for level count table, by default '18%'
Other Parameters
----------------
**layout_kwargs
Any valid CSS properties to be passed to the :code:`layout` attribute
of table widgets container (HTML :code:`div`) as explained in
`ipywidgets documentation <https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Styling.html#The-layout-attribute>`_
Returns
-------
ipywidgets.Box
Shell info widget containing all component widgets
"""
# CSS properties of the layout of shell info tables container
tables_container_layout = dict(
display="flex",
align_items="flex-start",
justify_content="space-between",
)
tables_container_layout.update(layout_kwargs)
# Setting tables' widths
self.shells_table.layout.width = shells_table_width
self.element_count_table.layout.width = element_count_table_width
self.ion_count_table.layout.width = ion_count_table_width
self.level_count_table.layout.width = level_count_table_width
# Attach event listeners to table widgets
self.shells_table.on(
"selection_changed", self.update_element_count_table
)
self.element_count_table.on(
"selection_changed", self.update_ion_count_table
)
self.ion_count_table.on(
"selection_changed", self.update_level_count_table
)
# Putting all table widgets in a container styled with tables_container_layout
shell_info_tables_container = ipw.Box(
[
self.shells_table,
self.element_count_table,
self.ion_count_table,
self.level_count_table,
],
layout=ipw.Layout(**tables_container_layout),
)
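        # Select the first shell by default; the chained selection_changed
        # listeners then populate the element, ion and level count tables.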
self.shells_table.change_selection([1])
# Notes text explaining how to interpret tables widgets' data
text = ipw.HTML(
"<b>Frac. Ab.</b> denotes <i>Fractional Abundances</i> (i.e all "
"values sum to 1)<br><b>W</b> denotes <i>Dilution Factor</i> and "
"<b>Rad. Temp.</b> is <i>Radiative Temperature (in K)</i>"
)
        # Place the note text above the shell info tables container
shell_info_widget = ipw.VBox([text, shell_info_tables_container])
return shell_info_widget
def shell_info_from_simulation(sim_model):
"""Create shell info widget from a TARDIS simulation object
Parameters
----------
sim_model : tardis.simulation.Simulation
TARDIS Simulation object produced by running a simulation
Returns
-------
ShellInfoWidget
"""
shell_info_data = SimulationShellInfo(sim_model)
return ShellInfoWidget(shell_info_data)
def shell_info_from_hdf(hdf_fpath):
"""Create shell info widget from a simulation HDF file
Parameters
----------
hdf_fpath : str
A valid path to a simulation HDF file (HDF file must be created
from a TARDIS Simulation object using :code:`to_hdf` method with
default arguments)
Returns
-------
ShellInfoWidget
"""
shell_info_data = HDFShellInfo(hdf_fpath)
return ShellInfoWidget(shell_info_data)
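# Illustrative usage sketch (not part of the original module); the HDF file
# name below is hypothetical:
#
#     widget = shell_info_from_hdf("demo_simulation.h5")
#     widget.display()  # returns an ipywidgets.Box to render in a notebook
#
# or, for an in-memory Simulation object ``sim``:
#
#     shell_info_from_simulation(sim).display()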
| bsd-3-clause |
OshynSong/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
    of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
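# class_weight={1: 10} makes misclassifying the minority class (label 1) ten
# times more costly, which shifts the fitted hyperplane towards the majority class.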
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/stats/kde.py | 31 | 18766 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from scipy.special import logsumexp
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError
            If the dimensionality of the mean or covariance of the input
            Gaussian differs from the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
# This will raise LinAlgError if the new cov matrix is not s.p.d
# cho_factor returns (ndarray, bool) where bool is a flag for whether
# or not ndarray is upper or lower triangular
sum_cov_chol = linalg.cho_factor(sum_cov)
diff = self.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / norm_const / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
sqrt_det = np.prod(np.diagonal(sum_cov_chol[0]))
norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det
result /= norm_const * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
"""
points = atleast_2d(x)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
energy = zeros((self.n, m), dtype=float)
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy[i] = sum(diff*tdiff,axis=0) / 2.0
result = logsumexp(-energy, b=1/self._norm_factor, axis=0)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = logsumexp(-energy, b=1/self._norm_factor)
return result
| mit |
hofschroeer/gnuradio | gr-filter/examples/fir_filter_ccc.py | 7 | 4023 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print("Num. Taps: ", len(taps))
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
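        # Two paths through the flowgraph: the truncated noise source is captured
        # unmodified in vsnk_src, and after filtering/decimation in vsnk_out.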
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=10000,
help="Number of samples to process [default=%(default)r]")
parser.add_argument("-s", "--samplerate", type=eng_float, default=8000,
help="System sample rate [default=%(default)r]")
parser.add_argument("-B", "--bandwidth", type=eng_float, default=1000,
help="Filter bandwidth [default=%(default)r]")
parser.add_argument("-T", "--transition", type=eng_float, default=100,
help="Transition band [default=%(default)r]")
parser.add_argument("-A", "--attenuation", type=eng_float, default=80,
help="Stopband attenuation [default=%(default)r]")
parser.add_argument("-D", "--decimation", type=int, default=1,
help="Decmation factor [default=%(default)r]")
args = parser.parse_args()
put = example_fir_filter_ccc(args.nsamples,
args.samplerate,
args.bandwidth,
args.transition,
args.attenuation,
args.decimation)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_snk = numpy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft / 4,
Fs=args.samplerate)
f2 = pyplot.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
raghavrv/scikit-learn | examples/applications/plot_face_recognition.py | 37 | 5706 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
# #############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
# #############################################################################
# Split into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# #############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
# #############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
# #############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
# #############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |